-rw-r--r--.gitignore1
-rw-r--r--Documentation/RCU/trace.txt254
-rw-r--r--Documentation/RCU/whatisRCU.txt2
-rw-r--r--Documentation/dontdiff3
-rw-r--r--Documentation/fb/framebuffer.txt6
-rw-r--r--Documentation/filesystems/caching/fscache.txt110
-rw-r--r--Documentation/filesystems/caching/netfs-api.txt21
-rw-r--r--Documentation/filesystems/ocfs2.txt6
-rw-r--r--Documentation/kernel-parameters.txt21
-rw-r--r--Documentation/pcmcia/driver-changes.txt12
-rw-r--r--Documentation/slow-work.txt160
-rw-r--r--Documentation/vm/page-types.c2
-rw-r--r--MAINTAINERS77
-rw-r--r--Makefile5
-rw-r--r--arch/alpha/boot/tools/objstrip.c2
-rw-r--r--arch/alpha/include/asm/fcntl.h2
-rw-r--r--arch/alpha/include/asm/thread_info.h27
-rw-r--r--arch/alpha/kernel/core_marvel.c2
-rw-r--r--arch/alpha/kernel/core_titan.c2
-rw-r--r--arch/alpha/kernel/irq.c2
-rw-r--r--arch/alpha/kernel/irq_alpha.c2
-rw-r--r--arch/alpha/kernel/irq_i8259.c2
-rw-r--r--arch/alpha/kernel/irq_pyxis.c2
-rw-r--r--arch/alpha/kernel/irq_srm.c2
-rw-r--r--arch/alpha/kernel/sys_alcor.c2
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c2
-rw-r--r--arch/alpha/kernel/sys_dp264.c4
-rw-r--r--arch/alpha/kernel/sys_eb64p.c2
-rw-r--r--arch/alpha/kernel/sys_eiger.c2
-rw-r--r--arch/alpha/kernel/sys_jensen.c2
-rw-r--r--arch/alpha/kernel/sys_marvel.c6
-rw-r--r--arch/alpha/kernel/sys_mikasa.c2
-rw-r--r--arch/alpha/kernel/sys_noritake.c2
-rw-r--r--arch/alpha/kernel/sys_rawhide.c2
-rw-r--r--arch/alpha/kernel/sys_ruffian.c2
-rw-r--r--arch/alpha/kernel/sys_rx164.c2
-rw-r--r--arch/alpha/kernel/sys_sable.c2
-rw-r--r--arch/alpha/kernel/sys_takara.c2
-rw-r--r--arch/alpha/kernel/sys_titan.c2
-rw-r--r--arch/alpha/kernel/sys_wildfire.c2
-rw-r--r--arch/arm/include/asm/kmap_types.h6
-rw-r--r--arch/arm/kernel/signal.c8
-rw-r--r--arch/arm/mach-at91/Kconfig20
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c23
-rw-r--r--arch/arm/mach-kirkwood/common.c8
-rw-r--r--arch/arm/mach-kirkwood/include/mach/bridge-regs.h3
-rw-r--r--arch/arm/mach-mmp/include/mach/mfp-pxa910.h2
-rw-r--r--arch/arm/mach-omap2/board-zoom2.c87
-rw-r--r--arch/arm/mach-omap2/clock34xx.c2
-rw-r--r--arch/arm/mach-omap2/clock34xx.h4
-rw-r--r--arch/arm/mach-omap2/gpmc.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa320.c4
-rw-r--r--arch/arm/mach-pxa/cpufreq-pxa2xx.c1
-rw-r--r--arch/arm/mach-pxa/cpufreq-pxa3xx.c2
-rw-r--r--arch/arm/mach-pxa/hx4700.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/entry-macro.S1
-rw-r--r--arch/arm/mach-pxa/spitz.c2
-rw-r--r--arch/arm/mach-u300/core.c2
-rw-r--r--arch/arm/plat-omap/gpio.c2
-rw-r--r--arch/arm/plat-pxa/include/plat/mfp.h2
-rw-r--r--arch/arm/plat-pxa/mfp.c4
-rw-r--r--arch/arm/tools/mach-types119
-rw-r--r--arch/avr32/include/asm/bug.h2
-rw-r--r--arch/blackfin/kernel/bfin_dma_5xx.c9
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cplbinit.c2
-rw-r--r--arch/blackfin/kernel/process.c2
-rw-r--r--arch/blackfin/kernel/ptrace.c2
-rw-r--r--arch/blackfin/mach-bf518/include/mach/anomaly.h18
-rw-r--r--arch/blackfin/mach-bf527/include/mach/anomaly.h20
-rw-r--r--arch/blackfin/mach-bf533/include/mach/anomaly.h18
-rw-r--r--arch/blackfin/mach-bf537/include/mach/anomaly.h18
-rw-r--r--arch/blackfin/mach-bf538/include/mach/anomaly.h18
-rw-r--r--arch/blackfin/mach-bf548/include/mach/anomaly.h23
-rw-r--r--arch/blackfin/mach-bf561/atomic.S14
-rw-r--r--arch/blackfin/mach-bf561/include/mach/anomaly.h25
-rw-r--r--arch/blackfin/mach-common/arch_checks.c5
-rw-r--r--arch/blackfin/mach-common/smp.c6
-rw-r--r--arch/ia64/include/asm/swiotlb.h2
-rw-r--r--arch/ia64/kernel/pci-swiotlb.c4
-rw-r--r--arch/mips/Kconfig22
-rw-r--r--arch/mips/bcm47xx/prom.c2
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig252
-rw-r--r--arch/mips/include/asm/bug.h4
-rw-r--r--arch/mips/include/asm/dma-mapping.h11
-rw-r--r--arch/mips/include/asm/mman.h1
-rw-r--r--arch/mips/include/asm/system.h15
-rw-r--r--arch/mips/kernel/cevt-smtc.c3
-rw-r--r--arch/mips/kernel/syscall.c4
-rw-r--r--arch/mips/math-emu/cp1emu.c41
-rw-r--r--arch/mips/mm/dma-default.c7
-rw-r--r--arch/mips/mti-malta/malta-memory.c3
-rw-r--r--arch/mips/rb532/devices.c3
-rw-r--r--arch/mips/rb532/prom.c2
-rw-r--r--arch/mips/txx9/generic/setup.c4
-rw-r--r--arch/parisc/include/asm/fcntl.h2
-rw-r--r--arch/parisc/kernel/unwind.c2
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S6
-rw-r--r--arch/powerpc/boot/addRamDisk.c6
-rw-r--r--arch/powerpc/include/asm/kmap_types.h11
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/s390/Kconfig28
-rw-r--r--arch/s390/include/asm/bug.h2
-rw-r--r--arch/s390/include/asm/spinlock.h29
-rw-r--r--arch/s390/kernel/early.c9
-rw-r--r--arch/s390/kernel/entry.S8
-rw-r--r--arch/s390/kernel/entry64.S8
-rw-r--r--arch/sh/kernel/cpu/irq/imask.c2
-rw-r--r--arch/sh/kernel/cpu/irq/intc-sh5.c2
-rw-r--r--arch/sparc/boot/btfixupprep.c8
-rw-r--r--arch/sparc/boot/piggyback_32.c10
-rw-r--r--arch/sparc/boot/piggyback_64.c2
-rw-r--r--arch/sparc/mm/init_64.h2
-rw-r--r--arch/x86/include/asm/amd_iommu.h16
-rw-r--r--arch/x86/include/asm/amd_iommu_proto.h38
-rw-r--r--arch/x86/include/asm/amd_iommu_types.h54
-rw-r--r--arch/x86/include/asm/bug.h4
-rw-r--r--arch/x86/include/asm/calgary.h2
-rw-r--r--arch/x86/include/asm/device.h2
-rw-r--r--arch/x86/include/asm/dma-mapping.h5
-rw-r--r--arch/x86/include/asm/gart.h9
-rw-r--r--arch/x86/include/asm/iommu.h2
-rw-r--r--arch/x86/include/asm/swiotlb.h9
-rw-r--r--arch/x86/include/asm/x86_init.h10
-rw-r--r--arch/x86/kernel/acpi/processor.c3
-rw-r--r--arch/x86/kernel/amd_iommu.c1247
-rw-r--r--arch/x86/kernel/amd_iommu_init.c94
-rw-r--r--arch/x86/kernel/aperture_64.c4
-rw-r--r--arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c23
-rw-r--r--arch/x86/kernel/cpu/cpufreq/longhaul.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/speedstep-ich.c19
-rw-r--r--arch/x86/kernel/crash.c5
-rw-r--r--arch/x86/kernel/pci-calgary_64.c94
-rw-r--r--arch/x86/kernel/pci-dma.c39
-rw-r--r--arch/x86/kernel/pci-gart_64.c156
-rw-r--r--arch/x86/kernel/pci-nommu.c11
-rw-r--r--arch/x86/kernel/pci-swiotlb.c18
-rw-r--r--arch/x86/kernel/reboot.c4
-rw-r--r--arch/x86/kernel/x86_init.c8
-rw-r--r--crypto/async_tx/Kconfig5
-rw-r--r--crypto/async_tx/async_pq.c14
-rw-r--r--crypto/async_tx/async_xor.c15
-rw-r--r--crypto/gcm.c107
-rw-r--r--drivers/acpi/acpica/acpredef.h5
-rw-r--r--drivers/acpi/blacklist.c17
-rw-r--r--drivers/acpi/sleep.c8
-rw-r--r--drivers/ata/pata_pcmcia.c17
-rw-r--r--drivers/ata/sata_fsl.c84
-rw-r--r--drivers/base/power/runtime.c12
-rw-r--r--drivers/block/aoe/aoecmd.c23
-rw-r--r--drivers/block/cciss.c16
-rw-r--r--drivers/bluetooth/bluecard_cs.c16
-rw-r--r--drivers/bluetooth/bt3c_cs.c13
-rw-r--r--drivers/bluetooth/btuart_cs.c13
-rw-r--r--drivers/bluetooth/btusb.c4
-rw-r--r--drivers/bluetooth/dtl1_cs.c12
-rw-r--r--drivers/char/agp/Kconfig3
-rw-r--r--drivers/char/agp/intel-agp.c19
-rw-r--r--drivers/char/keyboard.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c73
-rw-r--r--drivers/char/pcmcia/cm4040_cs.c52
-rw-r--r--drivers/char/pcmcia/ipwireless/hardware.c8
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c296
-rw-r--r--drivers/char/pcmcia/synclink_cs.c80
-rw-r--r--drivers/char/tpm/tpm.c2
-rw-r--r--drivers/char/tpm/tpm_tis.c11
-rw-r--r--drivers/char/tty_port.c10
-rw-r--r--drivers/char/vt_ioctl.c6
-rw-r--r--drivers/cpufreq/cpufreq.c48
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c4
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c4
-rw-r--r--drivers/crypto/padlock-aes.c4
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/dmaengine.c10
-rw-r--r--drivers/dma/ioat/dca.c6
-rw-r--r--drivers/dma/ioat/dma.h4
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/dma/ioat/dma_v3.c44
-rw-r--r--drivers/dma/ioat/hw.h2
-rw-r--r--drivers/dma/ioat/registers.h4
-rw-r--r--drivers/dma/shdma.c12
-rw-r--r--drivers/firewire/ohci.c41
-rw-r--r--drivers/gpio/langwell_gpio.c11
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/drm_edid.c6
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c9
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h12
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c10
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c36
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c15
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c26
-rw-r--r--drivers/gpu/drm/radeon/atom.c1
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515.c9
-rw-r--r--drivers/hwmon/adt7475.c17
-rw-r--r--drivers/hwmon/s3c-hwmon.c11
-rw-r--r--drivers/i2c/busses/i2c-pnx.c9
-rw-r--r--drivers/i2c/chips/tsl2550.c3
-rw-r--r--drivers/i2c/i2c-core.c11
-rw-r--r--drivers/ide/ide-cs.c33
-rw-r--r--drivers/ide/ide-ioctls.c2
-rw-r--r--drivers/ieee802154/fakehard.c5
-rw-r--r--drivers/input/ff-core.c20
-rw-r--r--drivers/input/ff-memless.c26
-rw-r--r--drivers/input/input.c29
-rw-r--r--drivers/input/keyboard/atkbd.c13
-rw-r--r--drivers/input/mouse/lifebook.c3
-rw-r--r--drivers/input/mouse/psmouse-base.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h21
-rw-r--r--drivers/isdn/hardware/avm/avm_cs.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hisax/avma1_cs.c28
-rw-r--r--drivers/isdn/hisax/elsa_cs.c46
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c64
-rw-r--r--drivers/isdn/hisax/teles_cs.c38
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c352
-rw-r--r--drivers/leds/leds-gpio.c2
-rw-r--r--drivers/md/md.c41
-rw-r--r--drivers/md/raid1.c7
-rw-r--r--drivers/md/raid5.c85
-rw-r--r--drivers/media/common/ir-functions.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c1
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c10
-rw-r--r--drivers/media/dvb/siano/Kconfig2
-rw-r--r--drivers/media/radio/radio-gemtek-pci.c2
-rw-r--r--drivers/media/video/davinci/vpif_display.c1
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c2
-rw-r--r--drivers/media/video/mx1_camera.c1
-rw-r--r--drivers/media/video/mx3_camera.c1
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c7
-rw-r--r--drivers/media/video/soc_camera.c17
-rw-r--r--drivers/media/video/videobuf-dma-contig.c1
-rw-r--r--drivers/mfd/wm831x-core.c2
-rw-r--r--drivers/misc/eeprom/at24.c76
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c197
-rw-r--r--drivers/mtd/maps/sa1100-flash.c4
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arm/ep93xx_eth.c12
-rw-r--r--drivers/net/au1000_eth.c15
-rw-r--r--drivers/net/b44.c3
-rw-r--r--drivers/net/can/Kconfig60
-rw-r--r--drivers/net/can/dev.c6
-rw-r--r--drivers/net/can/sja1000/Kconfig47
-rw-r--r--drivers/net/can/usb/Kconfig10
-rw-r--r--drivers/net/can/usb/Makefile2
-rw-r--r--drivers/net/cxgb3/sge.c4
-rw-r--r--drivers/net/davinci_emac.c29
-rw-r--r--drivers/net/e100.c17
-rw-r--r--drivers/net/e1000e/e1000.h2
-rw-r--r--drivers/net/e1000e/ethtool.c12
-rw-r--r--drivers/net/e1000e/ich8lan.c4
-rw-r--r--drivers/net/e1000e/netdev.c50
-rw-r--r--drivers/net/e1000e/phy.c56
-rw-r--r--drivers/net/forcedeth.c5
-rw-r--r--drivers/net/ibm_newemac/emac.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c3
-rw-r--r--drivers/net/ks8851_mll.c142
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/netxen/netxen_nic.h2
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h2
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c55
-rw-r--r--drivers/net/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/pcmcia/3c574_cs.c90
-rw-r--r--drivers/net/pcmcia/3c589_cs.c102
-rw-r--r--drivers/net/pcmcia/axnet_cs.c56
-rw-r--r--drivers/net/pcmcia/com20020_cs.c63
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c189
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c71
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c173
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c80
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c340
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c259
-rw-r--r--drivers/net/phy/mdio-gpio.c2
-rw-r--r--drivers/net/ppp_generic.c11
-rw-r--r--drivers/net/r6040.c2
-rw-r--r--drivers/net/r8169.c11
-rw-r--r--drivers/net/s2io.c1
-rw-r--r--drivers/net/smc91x.c2
-rw-r--r--drivers/net/smsc911x.c2
-rw-r--r--drivers/net/smsc9420.c14
-rw-r--r--drivers/net/stmmac/stmmac_main.c50
-rw-r--r--drivers/net/stmmac/stmmac_timer.c4
-rw-r--r--drivers/net/stmmac/stmmac_timer.h1
-rw-r--r--drivers/net/sungem.c10
-rw-r--r--drivers/net/usb/hso.c8
-rw-r--r--drivers/net/veth.c35
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wireless/airo_cs.c55
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c1
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/atmel_cs.c51
-rw-r--r--drivers/net/wireless/b43/main.c3
-rw-r--r--drivers/net/wireless/b43/pcmcia.c26
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c51
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c6
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c141
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h8
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c10
-rw-r--r--drivers/net/wireless/libertas/ethtool.c17
-rw-r--r--drivers/net/wireless/libertas/if_cs.c72
-rw-r--r--drivers/net/wireless/netwave_cs.c95
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c33
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c60
-rw-r--r--drivers/net/wireless/p54/p54usb.c10
-rw-r--r--drivers/net/wireless/ray_cs.c357
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_rfkill.c1
-rw-r--r--drivers/net/wireless/wavelan_cs.c35
-rw-r--r--drivers/net/wireless/wl3501_cs.c74
-rw-r--r--drivers/parport/parport_cs.c38
-rw-r--r--drivers/pci/dmar.c59
-rw-r--r--drivers/pci/intel-iommu.c45
-rw-r--r--drivers/pcmcia/Kconfig42
-rw-r--r--drivers/pcmcia/Makefile10
-rw-r--r--drivers/pcmcia/cardbus.c4
-rw-r--r--drivers/pcmcia/cirrus.h10
-rw-r--r--drivers/pcmcia/cistpl.c71
-rw-r--r--drivers/pcmcia/cs.c67
-rw-r--r--drivers/pcmcia/cs_internal.h42
-rw-r--r--drivers/pcmcia/ds.c188
-rw-r--r--drivers/pcmcia/i82365.c37
-rw-r--r--drivers/pcmcia/m32r_cfc.c105
-rw-r--r--drivers/pcmcia/m32r_pcc.c51
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c40
-rw-r--r--drivers/pcmcia/o2micro.h22
-rw-r--r--drivers/pcmcia/pcmcia_ioctl.c92
-rw-r--r--drivers/pcmcia/pcmcia_resource.c482
-rw-r--r--drivers/pcmcia/pd6729.c70
-rw-r--r--drivers/pcmcia/pd6729.h7
-rw-r--r--drivers/pcmcia/pxa2xx_base.c94
-rw-r--r--drivers/pcmcia/pxa2xx_base.h3
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c2
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c2
-rw-r--r--drivers/pcmcia/pxa2xx_e740.c2
-rw-r--r--drivers/pcmcia/pxa2xx_lubbock.c14
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c2
-rw-r--r--drivers/pcmcia/pxa2xx_palmld.c2
-rw-r--r--drivers/pcmcia/pxa2xx_palmtx.c2
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c2
-rw-r--r--drivers/pcmcia/pxa2xx_trizeps4.c4
-rw-r--r--drivers/pcmcia/pxa2xx_viper.c2
-rw-r--r--drivers/pcmcia/rsrc_mgr.c1
-rw-r--r--drivers/pcmcia/sa1100_assabet.c2
-rw-r--r--drivers/pcmcia/sa1100_badge4.c11
-rw-r--r--drivers/pcmcia/sa1100_cerf.c2
-rw-r--r--drivers/pcmcia/sa1100_generic.c11
-rw-r--r--drivers/pcmcia/sa1100_h3600.c4
-rw-r--r--drivers/pcmcia/sa1100_jornada720.c42
-rw-r--r--drivers/pcmcia/sa1100_neponset.c13
-rw-r--r--drivers/pcmcia/sa1100_shannon.c2
-rw-r--r--drivers/pcmcia/sa1100_simpad.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c65
-rw-r--r--drivers/pcmcia/sa1111_generic.h17
-rw-r--r--drivers/pcmcia/sa11xx_base.c99
-rw-r--r--drivers/pcmcia/sa11xx_base.h2
-rw-r--r--drivers/pcmcia/soc_common.c225
-rw-r--r--drivers/pcmcia/soc_common.h10
-rw-r--r--drivers/pcmcia/tcic.c29
-rw-r--r--drivers/pcmcia/topic.h15
-rw-r--r--drivers/platform/x86/acerhdf.c12
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c26
-rw-r--r--drivers/regulator/core.c3
-rw-r--r--drivers/regulator/fixed.c5
-rw-r--r--drivers/regulator/wm831x-isink.c2
-rw-r--r--drivers/regulator/wm831x-ldo.c6
-rw-r--r--drivers/rtc/rtc-coh901331.c11
-rw-r--r--drivers/rtc/rtc-pcf50633.c3
-rw-r--r--drivers/rtc/rtc-x1205.c6
-rw-r--r--drivers/s390/char/monreader.c1
-rw-r--r--drivers/s390/char/sclp_quiesce.c48
-rw-r--r--drivers/scsi/bfa/bfad_fwimg.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/hosts.c2
-rw-r--r--drivers/scsi/ipr.c42
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/libsas/sas_expander.c1
-rw-r--r--drivers/scsi/pcmcia/aha152x_stub.c42
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c44
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c8
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c45
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c42
-rw-r--r--drivers/scsi/pmcraid.c10
-rw-r--r--drivers/scsi/scsi_scan.c18
-rw-r--r--drivers/scsi/scsi_sysfs.c63
-rw-r--r--drivers/scsi/scsi_transport_fc.c3
-rw-r--r--drivers/scsi/sd_dif.c2
-rw-r--r--drivers/serial/bcm63xx_uart.c4
-rw-r--r--drivers/serial/of_serial.c1
-rw-r--r--drivers/serial/serial_cs.c143
-rw-r--r--drivers/serial/suncore.c37
-rw-r--r--drivers/serial/suncore.h5
-rw-r--r--drivers/serial/sunhv.c2
-rw-r--r--drivers/serial/sunsab.c9
-rw-r--r--drivers/serial/sunsu.c36
-rw-r--r--drivers/serial/sunzilog.c8
-rw-r--r--drivers/spi/spi_stmp.c2
-rw-r--r--drivers/spi/spi_txx9.c13
-rw-r--r--drivers/ssb/pcmcia.c232
-rw-r--r--drivers/ssb/scan.c6
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c221
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c203
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c237
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c236
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c225
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c127
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c219
-rw-r--r--drivers/staging/go7007/s2250-board.c4
-rw-r--r--drivers/staging/go7007/s2250-loader.h24
-rw-r--r--drivers/staging/hv/BlkVsc.c1
-rw-r--r--drivers/staging/hv/Channel.c16
-rw-r--r--drivers/staging/hv/ChannelMgmt.c2
-rw-r--r--drivers/staging/hv/NetVsc.c1
-rw-r--r--drivers/staging/hv/NetVsc.h1
-rw-r--r--drivers/staging/hv/StorVsc.c10
-rw-r--r--drivers/staging/hv/blkvsc_drv.c1
-rw-r--r--drivers/staging/hv/netvsc_drv.c1
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/octeon/ethernet-spi.c2
-rw-r--r--drivers/staging/octeon/ethernet.c53
-rw-r--r--drivers/staging/rtl8187se/TODO3
-rw-r--r--drivers/staging/rtl8192su/TODO3
-rw-r--r--drivers/staging/vt6655/TODO5
-rw-r--r--drivers/staging/vt6656/TODO5
-rw-r--r--drivers/telephony/ixj_pcmcia.c36
-rw-r--r--drivers/uio/uio_pdrv_genirq.c1
-rw-r--r--drivers/usb/class/cdc-acm.c18
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/gadget/amd5536udc.c49
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-pci.c4
-rw-r--r--drivers/usb/host/ehci-q.c16
-rw-r--r--drivers/usb/host/ehci-sched.c12
-rw-r--r--drivers/usb/host/ehci.h2
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-pci.c20
-rw-r--r--drivers/usb/host/ohci-q.c18
-rw-r--r--drivers/usb/host/ohci.h9
-rw-r--r--drivers/usb/host/sl811_cs.c49
-rw-r--r--drivers/usb/host/xhci-mem.c10
-rw-r--r--drivers/usb/host/xhci-ring.c7
-rw-r--r--drivers/usb/mon/mon_bin.c11
-rw-r--r--drivers/usb/musb/cppi_dma.c10
-rw-r--r--drivers/usb/musb/musb_core.c4
-rw-r--r--drivers/usb/musb/musb_gadget.c79
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c3
-rw-r--r--drivers/usb/musb/musb_host.c5
-rw-r--r--drivers/usb/serial/cp210x.c21
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/video/backlight/corgi_lcd.c5
-rw-r--r--drivers/video/backlight/lcd.c2
-rw-r--r--drivers/video/da8xx-fb.c13
-rw-r--r--drivers/video/gbefb.c2
-rw-r--r--drivers/watchdog/pnx4008_wdt.c4
-rw-r--r--drivers/watchdog/rc32434_wdt.c4
-rw-r--r--fs/9p/cache.c14
-rw-r--r--fs/afs/file.c15
-rw-r--r--fs/cachefiles/interface.c32
-rw-r--r--fs/cachefiles/namei.c187
-rw-r--r--fs/cachefiles/rdwr.c130
-rw-r--r--fs/cifs/CHANGES9
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/dir.c8
-rw-r--r--fs/cifs/misc.c2
-rw-r--r--fs/exec.c4
-rw-r--r--fs/fcntl.c4
-rw-r--r--fs/file_table.c2
-rw-r--r--fs/fscache/Kconfig7
-rw-r--r--fs/fscache/Makefile1
-rw-r--r--fs/fscache/cache.c5
-rw-r--r--fs/fscache/cookie.c26
-rw-r--r--fs/fscache/internal.h56
-rw-r--r--fs/fscache/main.c6
-rw-r--r--fs/fscache/object-list.c432
-rw-r--r--fs/fscache/object.c104
-rw-r--r--fs/fscache/operation.c120
-rw-r--r--fs/fscache/page.c273
-rw-r--r--fs/fscache/proc.c13
-rw-r--r--fs/fscache/stats.c94
-rw-r--r--fs/fuse/dir.c3
-rw-r--r--fs/gfs2/Kconfig2
-rw-r--r--fs/gfs2/acl.c357
-rw-r--r--fs/gfs2/acl.h24
-rw-r--r--fs/gfs2/aops.c20
-rw-r--r--fs/gfs2/dir.c34
-rw-r--r--fs/gfs2/glock.c31
-rw-r--r--fs/gfs2/glock.h9
-rw-r--r--fs/gfs2/glops.c5
-rw-r--r--fs/gfs2/incore.h5
-rw-r--r--fs/gfs2/inode.c4
-rw-r--r--fs/gfs2/log.c2
-rw-r--r--fs/gfs2/lops.c4
-rw-r--r--fs/gfs2/main.c4
-rw-r--r--fs/gfs2/ops_fstype.c154
-rw-r--r--fs/gfs2/quota.c393
-rw-r--r--fs/gfs2/quota.h5
-rw-r--r--fs/gfs2/recovery.c4
-rw-r--r--fs/gfs2/rgrp.c14
-rw-r--r--fs/gfs2/super.c110
-rw-r--r--fs/gfs2/super.h4
-rw-r--r--fs/gfs2/sys.c14
-rw-r--r--fs/gfs2/xattr.c74
-rw-r--r--fs/gfs2/xattr.h8
-rw-r--r--fs/inode.c10
-rw-r--r--fs/jffs2/read.c9
-rw-r--r--fs/namespace.c20
-rw-r--r--fs/nfs/fscache.c10
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfsd/nfs3xdr.c2
-rw-r--r--fs/nilfs2/cpfile.c2
-rw-r--r--fs/nilfs2/inode.c1
-rw-r--r--fs/nilfs2/ioctl.c6
-rw-r--r--fs/ocfs2/file.c3
-rw-r--r--fs/ocfs2/ocfs2.h7
-rw-r--r--fs/ocfs2/refcounttree.c69
-rw-r--r--fs/ocfs2/super.c20
-rw-r--r--fs/ocfs2/uptodate.c5
-rw-r--r--fs/open.c27
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/quota/Kconfig2
-rw-r--r--fs/quota/dquot.c93
-rw-r--r--fs/quota/quota.c93
-rw-r--r--fs/xattr_acl.c2
-rw-r--r--fs/xfs/xfs_log_recover.c4
-rw-r--r--fs/xfs/xfs_trans_ail.c23
-rw-r--r--include/asm-generic/fcntl.h2
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/bootmem.h1
-rw-r--r--include/linux/capability.h2
-rw-r--r--include/linux/compiler-gcc.h1
-rw-r--r--include/linux/compiler-gcc4.h14
-rw-r--r--include/linux/compiler.h9
-rw-r--r--include/linux/dmar.h15
-rw-r--r--include/linux/fscache-cache.h40
-rw-r--r--include/linux/fscache.h27
-rw-r--r--include/linux/gfs2_ondisk.h6
-rw-r--r--include/linux/hardirq.h24
-rw-r--r--include/linux/i2c-pnx.h2
-rw-r--r--include/linux/init_task.h4
-rw-r--r--include/linux/input.h4
-rw-r--r--include/linux/interrupt.h6
-rw-r--r--include/linux/irqflags.h2
-rw-r--r--include/linux/isdn_ppp.h2
-rw-r--r--include/linux/kernel.h5
-rw-r--r--include/linux/lsm_audit.h18
-rw-r--r--include/linux/mfd/wm831x/regulator.h4
-rw-r--r--include/linux/net.h1
-rw-r--r--include/linux/nilfs2_fs.h9
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/posix_acl.h14
-rw-r--r--include/linux/quota.h11
-rw-r--r--include/linux/ratelimit.h33
-rw-r--r--include/linux/rcupdate.h10
-rw-r--r--include/linux/rcutiny.h104
-rw-r--r--include/linux/rcutree.h7
-rw-r--r--include/linux/sched.h23
-rw-r--r--include/linux/securebits.h24
-rw-r--r--include/linux/security.h48
-rw-r--r--include/linux/slow-work.h72
-rw-r--r--include/linux/smp.h11
-rw-r--r--include/linux/spinlock.h6
-rw-r--r--include/linux/spinlock_api_smp.h75
-rw-r--r--include/linux/srcu.h1
-rw-r--r--include/linux/suspend.h21
-rw-r--r--include/linux/swiotlb.h12
-rw-r--r--include/linux/tpm.h9
-rw-r--r--include/linux/vt.h4
-rw-r--r--include/net/mac80211.h6
-rw-r--r--include/net/sctp/structs.h3
-rw-r--r--include/pcmcia/cs.h14
-rw-r--r--include/pcmcia/cs_types.h3
-rw-r--r--include/pcmcia/ds.h88
-rw-r--r--include/pcmcia/ss.h13
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/scsi/scsi_host.h29
-rw-r--r--include/trace/events/syscalls.h3
-rw-r--r--include/trace/ftrace.h2
-rw-r--r--include/trace/power.h32
-rw-r--r--init/Kconfig22
-rw-r--r--init/main.c11
-rw-r--r--kernel/Kconfig.locks202
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/capability.c15
-rw-r--r--kernel/hung_task.c2
-rw-r--r--kernel/irq/chip.c6
-rw-r--r--kernel/irq/proc.c40
-rw-r--r--kernel/irq/spurious.c14
-rw-r--r--kernel/kmod.c8
-rw-r--r--kernel/kprobes.c4
-rw-r--r--kernel/module.c5
-rw-r--r--kernel/mutex.c4
-rw-r--r--kernel/printk.c7
-rw-r--r--kernel/rcupdate.c122
-rw-r--r--kernel/rcutiny.c282
-rw-r--r--kernel/rcutorture.c65
-rw-r--r--kernel/rcutree.c465
-rw-r--r--kernel/rcutree.h69
-rw-r--r--kernel/rcutree_plugin.h309
-rw-r--r--kernel/rcutree_trace.c12
-rw-r--r--kernel/sched.c3
-rw-r--r--kernel/signal.c46
-rw-r--r--kernel/slow-work-debugfs.c227
-rw-r--r--kernel/slow-work.c512
-rw-r--r--kernel/slow-work.h72
-rw-r--r--kernel/smp.c56
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/spinlock.c310
-rw-r--r--kernel/srcu.c74
-rw-r--r--kernel/sysctl.c3
-rw-r--r--kernel/trace/ftrace.c11
-rw-r--r--kernel/trace/ring_buffer.c9
-rw-r--r--kernel/trace/ring_buffer_benchmark.c85
-rw-r--r--kernel/trace/trace.c41
-rw-r--r--kernel/trace/trace_clock.c8
-rw-r--r--kernel/trace/trace_export.c4
-rw-r--r--kernel/workqueue.c28
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/radix-tree.c5
-rw-r--r--lib/ratelimit.c45
-rw-r--r--lib/string.c20
-rw-r--r--lib/swiotlb.c46
-rw-r--r--mm/Kconfig5
-rw-r--r--mm/backing-dev.c8
-rw-r--r--mm/bootmem.c24
-rw-r--r--mm/memory_hotplug.c24
-rw-r--r--mm/mmap.c4
-rw-r--r--mm/percpu.c121
-rw-r--r--net/8021q/vlan.c7
-rw-r--r--net/bluetooth/hci_conn.c1
-rw-r--r--net/bluetooth/l2cap.c13
-rw-r--r--net/core/dev.c11
-rw-r--r--net/core/pktgen.c32
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/core/sysctl_net_core.c2
-rw-r--r--net/core/utils.c2
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/tcp.c19
-rw-r--r--net/mac80211/agg-rx.c4
-rw-r--r--net/mac80211/agg-tx.c35
-rw-r--r--net/mac80211/ht.c8
-rw-r--r--net/mac80211/ieee80211_i.h10
-rw-r--r--net/mac80211/util.c19
-rw-r--r--net/netfilter/nf_log.c18
-rw-r--r--net/netfilter/xt_limit.c2
-rw-r--r--net/netfilter/xt_osf.c2
-rw-r--r--net/rfkill/core.c1
-rw-r--r--net/sctp/associola.c4
-rw-r--r--net/sctp/outqueue.c10
-rw-r--r--net/sctp/sm_sideeffect.c1
-rw-r--r--net/sctp/sm_statefuns.c15
-rw-r--r--net/sctp/socket.c40
-rw-r--r--net/sctp/transport.c8
-rw-r--r--net/sunrpc/addr.c18
-rw-r--r--scripts/dtc/data.c2
-rw-r--r--scripts/dtc/dtc-lexer.l2
-rw-r--r--scripts/dtc/dtc-lexer.lex.c_shipped69
-rw-r--r--scripts/dtc/libfdt/fdt_ro.c2
-rw-r--r--scripts/dtc/treesource.c2
-rw-r--r--scripts/genksyms/keywords.c_shipped95
-rw-r--r--scripts/genksyms/keywords.gperf2
-rw-r--r--scripts/kconfig/Makefile4
-rw-r--r--scripts/kconfig/lex.zconf.c_shipped25
-rw-r--r--scripts/kconfig/streamline_config.pl12
-rw-r--r--scripts/kconfig/zconf.gperf2
-rw-r--r--scripts/kconfig/zconf.hash.c_shipped2
-rw-r--r--scripts/kconfig/zconf.l6
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped379
-rw-r--r--scripts/kconfig/zconf.y13
-rwxr-xr-xscripts/recordmcount.pl218
-rw-r--r--scripts/selinux/Makefile4
-rw-r--r--scripts/selinux/genheaders/.gitignore1
-rw-r--r--scripts/selinux/genheaders/Makefile5
-rw-r--r--scripts/selinux/genheaders/genheaders.c118
-rw-r--r--scripts/selinux/mdp/mdp.c151
-rw-r--r--security/Kconfig54
-rw-r--r--security/Makefile1
-rw-r--r--security/capability.c21
-rw-r--r--security/commoncap.c74
-rw-r--r--security/integrity/ima/Kconfig1
-rw-r--r--security/integrity/ima/ima_iint.c4
-rw-r--r--security/lsm_audit.c4
-rw-r--r--security/min_addr.c3
-rw-r--r--security/root_plug.c90
-rw-r--r--security/security.c61
-rw-r--r--security/selinux/.gitignore2
-rw-r--r--security/selinux/Makefile10
-rw-r--r--security/selinux/avc.c78
-rw-r--r--security/selinux/hooks.c25
-rw-r--r--security/selinux/include/av_inherit.h34
-rw-r--r--security/selinux/include/av_perm_to_string.h183
-rw-r--r--security/selinux/include/av_permissions.h870
-rw-r--r--security/selinux/include/avc_ss.h21
-rw-r--r--security/selinux/include/class_to_string.h80
-rw-r--r--security/selinux/include/classmap.h150
-rw-r--r--security/selinux/include/common_perm_to_string.h58
-rw-r--r--security/selinux/include/flask.h91
-rw-r--r--security/selinux/include/security.h13
-rw-r--r--security/selinux/selinuxfs.c4
-rw-r--r--security/selinux/ss/Makefile2
-rw-r--r--security/selinux/ss/mls.c2
-rw-r--r--security/selinux/ss/policydb.c47
-rw-r--r--security/selinux/ss/policydb.h7
-rw-r--r--security/selinux/ss/services.c562
-rw-r--r--security/tomoyo/common.c200
-rw-r--r--security/tomoyo/common.h4
-rw-r--r--security/tomoyo/realpath.c13
-rw-r--r--sound/arm/aaci.c6
-rw-r--r--sound/oss/hex2hex.c2
-rw-r--r--sound/pci/hda/patch_nvhdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c6
-rw-r--r--sound/pci/hda/patch_sigmatel.c2
-rw-r--r--sound/pci/ice1712/ice1712.h4
-rw-r--r--sound/pci/ice1712/prodigy_hifi.c2
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c6
-rw-r--r--sound/pcmcia/vx/vxpocket.c6
-rw-r--r--sound/soc/codecs/tlv320aic23.c5
-rw-r--r--sound/soc/omap/omap3evm.c2
-rw-r--r--sound/soc/omap/omap3pandora.c3
-rw-r--r--sound/soc/soc-dapm.c20
-rw-r--r--sound/usb/usbmixer.c9
739 files changed, 14656 insertions, 11819 deletions
diff --git a/.gitignore b/.gitignore
index b93fb7eff942..946c7ec5c922 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,7 @@
 *.elf
 *.bin
 *.gz
+*.bz2
 *.lzma
 *.patch
 *.gcno
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 187bbf10c923..8608fd85e921 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -1,185 +1,10 @@
 CONFIG_RCU_TRACE debugfs Files and Formats
 
 
-The rcupreempt and rcutree implementations of RCU provide debugfs trace
-output that summarizes counters and state. This information is useful for
-debugging RCU itself, and can sometimes also help to debug abuses of RCU.
-Note that the rcuclassic implementation of RCU does not provide debugfs
-trace output.
-
-The following sections describe the debugfs files and formats for
-preemptable RCU (rcupreempt) and hierarchical RCU (rcutree).
-
-
-Preemptable RCU debugfs Files and Formats
-
-This implementation of RCU provides three debugfs files under the
-top-level directory RCU: rcu/rcuctrs (which displays the per-CPU
-counters used by preemptable RCU) rcu/rcugp (which displays grace-period
-counters), and rcu/rcustats (which internal counters for debugging RCU).
-
-The output of "cat rcu/rcuctrs" looks as follows:
-
-CPU last cur F M
- 0 5 -5 0 0
- 1 -1 0 0 0
- 2 0 1 0 0
- 3 0 1 0 0
- 4 0 1 0 0
- 5 0 1 0 0
- 6 0 2 0 0
- 7 0 -1 0 0
- 8 0 1 0 0
-ggp = 26226, state = waitzero
-
-The per-CPU fields are as follows:
-
-o "CPU" gives the CPU number. Offline CPUs are not displayed.
-
-o "last" gives the value of the counter that is being decremented
-	for the current grace period phase. In the example above,
-	the counters sum to 4, indicating that there are still four
-	RCU read-side critical sections still running that started
-	before the last counter flip.
-
-o "cur" gives the value of the counter that is currently being
-	both incremented (by rcu_read_lock()) and decremented (by
-	rcu_read_unlock()). In the example above, the counters sum to
-	1, indicating that there is only one RCU read-side critical section
-	still running that started after the last counter flip.
-
-o "F" indicates whether RCU is waiting for this CPU to acknowledge
-	a counter flip. In the above example, RCU is not waiting on any,
-	which is consistent with the state being "waitzero" rather than
-	"waitack".
-
-o "M" indicates whether RCU is waiting for this CPU to execute a
-	memory barrier. In the above example, RCU is not waiting on any,
-	which is consistent with the state being "waitzero" rather than
-	"waitmb".
-
-o "ggp" is the global grace-period counter.
-
-o "state" is the RCU state, which can be one of the following:
-
-	o "idle": there is no grace period in progress.
-
-	o "waitack": RCU just incremented the global grace-period
-		counter, which has the effect of reversing the roles of
-		the "last" and "cur" counters above, and is waiting for
-		all the CPUs to acknowledge the flip. Once the flip has
-		been acknowledged, CPUs will no longer be incrementing
-		what are now the "last" counters, so that their sum will
-		decrease monotonically down to zero.
-
-	o "waitzero": RCU is waiting for the sum of the "last" counters
-		to decrease to zero.
-
-	o "waitmb": RCU is waiting for each CPU to execute a memory
-		barrier, which ensures that instructions from a given CPU's
-		last RCU read-side critical section cannot be reordered
-		with instructions following the memory-barrier instruction.
-
-The output of "cat rcu/rcugp" looks as follows:
-
-oldggp=48870 newggp=48873
-
-Note that reading from this file provokes a synchronize_rcu(). The
-"oldggp" value is that of "ggp" from rcu/rcuctrs above, taken before
-executing the synchronize_rcu(), and the "newggp" value is also the
-"ggp" value, but taken after the synchronize_rcu() command returns.
-
-
-The output of "cat rcu/rcugp" looks as follows:
-
-na=1337955 nl=40 wa=1337915 wl=44 da=1337871 dl=0 dr=1337871 di=1337871
-1=50989 e1=6138 i1=49722 ie1=82 g1=49640 a1=315203 ae1=265563 a2=49640
-z1=1401244 ze1=1351605 z2=49639 m1=5661253 me1=5611614 m2=49639
-
-These are counters tracking internal preemptable-RCU events, however,
-some of them may be useful for debugging algorithms using RCU. In
-particular, the "nl", "wl", and "dl" values track the number of RCU
-callbacks in various states. The fields are as follows:
-
-o "na" is the total number of RCU callbacks that have been enqueued
-	since boot.
-
-o "nl" is the number of RCU callbacks waiting for the previous
-	grace period to end so that they can start waiting on the next
-	grace period.
-
-o "wa" is the total number of RCU callbacks that have started waiting
-	for a grace period since boot. "na" should be roughly equal to
-	"nl" plus "wa".
-
-o "wl" is the number of RCU callbacks currently waiting for their
-	grace period to end.
-
-o "da" is the total number of RCU callbacks whose grace periods
-	have completed since boot. "wa" should be roughly equal to
-	"wl" plus "da".
-
-o "dr" is the total number of RCU callbacks that have been removed
-	from the list of callbacks ready to invoke. "dr" should be roughly
-	equal to "da".
-
-o "di" is the total number of RCU callbacks that have been invoked
-	since boot. "di" should be roughly equal to "da", though some
-	early versions of preemptable RCU had a bug so that only the
-	last CPU's count of invocations was displayed, rather than the
-	sum of all CPU's counts.
-
-o "1" is the number of calls to rcu_try_flip(). This should be
-	roughly equal to the sum of "e1", "i1", "a1", "z1", and "m1"
-	described below. In other words, the number of times that
-	the state machine is visited should be equal to the sum of the
-	number of times that each state is visited plus the number of
-	times that the state-machine lock acquisition failed.
-
-o "e1" is the number of times that rcu_try_flip() was unable to
-	acquire the fliplock.
-
-o "i1" is the number of calls to rcu_try_flip_idle().
-
-o "ie1" is the number of times rcu_try_flip_idle() exited early
-	due to the calling CPU having no work for RCU.
-
-o "g1" is the number of times that rcu_try_flip_idle() decided
-	to start a new grace period. "i1" should be roughly equal to
-	"ie1" plus "g1".
-
-o "a1" is the number of calls to rcu_try_flip_waitack().
-
-o "ae1" is the number of times that rcu_try_flip_waitack() found
-	that at least one CPU had not yet acknowledge the new grace period
-	(AKA "counter flip").
-
-o "a2" is the number of time rcu_try_flip_waitack() found that
-	all CPUs had acknowledged. "a1" should be roughly equal to
-	"ae1" plus "a2". (This particular output was collected on
-	a 128-CPU machine, hence the smaller-than-usual fraction of
-	calls to rcu_try_flip_waitack() finding all CPUs having already
-	acknowledged.)
-
-o "z1" is the number of calls to rcu_try_flip_waitzero().
-
-o "ze1" is the number of times that rcu_try_flip_waitzero() found
-	that not all of the old RCU read-side critical sections had
-	completed.
-
-o "z2" is the number of times that rcu_try_flip_waitzero() finds
-	the sum of the counters equal to zero, in other words, that
-	all of the old RCU read-side critical sections had completed.
-	The value of "z1" should be roughly equal to "ze1" plus
-	"z2".
-
-o "m1" is the number of calls to rcu_try_flip_waitmb().
-
-o "me1" is the number of times that rcu_try_flip_waitmb() finds
-	that at least one CPU has not yet executed a memory barrier.
-
-o "m2" is the number of times that rcu_try_flip_waitmb() finds that
-	all CPUs have executed a memory barrier.
+The rcutree implementation of RCU provides debugfs trace output that
+summarizes counters and state. This information is useful for debugging
+RCU itself, and can sometimes also help to debug abuses of RCU.
+The following sections describe the debugfs files and formats.
 
 
 Hierarchical RCU debugfs Files and Formats
@@ -210,9 +35,10 @@ rcu_bh:
   6 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=859/1 dn=0 df=15 of=0 ri=0 ql=0 b=10
   7 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3761/1 dn=0 df=15 of=0 ri=0 ql=0 b=10
 
-The first section lists the rcu_data structures for rcu, the second for
-rcu_bh. Each section has one line per CPU, or eight for this 8-CPU system.
-The fields are as follows:
+The first section lists the rcu_data structures for rcu_sched, the second
+for rcu_bh. Note that CONFIG_TREE_PREEMPT_RCU kernels will have an
+additional section for rcu_preempt. Each section has one line per CPU,
+or eight for this 8-CPU system. The fields are as follows:
 
 o The number at the beginning of each line is the CPU number.
 	CPUs numbers followed by an exclamation mark are offline,
@@ -223,9 +49,9 @@ o The number at the beginning of each line is the CPU number.
 
 o "c" is the count of grace periods that this CPU believes have
 	completed. CPUs in dynticks idle mode may lag quite a ways
-	behind, for example, CPU 4 under "rcu" above, which has slept
-	through the past 25 RCU grace periods. It is not unusual to
-	see CPUs lagging by thousands of grace periods.
+	behind, for example, CPU 4 under "rcu_sched" above, which has
+	slept through the past 25 RCU grace periods. It is not unusual
+	to see CPUs lagging by thousands of grace periods.
 
 o "g" is the count of grace periods that this CPU believes have
 	started. Again, CPUs in dynticks idle mode may lag behind.
@@ -308,8 +134,10 @@ The output of "cat rcu/rcugp" looks as follows:
 rcu_sched: completed=33062 gpnum=33063
 rcu_bh: completed=464 gpnum=464
 
-Again, this output is for both "rcu" and "rcu_bh". The fields are
-taken from the rcu_state structure, and are as follows:
+Again, this output is for both "rcu_sched" and "rcu_bh". Note that
+kernels built with CONFIG_TREE_PREEMPT_RCU will have an additional
+"rcu_preempt" line. The fields are taken from the rcu_state structure,
+and are as follows:
 
 o "completed" is the number of grace periods that have completed.
 	It is comparable to the "c" field from rcu/rcudata in that a
@@ -324,23 +152,24 @@ o "gpnum" is the number of grace periods that have started. It is
 	If these two fields are equal (as they are for "rcu_bh" above),
 	then there is no grace period in progress, in other words, RCU
 	is idle. On the other hand, if the two fields differ (as they
-	do for "rcu" above), then an RCU grace period is in progress.
+	do for "rcu_sched" above), then an RCU grace period is in progress.
 
 
 The output of "cat rcu/rcuhier" looks as follows, with very long lines:
 
-c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6
-1/1 0:127 ^0
-3/3 0:35 ^0 0/0 36:71 ^1 0/0 72:107 ^2 0/0 108:127 ^3
-3/3f 0:5 ^0 2/3 6:11 ^1 0/0 12:17 ^2 0/0 18:23 ^3 0/0 24:29 ^4 0/0 30:35 ^5 0/0 36:41 ^0 0/0 42:47 ^1 0/0 48:53 ^2 0/0 54:59 ^3 0/0 60:65 ^4 0/0 66:71 ^5 0/0 72:77 ^0 0/0 78:83 ^1 0/0 84:89 ^2 0/0 90:95 ^3 0/0 96:101 ^4 0/0 102:107 ^5 0/0 108:113 ^0 0/0 114:119 ^1 0/0 120:125 ^2 0/0 126:127 ^3
+c=6902 g=6903 s=2 jfq=3 j=72c7 nfqs=13142/nfqsng=0(13142) fqlh=6 oqlen=0
+1/1 .>. 0:127 ^0
+3/3 .>. 0:35 ^0 0/0 .>. 36:71 ^1 0/0 .>. 72:107 ^2 0/0 .>. 108:127 ^3
+3/3f .>. 0:5 ^0 2/3 .>. 6:11 ^1 0/0 .>. 12:17 ^2 0/0 .>. 18:23 ^3 0/0 .>. 24:29 ^4 0/0 .>. 30:35 ^5 0/0 .>. 36:41 ^0 0/0 .>. 42:47 ^1 0/0 .>. 48:53 ^2 0/0 .>. 54:59 ^3 0/0 .>. 60:65 ^4 0/0 .>. 66:71 ^5 0/0 .>. 72:77 ^0 0/0 .>. 78:83 ^1 0/0 .>. 84:89 ^2 0/0 .>. 90:95 ^3 0/0 .>. 96:101 ^4 0/0 .>. 102:107 ^5 0/0 .>. 108:113 ^0 0/0 .>. 114:119 ^1 0/0 .>. 120:125 ^2 0/0 .>. 126:127 ^3
 rcu_bh:
-c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0
-0/1 0:127 ^0
-0/3 0:35 ^0 0/0 36:71 ^1 0/0 72:107 ^2 0/0 108:127 ^3
-0/3f 0:5 ^0 0/3 6:11 ^1 0/0 12:17 ^2 0/0 18:23 ^3 0/0 24:29 ^4 0/0 30:35 ^5 0/0 36:41 ^0 0/0 42:47 ^1 0/0 48:53 ^2 0/0 54:59 ^3 0/0 60:65 ^4 0/0 66:71 ^5 0/0 72:77 ^0 0/0 78:83 ^1 0/0 84:89 ^2 0/0 90:95 ^3 0/0 96:101 ^4 0/0 102:107 ^5 0/0 108:113 ^0 0/0 114:119 ^1 0/0 120:125 ^2 0/0 126:127 ^3
+c=-226 g=-226 s=1 jfq=-5701 j=72c7 nfqs=88/nfqsng=0(88) fqlh=0 oqlen=0
+0/1 .>. 0:127 ^0
+0/3 .>. 0:35 ^0 0/0 .>. 36:71 ^1 0/0 .>. 72:107 ^2 0/0 .>. 108:127 ^3
+0/3f .>. 0:5 ^0 0/3 .>. 6:11 ^1 0/0 .>. 12:17 ^2 0/0 .>. 18:23 ^3 0/0 .>. 24:29 ^4 0/0 .>. 30:35 ^5 0/0 .>. 36:41 ^0 0/0 .>. 42:47 ^1 0/0 .>. 48:53 ^2 0/0 .>. 54:59 ^3 0/0 .>. 60:65 ^4 0/0 .>. 66:71 ^5 0/0 .>. 72:77 ^0 0/0 .>. 78:83 ^1 0/0 .>. 84:89 ^2 0/0 .>. 90:95 ^3 0/0 .>. 96:101 ^4 0/0 .>. 102:107 ^5 0/0 .>. 108:113 ^0 0/0 .>. 114:119 ^1 0/0 .>. 120:125 ^2 0/0 .>. 126:127 ^3
 
-This is once again split into "rcu" and "rcu_bh" portions. The fields are
-as follows:
+This is once again split into "rcu_sched" and "rcu_bh" portions,
+and CONFIG_TREE_PREEMPT_RCU kernels will again have an additional
+"rcu_preempt" section. The fields are as follows:
 
 o "c" is exactly the same as "completed" under rcu/rcugp.
 
@@ -372,6 +201,11 @@ o "fqlh" is the number of calls to force_quiescent_state() that
 	exited immediately (without even being counted in nfqs above)
 	due to contention on ->fqslock.
 
+o	"oqlen" is the number of callbacks on the "orphan" callback
+	list. RCU callbacks are placed on this list by CPUs going
+	offline, and are "adopted" either by the CPU helping the outgoing
+	CPU or by the next rcu_barrier*() call, whichever comes first.
+
 o Each element of the form "1/1 0:127 ^0" represents one struct
 	rcu_node. Each line represents one level of the hierarchy, from
 	root to leaves. It is best to think of the rcu_data structures
@@ -379,7 +213,7 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
 	might be either one, two, or three levels of rcu_node structures,
 	depending on the relationship between CONFIG_RCU_FANOUT and
 	CONFIG_NR_CPUS.
- 
+
 	o The numbers separated by the "/" are the qsmask followed
 		by the qsmaskinit. The qsmask will have one bit
 		set for each entity in the next lower level that
@@ -389,10 +223,19 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
 	The value of qsmaskinit is assigned to that of qsmask
 	at the beginning of each grace period.
 
-	For example, for "rcu", the qsmask of the first entry
-	of the lowest level is 0x14, meaning that we are still
-	waiting for CPUs 2 and 4 to check in for the current
-	grace period.
+	For example, for "rcu_sched", the qsmask of the first
+	entry of the lowest level is 0x14, meaning that we
+	are still waiting for CPUs 2 and 4 to check in for the
+	current grace period.
+
+	o The characters separated by the ">" indicate the state
+		of the blocked-tasks lists. A "T" preceding the ">"
+		indicates that at least one task blocked in an RCU
+		read-side critical section blocks the current grace
+		period, while a "." preceding the ">" indicates otherwise.
+		The character following the ">" indicates similarly for
+		the next grace period. A "T" should appear in this
+		field only for rcu-preempt.
 
 	o The numbers separated by the ":" are the range of CPUs
 		served by this struct rcu_node. This can be helpful
@@ -431,8 +274,9 @@ rcu_bh:
   6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921
   7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542
 
-As always, this is once again split into "rcu" and "rcu_bh" portions.
-The fields are as follows:
+As always, this is once again split into "rcu_sched" and "rcu_bh"
+portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional
+"rcu_preempt" section. The fields are as follows:
 
 o "np" is the number of times that __rcu_pending() has been invoked
 	for the corresponding flavor of RCU.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index e41a7fecf0d3..d542ca243b80 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -830,7 +830,7 @@ sched: Critical sections Grace period Barrier
 SRCU:	Critical sections	Grace period		Barrier
 
 	srcu_read_lock		synchronize_srcu	N/A
-	srcu_read_unlock
+	srcu_read_unlock	synchronize_srcu_expedited
 
 SRCU:	Initialization/cleanup
 	init_srcu_struct
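As a point of reference for the SRCU primitives named in the table above, here
is a minimal kernel-side usage sketch. It is illustrative only and not part of
the patch; "my_srcu" and the two functions are invented placeholder names.

	#include <linux/srcu.h>

	static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) at setup,
						 * cleanup_srcu_struct(&my_srcu) at teardown */

	static void my_reader(void)
	{
		/* Enter an SRCU read-side critical section; keep the index. */
		int idx = srcu_read_lock(&my_srcu);

		/* ... dereference data protected by my_srcu here ... */

		srcu_read_unlock(&my_srcu, idx);
	}

	static void my_updater_wait(void)
	{
		/* Wait for all pre-existing readers of my_srcu to finish.
		 * The _expedited variant listed in the table above does the
		 * same, trading extra CPU overhead for a shorter wait. */
		synchronize_srcu(&my_srcu);
	}
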
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index e1efc400bed6..e151b2a36267 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -65,6 +65,7 @@ aicdb.h*
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
+av_permissions.h
 bbootsect
 bin2c
 binkernel.spec
@@ -95,12 +96,14 @@ docproc
 elf2ecoff
 elfconfig.h*
 fixdep
+flask.h
 fore200e_mkfirm
 fore200e_pca_fw.c*
 gconf
 gen-devlist
 gen_crc32table
 gen_init_cpio
+genheaders
 genksyms
 *_gray256.c
 ihex2fw
diff --git a/Documentation/fb/framebuffer.txt b/Documentation/fb/framebuffer.txt
index b3e3a0356839..fe79e3c8847d 100644
--- a/Documentation/fb/framebuffer.txt
+++ b/Documentation/fb/framebuffer.txt
@@ -312,10 +312,8 @@ and to the following documentation:
 8. Mailing list
 ---------------
 
-There are several frame buffer device related mailing lists at SourceForge:
- - linux-fbdev-announce@lists.sourceforge.net, for announcements,
- - linux-fbdev-user@lists.sourceforge.net, for generic user support,
- - linux-fbdev-devel@lists.sourceforge.net, for project developers.
+There is a frame buffer device related mailing list at kernel.org:
+linux-fbdev@vger.kernel.org.
 
 Point your web browser to http://sourceforge.net/projects/linux-fbdev/ for
 subscription information and archive browsing.
diff --git a/Documentation/filesystems/caching/fscache.txt b/Documentation/filesystems/caching/fscache.txt
index 9e94b9491d89..a91e2e2095b0 100644
--- a/Documentation/filesystems/caching/fscache.txt
+++ b/Documentation/filesystems/caching/fscache.txt
@@ -235,6 +235,7 @@ proc files.
 	neg=N	Number of negative lookups made
 	pos=N	Number of positive lookups made
 	crt=N	Number of objects created by lookup
+	tmo=N	Number of lookups timed out and requeued
 Updates	n=N	Number of update cookie requests seen
 	nul=N	Number of upd reqs given a NULL parent
 	run=N	Number of upd reqs granted CPU time
@@ -250,8 +251,10 @@ proc files.
 	ok=N	Number of successful alloc reqs
 	wt=N	Number of alloc reqs that waited on lookup completion
 	nbf=N	Number of alloc reqs rejected -ENOBUFS
+	int=N	Number of alloc reqs aborted -ERESTARTSYS
 	ops=N	Number of alloc reqs submitted
 	owt=N	Number of alloc reqs waited for CPU time
+	abt=N	Number of alloc reqs aborted due to object death
 Retrvls	n=N	Number of retrieval (read) requests seen
 	ok=N	Number of successful retr reqs
 	wt=N	Number of retr reqs that waited on lookup completion
@@ -261,6 +264,7 @@ proc files.
 	oom=N	Number of retr reqs failed -ENOMEM
 	ops=N	Number of retr reqs submitted
 	owt=N	Number of retr reqs waited for CPU time
+	abt=N	Number of retr reqs aborted due to object death
 Stores	n=N	Number of storage (write) requests seen
 	ok=N	Number of successful store reqs
 	agn=N	Number of store reqs on a page already pending storage
@@ -268,12 +272,37 @@ proc files.
 	oom=N	Number of store reqs failed -ENOMEM
 	ops=N	Number of store reqs submitted
 	run=N	Number of store reqs granted CPU time
+	pgs=N	Number of pages given store req processing time
+	rxd=N	Number of store reqs deleted from tracking tree
+	olm=N	Number of store reqs over store limit
+VmScan	nos=N	Number of release reqs against pages with no pending store
+	gon=N	Number of release reqs against pages stored by time lock granted
+	bsy=N	Number of release reqs ignored due to in-progress store
+	can=N	Number of page stores cancelled due to release req
 Ops	pend=N	Number of times async ops added to pending queues
 	run=N	Number of times async ops given CPU time
 	enq=N	Number of times async ops queued for processing
+	can=N	Number of async ops cancelled
+	rej=N	Number of async ops rejected due to object lookup/create failure
 	dfr=N	Number of async ops queued for deferred release
 	rel=N	Number of async ops released
 	gc=N	Number of deferred-release async ops garbage collected
+CacheOp	alo=N	Number of in-progress alloc_object() cache ops
+	luo=N	Number of in-progress lookup_object() cache ops
+	luc=N	Number of in-progress lookup_complete() cache ops
+	gro=N	Number of in-progress grab_object() cache ops
+	upo=N	Number of in-progress update_object() cache ops
+	dro=N	Number of in-progress drop_object() cache ops
+	pto=N	Number of in-progress put_object() cache ops
+	syn=N	Number of in-progress sync_cache() cache ops
+	atc=N	Number of in-progress attr_changed() cache ops
+	rap=N	Number of in-progress read_or_alloc_page() cache ops
+	ras=N	Number of in-progress read_or_alloc_pages() cache ops
+	alp=N	Number of in-progress allocate_page() cache ops
+	als=N	Number of in-progress allocate_pages() cache ops
+	wrp=N	Number of in-progress write_page() cache ops
+	ucp=N	Number of in-progress uncache_page() cache ops
+	dsp=N	Number of in-progress dissociate_pages() cache ops
 
 
 (*) /proc/fs/fscache/histogram
@@ -299,6 +328,87 @@ proc files.
299 jiffy range covered, and the SECS field the equivalent number of seconds. 328 jiffy range covered, and the SECS field the equivalent number of seconds.
300 329
301 330
331===========
332OBJECT LIST
333===========
334
335If CONFIG_FSCACHE_OBJECT_LIST is enabled, the FS-Cache facility will maintain a
336list of all the objects currently allocated and allow them to be viewed
337through:
338
339 /proc/fs/fscache/objects
340
341This will look something like:
342
343 [root@andromeda ~]# head /proc/fs/fscache/objects
344 OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS EM EV F S | NETFS_COOKIE_DEF TY FL NETFS_DATA OBJECT_KEY, AUX_DATA
345 ======== ======== ==== ===== === === === == ===== == == = = | ================ == == ================ ================
346 17e4b 2 ACTV 0 0 0 0 0 0 7b 4 0 8 | NFS.fh DT 0 ffff88001dd82820 010006017edcf8bbc93b43298fdfbe71e50b57b13a172c0117f38472, e567634700000000000000000000000063f2404a000000000000000000000000c9030000000000000000000063f2404a
347 1693a 2 ACTV 0 0 0 0 0 0 7b 4 0 8 | NFS.fh DT 0 ffff88002db23380 010006017edcf8bbc93b43298fdfbe71e50b57b1e0162c01a2df0ea6, 420ebc4a000000000000000000000000420ebc4a0000000000000000000000000e1801000000000000000000420ebc4a
348
349where the first set of columns before the '|' describe the object:
350
351 COLUMN DESCRIPTION
352 ======= ===============================================================
353 OBJECT Object debugging ID (appears as OBJ%x in some debug messages)
354 PARENT Debugging ID of parent object
355 STAT Object state
356 CHLDN Number of child objects of this object
357 OPS Number of outstanding operations on this object
358 OOP Number of outstanding child object management operations
359 IPR
360 EX Number of outstanding exclusive operations
361 READS Number of outstanding read operations
362 EM Object's event mask
363 EV Events raised on this object
364 F Object flags
365 S Object slow-work work item flags
366
367and the second set of columns describe the object's cookie, if present:
368
369 COLUMN DESCRIPTION
370 =============== =======================================================
371 NETFS_COOKIE_DEF Name of netfs cookie definition
372 TY Cookie type (IX - index, DT - data, hex - special)
373 FL Cookie flags
374 NETFS_DATA Netfs private data stored in the cookie
375 OBJECT_KEY Object key } 1 column, with separating comma
376 AUX_DATA Object aux data } presence may be configured
377
378The data shown may be filtered by attaching a key to an appropriate keyring
379before viewing the file. Something like:
380
381 keyctl add user fscache:objlist <restrictions> @s
382
383where <restrictions> are a selection of the following letters:
384
385 K Show hexdump of object key (don't show if not given)
386 A Show hexdump of object aux data (don't show if not given)
387
388and the following paired letters:
389
390 C Show objects that have a cookie
391 c Show objects that don't have a cookie
392 B Show objects that are busy
393 b Show objects that aren't busy
394 W Show objects that have pending writes
395 w Show objects that don't have pending writes
396 R Show objects that have outstanding reads
397 r Show objects that don't have outstanding reads
398 S Show objects that have slow work queued
399 s Show objects that don't have slow work queued
400
401If neither side of a letter pair is given, then both are implied. For example:
402
403 keyctl add user fscache:objlist KB @s
404
405shows objects that are busy, and lists their object keys, but does not dump
406their auxiliary data. It also implies "CcWwRrSs", but as 'B' is given, 'b' is
407not implied.
408
409By default all objects and all fields will be shown.
410
411
302========= 412=========
303DEBUGGING 413DEBUGGING
304========= 414=========
diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt
index 2666b1ed5e9e..1902c57b72ef 100644
--- a/Documentation/filesystems/caching/netfs-api.txt
+++ b/Documentation/filesystems/caching/netfs-api.txt
@@ -641,7 +641,7 @@ data file must be retired (see the relinquish cookie function below).
641 641
642Furthermore, note that this does not cancel the asynchronous read or write 642Furthermore, note that this does not cancel the asynchronous read or write
643operation started by the read/alloc and write functions, so the page 643operation started by the read/alloc and write functions, so the page
644invalidation and release functions must use: 644invalidation functions must use:
645 645
646 bool fscache_check_page_write(struct fscache_cookie *cookie, 646 bool fscache_check_page_write(struct fscache_cookie *cookie,
647 struct page *page); 647 struct page *page);
@@ -654,6 +654,25 @@ to see if a page is being written to the cache, and:
654to wait for it to finish if it is. 654to wait for it to finish if it is.
655 655
656 656
657When releasepage() is being implemented, a special FS-Cache function exists to
658manage the heuristics of coping with vmscan trying to eject pages, which may
659conflict with the cache trying to write pages to the cache (which may itself
660need to allocate memory):
661
662 bool fscache_maybe_release_page(struct fscache_cookie *cookie,
663 struct page *page,
664 gfp_t gfp);
665
666This takes the netfs cookie, and the page and gfp arguments as supplied to
667releasepage(). It will return false if the page cannot be released yet for
668some reason; if it returns true, the page has been uncached and can now be
669released.
670
671To make a page available for release, this function may wait for an outstanding
672storage request to complete, or it may attempt to cancel the storage request -
673in which case the page will not be stored in the cache this time.
674
675
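As a rough illustration of the above - a minimal sketch only, not taken from the
patch; the mynetfs_* names and the location of the cookie in the netfs's inode
are invented for the example:

	static int mynetfs_releasepage(struct page *page, gfp_t gfp)
	{
		struct inode *inode = page->mapping->host;
		struct fscache_cookie *cookie = mynetfs_i(inode)->fscache;

		/* Let FS-Cache decide whether the page can go: it may wait for
		 * a pending store to complete or attempt to cancel it. */
		if (cookie && !fscache_maybe_release_page(cookie, page, gfp))
			return 0;	/* the cache still needs the page */

		/* ... drop any other private state the netfs attached ... */
		return 1;
	}

The return convention mirrors releasepage(): non-zero means the page may be
released, zero means it must be kept for now.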
657========================== 676==========================
658INDEX AND DATA FILE UPDATE 677INDEX AND DATA FILE UPDATE
659========================== 678==========================
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index c2a0871280a0..c58b9f5ba002 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -20,15 +20,16 @@ Lots of code taken from ext3 and other projects.
20Authors in alphabetical order: 20Authors in alphabetical order:
21Joel Becker <joel.becker@oracle.com> 21Joel Becker <joel.becker@oracle.com>
22Zach Brown <zach.brown@oracle.com> 22Zach Brown <zach.brown@oracle.com>
23Mark Fasheh <mark.fasheh@oracle.com> 23Mark Fasheh <mfasheh@suse.com>
24Kurt Hackel <kurt.hackel@oracle.com> 24Kurt Hackel <kurt.hackel@oracle.com>
25Tao Ma <tao.ma@oracle.com>
25Sunil Mushran <sunil.mushran@oracle.com> 26Sunil Mushran <sunil.mushran@oracle.com>
26Manish Singh <manish.singh@oracle.com> 27Manish Singh <manish.singh@oracle.com>
28Tiger Yang <tiger.yang@oracle.com>
27 29
28Caveats 30Caveats
29======= 31=======
30Features which OCFS2 does not support yet: 32Features which OCFS2 does not support yet:
31 - quotas
32 - Directory change notification (F_NOTIFY) 33 - Directory change notification (F_NOTIFY)
33 - Distributed Caching (F_SETLEASE/F_GETLEASE/break_lease) 34 - Distributed Caching (F_SETLEASE/F_GETLEASE/break_lease)
34 35
@@ -70,7 +71,6 @@ commit=nrsec (*) Ocfs2 can be told to sync all its data and metadata
70 performance. 71 performance.
71localalloc=8(*) Allows custom localalloc size in MB. If the value is too 72localalloc=8(*) Allows custom localalloc size in MB. If the value is too
72 large, the fs will silently revert it to the default. 73 large, the fs will silently revert it to the default.
73 Localalloc is not enabled for local mounts.
74localflocks This disables cluster aware flock. 74localflocks This disables cluster aware flock.
75inode64 Indicates that Ocfs2 is allowed to create inodes at 75inode64 Indicates that Ocfs2 is allowed to create inodes at
76 any location in the filesystem, including those which 76 any location in the filesystem, including those which
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c8d1b2be28ef..51138b3dc8cc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -85,7 +85,6 @@ parameter is applicable:
85 PPT Parallel port support is enabled. 85 PPT Parallel port support is enabled.
86 PS2 Appropriate PS/2 support is enabled. 86 PS2 Appropriate PS/2 support is enabled.
87 RAM RAM disk support is enabled. 87 RAM RAM disk support is enabled.
88 ROOTPLUG The example Root Plug LSM is enabled.
89 S390 S390 architecture is enabled. 88 S390 S390 architecture is enabled.
90 SCSI Appropriate SCSI support is enabled. 89 SCSI Appropriate SCSI support is enabled.
91 A lot of drivers has their options described inside of 90 A lot of drivers has their options described inside of
@@ -2039,8 +2038,15 @@ and is between 256 and 4096 characters. It is defined in the file
2039 2038
2040 print-fatal-signals= 2039 print-fatal-signals=
2041 [KNL] debug: print fatal signals 2040 [KNL] debug: print fatal signals
2042 print-fatal-signals=1: print segfault info to 2041
2043 the kernel console. 2042 If enabled, warn about various signal handling
2043 related application anomalies: too many signals,
2044 too many POSIX.1 timers, fatal signals causing a
2045 coredump - etc.
2046
2047 If you hit the warning due to signal overflow,
2048 you might want to try "ulimit -i unlimited".
2049
2044 default: off. 2050 default: off.
2045 2051
2046 printk.time= Show timing data prefixed to each printk message line 2052 printk.time= Show timing data prefixed to each printk message line
@@ -2171,15 +2177,6 @@ and is between 256 and 4096 characters. It is defined in the file
2171 Useful for devices that are detected asynchronously 2177 Useful for devices that are detected asynchronously
2172 (e.g. USB and MMC devices). 2178 (e.g. USB and MMC devices).
2173 2179
2174 root_plug.vendor_id=
2175 [ROOTPLUG] Override the default vendor ID
2176
2177 root_plug.product_id=
2178 [ROOTPLUG] Override the default product ID
2179
2180 root_plug.debug=
2181 [ROOTPLUG] Enable debugging output
2182
2183 rw [KNL] Mount root device read-write on boot 2180 rw [KNL] Mount root device read-write on boot
2184 2181
2185 S [KNL] Run init in single mode 2182 S [KNL] Run init in single mode
diff --git a/Documentation/pcmcia/driver-changes.txt b/Documentation/pcmcia/driver-changes.txt
index 059934363caf..446f43b309df 100644
--- a/Documentation/pcmcia/driver-changes.txt
+++ b/Documentation/pcmcia/driver-changes.txt
@@ -1,5 +1,17 @@
1This file details changes in 2.6 which affect PCMCIA card driver authors: 1This file details changes in 2.6 which affect PCMCIA card driver authors:
2 2
3* no cs_error / CS_CHECK / CONFIG_PCMCIA_DEBUG (as of 2.6.33)
4 Instead of the cs_error() callback or the CS_CHECK() macro, please use
5 Linux-style checking of return values, and -- if necessary -- debug
6 messages using "dev_dbg()" or "pr_debug()".
7
8* New CIS tuple access (as of 2.6.33)
9 Instead of pcmcia_get_{first,next}_tuple(), pcmcia_get_tuple_data() and
10 pcmcia_parse_tuple(), a driver shall use "pcmcia_get_tuple()" if it is
11 only interested in one (raw) tuple, or "pcmcia_loop_tuple()" if it is
12 interested in all tuples of one type. To decode the MAC from CISTPL_FUNCE,
13 a new helper "pcmcia_get_mac_from_cis()" was added (see the usage sketch
 following this hunk).
14
3* New configuration loop helper (as of 2.6.28) 15* New configuration loop helper (as of 2.6.28)
4 By calling pcmcia_loop_config(), a driver can iterate over all available 16 By calling pcmcia_loop_config(), a driver can iterate over all available
5 configuration options. During a driver's probe() phase, one doesn't need 17 configuration options. During a driver's probe() phase, one doesn't need
diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt
index ebc50f808ea4..9dbf4470c7e1 100644
--- a/Documentation/slow-work.txt
+++ b/Documentation/slow-work.txt
@@ -41,6 +41,13 @@ expand files, provided the time taken to do so isn't too long.
41Operations of both types may sleep during execution, thus tying up the thread 41Operations of both types may sleep during execution, thus tying up the thread
42loaned to it. 42loaned to it.
43 43
44A further class of work item is available, based on the slow work item class:
45
46 (*) Delayed slow work items.
47
48These are slow work items that have a timer to defer queueing of the item for
49a while.
50
44 51
45THREAD-TO-CLASS ALLOCATION 52THREAD-TO-CLASS ALLOCATION
46-------------------------- 53--------------------------
@@ -64,9 +71,11 @@ USING SLOW WORK ITEMS
64Firstly, a module or subsystem wanting to make use of slow work items must 71Firstly, a module or subsystem wanting to make use of slow work items must
65register its interest: 72register its interest:
66 73
67 int ret = slow_work_register_user(); 74 int ret = slow_work_register_user(struct module *module);
68 75
69This will return 0 if successful, or a -ve error upon failure. 76This will return 0 if successful, or a -ve error upon failure. The module
77pointer should be the module interested in using this facility (almost
78certainly THIS_MODULE).
70 79
71 80
72Slow work items may then be set up by: 81Slow work items may then be set up by:
@@ -93,6 +102,10 @@ Slow work items may then be set up by:
93 102
94 or: 103 or:
95 104
105 delayed_slow_work_init(&myitem, &myitem_ops);
106
107 or:
108
96 vslow_work_init(&myitem, &myitem_ops); 109 vslow_work_init(&myitem, &myitem_ops);
97 110
98 depending on its class. 111 depending on its class.
@@ -102,15 +115,92 @@ A suitably set up work item can then be enqueued for processing:
102 int ret = slow_work_enqueue(&myitem); 115 int ret = slow_work_enqueue(&myitem);
103 116
104This will return a -ve error if the thread pool is unable to gain a reference 117This will return a -ve error if the thread pool is unable to gain a reference
105on the item, 0 otherwise. 118on the item, 0 otherwise, or (for delayed work):
119
120 int ret = delayed_slow_work_enqueue(&myitem, my_jiffy_delay);
106 121
107 122
108The items are reference counted, so there ought to be no need for a flush 123The items are reference counted, so there ought to be no need for a flush
109operation. When all a module's slow work items have been processed, and the 124operation. But as the reference counting is optional, means to cancel
125existing work items are also included:
126
127 cancel_slow_work(&myitem);
128 cancel_delayed_slow_work(&myitem);
129
130can be used to cancel pending work. The above cancel functions wait for
131existing work to have been executed (or prevent it from executing, depending
132on timing).
133
134
135When all a module's slow work items have been processed, and the
110module has no further interest in the facility, it should unregister its 136module has no further interest in the facility, it should unregister its
111interest: 137interest:
112 138
113 slow_work_unregister_user(); 139 slow_work_unregister_user(struct module *module);
140
141The module pointer is used to wait for all outstanding work items for that
142module before completing the unregistration. This prevents the put_ref() code
143from being taken away before it completes. module should almost certainly be
144THIS_MODULE.
145
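Putting the calls above together, a minimal lifecycle sketch for a delayed slow
work item - the my_* names are illustrative only, and any slow_work_ops members
beyond ->execute() that a given kernel version requires (reference counting,
module owner) are omitted here:

	static void my_execute(struct slow_work *work)
	{
		/* the slow part: may sleep, perform disk I/O, take locks */
	}

	static struct slow_work_ops my_ops = {
		.execute	= my_execute,
	};

	static struct delayed_slow_work my_work;

	static int __init my_init(void)
	{
		int ret = slow_work_register_user(THIS_MODULE);
		if (ret < 0)
			return ret;

		delayed_slow_work_init(&my_work, &my_ops);
		ret = delayed_slow_work_enqueue(&my_work, 5 * HZ);
		if (ret < 0)
			slow_work_unregister_user(THIS_MODULE);
		return ret;
	}

	static void __exit my_exit(void)
	{
		/* waits for, or prevents, execution of the pending item */
		cancel_delayed_slow_work(&my_work);
		slow_work_unregister_user(THIS_MODULE);
	}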
146
147================
148HELPER FUNCTIONS
149================
150
151The slow-work facility provides a function by which it can be determined
152whether or not an item is queued for later execution:
153
154 bool queued = slow_work_is_queued(struct slow_work *work);
155
156If it returns false, then the item is not on the queue (it may be executing
157with a requeue pending). This can be used to work out whether an item on which
158another depends is on the queue, thus allowing a dependent item to be queued
159after it.
160
161If the above shows an item on which another depends not to be queued, then the
162owner of the dependent item might need to wait. However, to avoid locking up
163the threads unnecessarily by sleeping in them, it can make sense under some
164circumstances to return the work item to the queue, thus deferring it until
165some other items have had a chance to make use of the yielded thread.
166
167To yield a thread and defer an item, the work function should simply enqueue
168the work item again and return. However, this doesn't work if there's nothing
169actually on the queue, as the thread just vacated will jump straight back into
170the item's work function, thus busy waiting on a CPU.
171
172Instead, the item should use the thread to wait for the dependency to go away,
173but rather than using schedule() or schedule_timeout() to sleep, it should use
174the following function:
175
176 bool requeue = slow_work_sleep_till_thread_needed(
177 struct slow_work *work,
178 signed long *_timeout);
179
180This will add a second wait and then sleep, such that it will be woken up if
181either something appears on the queue that could usefully make use of the
182thread - and behind which this item can be queued, or if the event the caller
183set up to wait for happens. True will be returned if something else appeared
184on the queue and this work function should perhaps return, or false if
185something else woke it up. The timeout is as for schedule_timeout().
186
187For example:
188
189 wq = bit_waitqueue(&my_flags, MY_BIT);
190 init_wait(&wait);
191 requeue = false;
192 do {
193 prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
194 if (!test_bit(MY_BIT, &my_flags))
195 break;
196 requeue = slow_work_sleep_till_thread_needed(&my_work,
197 &timeout);
198 } while (timeout > 0 && !requeue);
199 finish_wait(wq, &wait);
200	if (!test_bit(MY_BIT, &my_flags))
201 goto do_my_thing;
202 if (requeue)
203 return; // to slow_work
114 204
115 205
116=============== 206===============
@@ -118,7 +208,8 @@ ITEM OPERATIONS
118=============== 208===============
119 209
120Each work item requires a table of operations of type struct slow_work_ops. 210Each work item requires a table of operations of type struct slow_work_ops.
121All members are required: 211Only ->execute() is required; the getting and putting of a reference and the
212describing of an item are all optional.
122 213
123 (*) Get a reference on an item: 214 (*) Get a reference on an item:
124 215
@@ -148,6 +239,16 @@ All members are required:
148 This should perform the work required of the item. It may sleep, it may 239 This should perform the work required of the item. It may sleep, it may
149 perform disk I/O and it may wait for locks. 240 perform disk I/O and it may wait for locks.
150 241
242 (*) View an item through /proc:
243
244 void (*desc)(struct slow_work *work, struct seq_file *m);
245
246 If supplied, this should print to 'm' a small string describing the work
247 the item is to do. This should be no more than about 40 characters, and
248 shouldn't include a newline character.
249
250 See the 'Viewing executing and queued items' section below.
251
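A possible desc() implementation is sketched below; the my_work_item type and
its debug_id field are invented for the example:

	static void my_desc(struct slow_work *work, struct seq_file *m)
	{
		struct my_work_item *item =
			container_of(work, struct my_work_item, work);

		/* keep it short: no newline, well under 40 characters */
		seq_printf(m, "MYFS: obj %x", item->debug_id);
	}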
151 252
152================== 253==================
153POOL CONFIGURATION 254POOL CONFIGURATION
@@ -172,3 +273,50 @@ The slow-work thread pool has a number of configurables:
172 is bounded to between 1 and one fewer than the number of active threads. 273 is bounded to between 1 and one fewer than the number of active threads.
173 This ensures there is always at least one thread that can process very 274 This ensures there is always at least one thread that can process very
174 slow work items, and always at least one thread that won't. 275 slow work items, and always at least one thread that won't.
276
277
278==================================
279VIEWING EXECUTING AND QUEUED ITEMS
280==================================
281
282If CONFIG_SLOW_WORK_DEBUG is enabled, a debugfs file is made available:
283
284 /sys/kernel/debug/slow_work/runqueue
285
286through which the list of work items being executed and the queues of items to
287be executed may be viewed. The owner of a work item is given the chance to
288add some information of its own.
289
290The contents look something like the following:
291
292 THR PID ITEM ADDR FL MARK DESC
293 === ===== ================ == ===== ==========
294 0 3005 ffff880023f52348 a 952ms FSC: OBJ17d3: LOOK
295 1 3006 ffff880024e33668 2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2
296 2 3165 ffff8800296dd180 a 424ms FSC: OBJ17e4: LOOK
297 3 4089 ffff8800262c8d78 a 212ms FSC: OBJ17ea: CRTN
298 4 4090 ffff88002792bed8 2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2
299 5 4092 ffff88002a0ef308 2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2
300 6 4094 ffff88002abaf4b8 2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2
301 7 4095 ffff88002bb188e0 a 388ms FSC: OBJ17e9: CRTN
302 vsq - ffff880023d99668 1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2
303 vsq - ffff8800295d1740 1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2
304 vsq - ffff880025ba3308 1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2
305 vsq - ffff880024ec83e0 1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2
306 vsq - ffff880026618e00 1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2
307 vsq - ffff880025a2a4b8 1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2
308 vsq - ffff880023cbe6d8 9 212ms FSC: OBJ17eb: LOOK
309 vsq - ffff880024d37590 9 212ms FSC: OBJ17ec: LOOK
310 vsq - ffff880027746cb0 9 212ms FSC: OBJ17ed: LOOK
311 vsq - ffff880024d37ae8 9 212ms FSC: OBJ17ee: LOOK
312 vsq - ffff880024d37cb0 9 212ms FSC: OBJ17ef: LOOK
313 vsq - ffff880025036550 9 212ms FSC: OBJ17f0: LOOK
314 vsq - ffff8800250368e0 9 212ms FSC: OBJ17f1: LOOK
315 vsq - ffff880025036aa8 9 212ms FSC: OBJ17f2: LOOK
316
317In the 'THR' column, executing items show the thread they're occupying and
318queued items indicate which queue they're on. 'PID' shows the process ID of
319a slow-work thread that's executing something. 'FL' shows the work item flags.
320'MARK' indicates how long since an item was queued or began executing. Lastly,
321the 'DESC' column permits the owner of an item to give some information.
322
diff --git a/Documentation/vm/page-types.c b/Documentation/vm/page-types.c
index 3ec4f2a22585..4793c6aac733 100644
--- a/Documentation/vm/page-types.c
+++ b/Documentation/vm/page-types.c
@@ -218,7 +218,7 @@ static void fatal(const char *x, ...)
218 exit(EXIT_FAILURE); 218 exit(EXIT_FAILURE);
219} 219}
220 220
221int checked_open(const char *pathname, int flags) 221static int checked_open(const char *pathname, int flags)
222{ 222{
223 int fd = open(pathname, flags); 223 int fd = open(pathname, flags);
224 224
diff --git a/MAINTAINERS b/MAINTAINERS
index 81d68d5b7eea..4f96ac81089c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -512,10 +512,32 @@ W: http://www.arm.linux.org.uk/
512S: Maintained 512S: Maintained
513F: arch/arm/ 513F: arch/arm/
514 514
515ARM PRIMECELL AACI PL041 DRIVER
516M: Russell King <linux@arm.linux.org.uk>
517S: Maintained
518F: sound/arm/aaci.*
519
520ARM PRIMECELL CLCD PL110 DRIVER
521M: Russell King <linux@arm.linux.org.uk>
522S: Maintained
523F: drivers/video/amba-clcd.*
524
525ARM PRIMECELL KMI PL050 DRIVER
526M: Russell King <linux@arm.linux.org.uk>
527S: Maintained
528F: drivers/input/serio/ambakmi.*
529F: include/linux/amba/kmi.h
530
515ARM PRIMECELL MMCI PL180/1 DRIVER 531ARM PRIMECELL MMCI PL180/1 DRIVER
516S: Orphan 532S: Orphan
517F: drivers/mmc/host/mmci.* 533F: drivers/mmc/host/mmci.*
518 534
535ARM PRIMECELL BUS SUPPORT
536M: Russell King <linux@arm.linux.org.uk>
537S: Maintained
538F: drivers/amba/
539F: include/linux/amba/bus.h
540
519ARM/ADI ROADRUNNER MACHINE SUPPORT 541ARM/ADI ROADRUNNER MACHINE SUPPORT
520M: Lennert Buytenhek <kernel@wantstofly.org> 542M: Lennert Buytenhek <kernel@wantstofly.org>
521L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 543L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1027,7 +1049,7 @@ F: drivers/serial/atmel_serial.c
1027 1049
1028ATMEL LCDFB DRIVER 1050ATMEL LCDFB DRIVER
1029M: Nicolas Ferre <nicolas.ferre@atmel.com> 1051M: Nicolas Ferre <nicolas.ferre@atmel.com>
1030L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 1052L: linux-fbdev@vger.kernel.org
1031S: Maintained 1053S: Maintained
1032F: drivers/video/atmel_lcdfb.c 1054F: drivers/video/atmel_lcdfb.c
1033F: include/video/atmel_lcdc.h 1055F: include/video/atmel_lcdc.h
@@ -2113,7 +2135,7 @@ F: drivers/net/wan/dlci.c
2113F: drivers/net/wan/sdla.c 2135F: drivers/net/wan/sdla.c
2114 2136
2115FRAMEBUFFER LAYER 2137FRAMEBUFFER LAYER
2116L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 2138L: linux-fbdev@vger.kernel.org
2117W: http://linux-fbdev.sourceforge.net/ 2139W: http://linux-fbdev.sourceforge.net/
2118S: Orphan 2140S: Orphan
2119F: Documentation/fb/ 2141F: Documentation/fb/
@@ -2136,7 +2158,7 @@ F: drivers/i2c/busses/i2c-cpm.c
2136 2158
2137FREESCALE IMX / MXC FRAMEBUFFER DRIVER 2159FREESCALE IMX / MXC FRAMEBUFFER DRIVER
2138M: Sascha Hauer <kernel@pengutronix.de> 2160M: Sascha Hauer <kernel@pengutronix.de>
2139L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 2161L: linux-fbdev@vger.kernel.org
2140L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 2162L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
2141S: Maintained 2163S: Maintained
2142F: arch/arm/plat-mxc/include/mach/imxfb.h 2164F: arch/arm/plat-mxc/include/mach/imxfb.h
@@ -2312,6 +2334,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
2312S: Maintained 2334S: Maintained
2313F: drivers/media/video/gspca/finepix.c 2335F: drivers/media/video/gspca/finepix.c
2314 2336
2337GSPCA GL860 SUBDRIVER
2338M: Olivier Lorin <o.lorin@laposte.net>
2339L: linux-media@vger.kernel.org
2340T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
2341S: Maintained
2342F: drivers/media/video/gspca/gl860/
2343
2315GSPCA M5602 SUBDRIVER 2344GSPCA M5602 SUBDRIVER
2316M: Erik Andren <erik.andren@gmail.com> 2345M: Erik Andren <erik.andren@gmail.com>
2317L: linux-media@vger.kernel.org 2346L: linux-media@vger.kernel.org
@@ -2533,8 +2562,7 @@ S: Maintained
2533F: Documentation/i2c/ 2562F: Documentation/i2c/
2534F: drivers/i2c/ 2563F: drivers/i2c/
2535F: include/linux/i2c.h 2564F: include/linux/i2c.h
2536F: include/linux/i2c-dev.h 2565F: include/linux/i2c-*.h
2537F: include/linux/i2c-id.h
2538 2566
2539I2C-TINY-USB DRIVER 2567I2C-TINY-USB DRIVER
2540M: Till Harbaum <till@harbaum.org> 2568M: Till Harbaum <till@harbaum.org>
@@ -2635,7 +2663,7 @@ S: Supported
2635F: security/integrity/ima/ 2663F: security/integrity/ima/
2636 2664
2637IMS TWINTURBO FRAMEBUFFER DRIVER 2665IMS TWINTURBO FRAMEBUFFER DRIVER
2638L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 2666L: linux-fbdev@vger.kernel.org
2639S: Orphan 2667S: Orphan
2640F: drivers/video/imsttfb.c 2668F: drivers/video/imsttfb.c
2641 2669
@@ -2670,14 +2698,14 @@ F: drivers/input/
2670 2698
2671INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) 2699INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
2672M: Sylvain Meyer <sylvain.meyer@worldonline.fr> 2700M: Sylvain Meyer <sylvain.meyer@worldonline.fr>
2673L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 2701L: linux-fbdev@vger.kernel.org
2674S: Maintained 2702S: Maintained
2675F: Documentation/fb/intelfb.txt 2703F: Documentation/fb/intelfb.txt
2676F: drivers/video/intelfb/ 2704F: drivers/video/intelfb/
2677 2705
2678INTEL 810/815 FRAMEBUFFER DRIVER 2706INTEL 810/815 FRAMEBUFFER DRIVER
2679M: Antonino Daplas <adaplas@gmail.com> 2707M: Antonino Daplas <adaplas@gmail.com>
2680L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 2708L: linux-fbdev@vger.kernel.org
2681S: Maintained 2709S: Maintained
2682F: drivers/video/i810/ 2710F: drivers/video/i810/
2683 2711
@@ -2987,11 +3015,8 @@ S: Maintained
2987F: fs/autofs4/ 3015F: fs/autofs4/
2988 3016
2989KERNEL BUILD 3017KERNEL BUILD
2990M: Sam Ravnborg <sam@ravnborg.org>
2991T: git git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-next.git
2992T: git git://git.kernel.org/pub/scm/linux/kernel/git/sam/kbuild-fixes.git
2993L: linux-kbuild@vger.kernel.org 3018L: linux-kbuild@vger.kernel.org
2994S: Maintained 3019S: Orphan
2995F: Documentation/kbuild/ 3020F: Documentation/kbuild/
2996F: Makefile 3021F: Makefile
2997F: scripts/Makefile.* 3022F: scripts/Makefile.*
@@ -3084,9 +3109,13 @@ F: kernel/kgdb.c
3084 3109
3085KMEMCHECK 3110KMEMCHECK
3086M: Vegard Nossum <vegardno@ifi.uio.no> 3111M: Vegard Nossum <vegardno@ifi.uio.no>
3087P Pekka Enberg 3112M: Pekka Enberg <penberg@cs.helsinki.fi>
3088M: penberg@cs.helsinki.fi
3089S: Maintained 3113S: Maintained
3114F: Documentation/kmemcheck.txt
3115F: arch/x86/include/asm/kmemcheck.h
3116F: arch/x86/mm/kmemcheck/
3117F: include/linux/kmemcheck.h
3118F: mm/kmemcheck.c
3090 3119
3091KMEMLEAK 3120KMEMLEAK
3092M: Catalin Marinas <catalin.marinas@arm.com> 3121M: Catalin Marinas <catalin.marinas@arm.com>
@@ -3387,7 +3416,7 @@ S: Supported
3387 3416
3388MATROX FRAMEBUFFER DRIVER 3417MATROX FRAMEBUFFER DRIVER
3389M: Petr Vandrovec <vandrove@vc.cvut.cz> 3418M: Petr Vandrovec <vandrove@vc.cvut.cz>
3390L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 3419L: linux-fbdev@vger.kernel.org
3391S: Maintained 3420S: Maintained
3392F: drivers/video/matrox/matroxfb_* 3421F: drivers/video/matrox/matroxfb_*
3393F: include/linux/matroxfb.h 3422F: include/linux/matroxfb.h
@@ -3774,7 +3803,7 @@ F: fs/ntfs/
3774 3803
3775NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER 3804NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER
3776M: Antonino Daplas <adaplas@gmail.com> 3805M: Antonino Daplas <adaplas@gmail.com>
3777L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 3806L: linux-fbdev@vger.kernel.org
3778S: Maintained 3807S: Maintained
3779F: drivers/video/riva/ 3808F: drivers/video/riva/
3780F: drivers/video/nvidia/ 3809F: drivers/video/nvidia/
@@ -3809,7 +3838,7 @@ F: sound/soc/omap/
3809 3838
3810OMAP FRAMEBUFFER SUPPORT 3839OMAP FRAMEBUFFER SUPPORT
3811M: Imre Deak <imre.deak@nokia.com> 3840M: Imre Deak <imre.deak@nokia.com>
3812L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 3841L: linux-fbdev@vger.kernel.org
3813L: linux-omap@vger.kernel.org 3842L: linux-omap@vger.kernel.org
3814S: Maintained 3843S: Maintained
3815F: drivers/video/omap/ 3844F: drivers/video/omap/
@@ -4315,19 +4344,21 @@ F: include/linux/qnxtypes.h
4315 4344
4316RADEON FRAMEBUFFER DISPLAY DRIVER 4345RADEON FRAMEBUFFER DISPLAY DRIVER
4317M: Benjamin Herrenschmidt <benh@kernel.crashing.org> 4346M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
4318L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 4347L: linux-fbdev@vger.kernel.org
4319S: Maintained 4348S: Maintained
4320F: drivers/video/aty/radeon* 4349F: drivers/video/aty/radeon*
4321F: include/linux/radeonfb.h 4350F: include/linux/radeonfb.h
4322 4351
4323RAGE128 FRAMEBUFFER DISPLAY DRIVER 4352RAGE128 FRAMEBUFFER DISPLAY DRIVER
4324M: Paul Mackerras <paulus@samba.org> 4353M: Paul Mackerras <paulus@samba.org>
4325L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 4354L: linux-fbdev@vger.kernel.org
4326S: Maintained 4355S: Maintained
4327F: drivers/video/aty/aty128fb.c 4356F: drivers/video/aty/aty128fb.c
4328 4357
4329RALINK RT2X00 WIRELESS LAN DRIVER 4358RALINK RT2X00 WIRELESS LAN DRIVER
4330P: rt2x00 project 4359P: rt2x00 project
4360M: Ivo van Doorn <IvDoorn@gmail.com>
4361M: Gertjan van Wingerde <gwingerde@gmail.com>
4331L: linux-wireless@vger.kernel.org 4362L: linux-wireless@vger.kernel.org
4332L: users@rt2x00.serialmonkey.com (moderated for non-subscribers) 4363L: users@rt2x00.serialmonkey.com (moderated for non-subscribers)
4333W: http://rt2x00.serialmonkey.com/ 4364W: http://rt2x00.serialmonkey.com/
@@ -4415,7 +4446,7 @@ RFKILL
4415M: Johannes Berg <johannes@sipsolutions.net> 4446M: Johannes Berg <johannes@sipsolutions.net>
4416L: linux-wireless@vger.kernel.org 4447L: linux-wireless@vger.kernel.org
4417S: Maintained 4448S: Maintained
4418F Documentation/rfkill.txt 4449F: Documentation/rfkill.txt
4419F: net/rfkill/ 4450F: net/rfkill/
4420 4451
4421RISCOM8 DRIVER 4452RISCOM8 DRIVER
@@ -4459,7 +4490,7 @@ F: drivers/net/wireless/rtl818x/rtl8187*
4459 4490
4460S3 SAVAGE FRAMEBUFFER DRIVER 4491S3 SAVAGE FRAMEBUFFER DRIVER
4461M: Antonino Daplas <adaplas@gmail.com> 4492M: Antonino Daplas <adaplas@gmail.com>
4462L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 4493L: linux-fbdev@vger.kernel.org
4463S: Maintained 4494S: Maintained
4464F: drivers/video/savage/ 4495F: drivers/video/savage/
4465 4496
@@ -5622,7 +5653,7 @@ S: Maintained
5622 5653
5623UVESAFB DRIVER 5654UVESAFB DRIVER
5624M: Michal Januszewski <spock@gentoo.org> 5655M: Michal Januszewski <spock@gentoo.org>
5625L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 5656L: linux-fbdev@vger.kernel.org
5626W: http://dev.gentoo.org/~spock/projects/uvesafb/ 5657W: http://dev.gentoo.org/~spock/projects/uvesafb/
5627S: Maintained 5658S: Maintained
5628F: Documentation/fb/uvesafb.txt 5659F: Documentation/fb/uvesafb.txt
@@ -5655,7 +5686,7 @@ F: drivers/mmc/host/via-sdmmc.c
5655VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER 5686VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
5656M: Joseph Chan <JosephChan@via.com.tw> 5687M: Joseph Chan <JosephChan@via.com.tw>
5657M: Scott Fang <ScottFang@viatech.com.cn> 5688M: Scott Fang <ScottFang@viatech.com.cn>
5658L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers) 5689L: linux-fbdev@vger.kernel.org
5659S: Maintained 5690S: Maintained
5660F: drivers/video/via/ 5691F: drivers/video/via/
5661 5692
diff --git a/Makefile b/Makefile
index 827836128158..33d4732a6c4a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 32 3SUBLEVEL = 32
4EXTRAVERSION = -rc7 4EXTRAVERSION =
5NAME = Man-Eating Seals of Antiquity 5NAME = Man-Eating Seals of Antiquity
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -221,7 +221,7 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
221 221
222HOSTCC = gcc 222HOSTCC = gcc
223HOSTCXX = g++ 223HOSTCXX = g++
224HOSTCFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer 224HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
225HOSTCXXFLAGS = -O2 225HOSTCXXFLAGS = -O2
226 226
227# Decide whether to build built-in, modular, or both. 227# Decide whether to build built-in, modular, or both.
@@ -379,6 +379,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
379PHONY += scripts_basic 379PHONY += scripts_basic
380scripts_basic: 380scripts_basic:
381 $(Q)$(MAKE) $(build)=scripts/basic 381 $(Q)$(MAKE) $(build)=scripts/basic
382 $(Q)rm -f .tmp_quiet_recordmcount
382 383
383# To avoid any implicit rule to kick in, define an empty command. 384# To avoid any implicit rule to kick in, define an empty command.
384scripts/basic/%: scripts_basic ; 385scripts/basic/%: scripts_basic ;
diff --git a/arch/alpha/boot/tools/objstrip.c b/arch/alpha/boot/tools/objstrip.c
index 9d0727d18aee..367d53d031fc 100644
--- a/arch/alpha/boot/tools/objstrip.c
+++ b/arch/alpha/boot/tools/objstrip.c
@@ -35,7 +35,7 @@
35const char * prog_name; 35const char * prog_name;
36 36
37 37
38void 38static void
39usage (void) 39usage (void)
40{ 40{
41 fprintf(stderr, 41 fprintf(stderr,
diff --git a/arch/alpha/include/asm/fcntl.h b/arch/alpha/include/asm/fcntl.h
index 73126e4dd639..25da0017ec87 100644
--- a/arch/alpha/include/asm/fcntl.h
+++ b/arch/alpha/include/asm/fcntl.h
@@ -26,8 +26,6 @@
26#define F_GETOWN 6 /* for sockets. */ 26#define F_GETOWN 6 /* for sockets. */
27#define F_SETSIG 10 /* for sockets. */ 27#define F_SETSIG 10 /* for sockets. */
28#define F_GETSIG 11 /* for sockets. */ 28#define F_GETSIG 11 /* for sockets. */
29#define F_SETOWN_EX 15
30#define F_GETOWN_EX 16
31 29
32/* for posix fcntl() and lockf() */ 30/* for posix fcntl() and lockf() */
33#define F_RDLCK 1 31#define F_RDLCK 1
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 815680b585ed..b3e888638bb7 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -61,21 +61,24 @@ register struct thread_info *__current_thread_info __asm__("$8");
61/* 61/*
62 * Thread information flags: 62 * Thread information flags:
63 * - these are process state flags and used from assembly 63 * - these are process state flags and used from assembly
64 * - pending work-to-be-done flags come first to fit in and immediate operand. 64 * - pending work-to-be-done flags come first and must be assigned to be
65 * within bits 0 to 7 to fit in and immediate operand.
66 * - ALPHA_UAC_SHIFT below must be kept consistent with the unaligned
67 * control flags.
65 * 68 *
66 * TIF_SYSCALL_TRACE is known to be 0 via blbs. 69 * TIF_SYSCALL_TRACE is known to be 0 via blbs.
67 */ 70 */
68#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ 71#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
69#define TIF_SIGPENDING 1 /* signal pending */ 72#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
70#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ 73#define TIF_SIGPENDING 2 /* signal pending */
71#define TIF_POLLING_NRFLAG 3 /* poll_idle is polling NEED_RESCHED */ 74#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
72#define TIF_DIE_IF_KERNEL 4 /* dik recursion lock */ 75#define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */
73#define TIF_UAC_NOPRINT 5 /* see sysinfo.h */ 76#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */
74#define TIF_UAC_NOFIX 6 77#define TIF_UAC_NOPRINT 10 /* see sysinfo.h */
75#define TIF_UAC_SIGBUS 7 78#define TIF_UAC_NOFIX 11
76#define TIF_MEMDIE 8 79#define TIF_UAC_SIGBUS 12
77#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ 80#define TIF_MEMDIE 13
78#define TIF_NOTIFY_RESUME 10 /* callback before returning to user */ 81#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
79#define TIF_FREEZE 16 /* is freezing for suspend */ 82#define TIF_FREEZE 16 /* is freezing for suspend */
80 83
81#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 84#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -94,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
94#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ 97#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \
95 | _TIF_SYSCALL_TRACE) 98 | _TIF_SYSCALL_TRACE)
96 99
97#define ALPHA_UAC_SHIFT 6 100#define ALPHA_UAC_SHIFT 10
98#define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \ 101#define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \
99 1 << TIF_UAC_SIGBUS) 102 1 << TIF_UAC_SIGBUS)
100 103
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 8e059e58b0ac..53dd2f1a53aa 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -1103,6 +1103,8 @@ marvel_agp_info(void)
1103 * Allocate the info structure. 1103 * Allocate the info structure.
1104 */ 1104 */
1105 agp = kmalloc(sizeof(*agp), GFP_KERNEL); 1105 agp = kmalloc(sizeof(*agp), GFP_KERNEL);
1106 if (!agp)
1107 return NULL;
1106 1108
1107 /* 1109 /*
1108 * Fill it in. 1110 * Fill it in.
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c
index 76686497b1e2..219bf271c0ba 100644
--- a/arch/alpha/kernel/core_titan.c
+++ b/arch/alpha/kernel/core_titan.c
@@ -757,6 +757,8 @@ titan_agp_info(void)
757 * Allocate the info structure. 757 * Allocate the info structure.
758 */ 758 */
759 agp = kmalloc(sizeof(*agp), GFP_KERNEL); 759 agp = kmalloc(sizeof(*agp), GFP_KERNEL);
760 if (!agp)
761 return NULL;
760 762
761 /* 763 /*
762 * Fill it in. 764 * Fill it in.
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index cc7834661427..c0de072b8305 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -92,7 +92,7 @@ show_interrupts(struct seq_file *p, void *v)
92 for_each_online_cpu(j) 92 for_each_online_cpu(j)
93 seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j)); 93 seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j));
94#endif 94#endif
95 seq_printf(p, " %14s", irq_desc[irq].chip->typename); 95 seq_printf(p, " %14s", irq_desc[irq].chip->name);
96 seq_printf(p, " %c%s", 96 seq_printf(p, " %c%s",
97 (action->flags & IRQF_DISABLED)?'+':' ', 97 (action->flags & IRQF_DISABLED)?'+':' ',
98 action->name); 98 action->name);
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 38c805dfc544..cfde865b78e0 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -228,7 +228,7 @@ struct irqaction timer_irqaction = {
228}; 228};
229 229
230static struct irq_chip rtc_irq_type = { 230static struct irq_chip rtc_irq_type = {
231 .typename = "RTC", 231 .name = "RTC",
232 .startup = rtc_startup, 232 .startup = rtc_startup,
233 .shutdown = rtc_enable_disable, 233 .shutdown = rtc_enable_disable,
234 .enable = rtc_enable_disable, 234 .enable = rtc_enable_disable,
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 50bfec9b588f..83a9ac280890 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -84,7 +84,7 @@ i8259a_end_irq(unsigned int irq)
84} 84}
85 85
86struct irq_chip i8259a_irq_type = { 86struct irq_chip i8259a_irq_type = {
87 .typename = "XT-PIC", 87 .name = "XT-PIC",
88 .startup = i8259a_startup_irq, 88 .startup = i8259a_startup_irq,
89 .shutdown = i8259a_disable_irq, 89 .shutdown = i8259a_disable_irq,
90 .enable = i8259a_enable_irq, 90 .enable = i8259a_enable_irq,
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 69199a76ec4a..989ce46a0cf3 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -71,7 +71,7 @@ pyxis_mask_and_ack_irq(unsigned int irq)
71} 71}
72 72
73static struct irq_chip pyxis_irq_type = { 73static struct irq_chip pyxis_irq_type = {
74 .typename = "PYXIS", 74 .name = "PYXIS",
75 .startup = pyxis_startup_irq, 75 .startup = pyxis_startup_irq,
76 .shutdown = pyxis_disable_irq, 76 .shutdown = pyxis_disable_irq,
77 .enable = pyxis_enable_irq, 77 .enable = pyxis_enable_irq,
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index 85229369a1f8..d63e93e1e8bf 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -49,7 +49,7 @@ srm_end_irq(unsigned int irq)
49 49
50/* Handle interrupts from the SRM, assuming no additional weirdness. */ 50/* Handle interrupts from the SRM, assuming no additional weirdness. */
51static struct irq_chip srm_irq_type = { 51static struct irq_chip srm_irq_type = {
52 .typename = "SRM", 52 .name = "SRM",
53 .startup = srm_startup_irq, 53 .startup = srm_startup_irq,
54 .shutdown = srm_disable_irq, 54 .shutdown = srm_disable_irq,
55 .enable = srm_enable_irq, 55 .enable = srm_enable_irq,
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 382035ef7394..20a30b8b9655 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -90,7 +90,7 @@ alcor_end_irq(unsigned int irq)
90} 90}
91 91
92static struct irq_chip alcor_irq_type = { 92static struct irq_chip alcor_irq_type = {
93 .typename = "ALCOR", 93 .name = "ALCOR",
94 .startup = alcor_startup_irq, 94 .startup = alcor_startup_irq,
95 .shutdown = alcor_disable_irq, 95 .shutdown = alcor_disable_irq,
96 .enable = alcor_enable_irq, 96 .enable = alcor_enable_irq,
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index ed349436732b..affd0f3f25df 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -72,7 +72,7 @@ cabriolet_end_irq(unsigned int irq)
72} 72}
73 73
74static struct irq_chip cabriolet_irq_type = { 74static struct irq_chip cabriolet_irq_type = {
75 .typename = "CABRIOLET", 75 .name = "CABRIOLET",
76 .startup = cabriolet_startup_irq, 76 .startup = cabriolet_startup_irq,
77 .shutdown = cabriolet_disable_irq, 77 .shutdown = cabriolet_disable_irq,
78 .enable = cabriolet_enable_irq, 78 .enable = cabriolet_enable_irq,
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 46e70ece5176..d64e1e497e76 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -199,7 +199,7 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
199} 199}
200 200
201static struct irq_chip dp264_irq_type = { 201static struct irq_chip dp264_irq_type = {
202 .typename = "DP264", 202 .name = "DP264",
203 .startup = dp264_startup_irq, 203 .startup = dp264_startup_irq,
204 .shutdown = dp264_disable_irq, 204 .shutdown = dp264_disable_irq,
205 .enable = dp264_enable_irq, 205 .enable = dp264_enable_irq,
@@ -210,7 +210,7 @@ static struct irq_chip dp264_irq_type = {
210}; 210};
211 211
212static struct irq_chip clipper_irq_type = { 212static struct irq_chip clipper_irq_type = {
213 .typename = "CLIPPER", 213 .name = "CLIPPER",
214 .startup = clipper_startup_irq, 214 .startup = clipper_startup_irq,
215 .shutdown = clipper_disable_irq, 215 .shutdown = clipper_disable_irq,
216 .enable = clipper_enable_irq, 216 .enable = clipper_enable_irq,
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index 660c23ef661f..df2090ce5e7f 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -70,7 +70,7 @@ eb64p_end_irq(unsigned int irq)
70} 70}
71 71
72static struct irq_chip eb64p_irq_type = { 72static struct irq_chip eb64p_irq_type = {
73 .typename = "EB64P", 73 .name = "EB64P",
74 .startup = eb64p_startup_irq, 74 .startup = eb64p_startup_irq,
75 .shutdown = eb64p_disable_irq, 75 .shutdown = eb64p_disable_irq,
76 .enable = eb64p_enable_irq, 76 .enable = eb64p_enable_irq,
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index b99ea488d844..3ca1dbcf4044 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -81,7 +81,7 @@ eiger_end_irq(unsigned int irq)
81} 81}
82 82
83static struct irq_chip eiger_irq_type = { 83static struct irq_chip eiger_irq_type = {
84 .typename = "EIGER", 84 .name = "EIGER",
85 .startup = eiger_startup_irq, 85 .startup = eiger_startup_irq,
86 .shutdown = eiger_disable_irq, 86 .shutdown = eiger_disable_irq,
87 .enable = eiger_enable_irq, 87 .enable = eiger_enable_irq,
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index ef0b83a070ac..7a7ae36fff91 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -119,7 +119,7 @@ jensen_local_end(unsigned int irq)
119} 119}
120 120
121static struct irq_chip jensen_local_irq_type = { 121static struct irq_chip jensen_local_irq_type = {
122 .typename = "LOCAL", 122 .name = "LOCAL",
123 .startup = jensen_local_startup, 123 .startup = jensen_local_startup,
124 .shutdown = jensen_local_shutdown, 124 .shutdown = jensen_local_shutdown,
125 .enable = jensen_local_enable, 125 .enable = jensen_local_enable,
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index bbfc4f20ca72..0bb3b5c4f693 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -170,7 +170,7 @@ marvel_irq_noop_return(unsigned int irq)
170} 170}
171 171
172static struct irq_chip marvel_legacy_irq_type = { 172static struct irq_chip marvel_legacy_irq_type = {
173 .typename = "LEGACY", 173 .name = "LEGACY",
174 .startup = marvel_irq_noop_return, 174 .startup = marvel_irq_noop_return,
175 .shutdown = marvel_irq_noop, 175 .shutdown = marvel_irq_noop,
176 .enable = marvel_irq_noop, 176 .enable = marvel_irq_noop,
@@ -180,7 +180,7 @@ static struct irq_chip marvel_legacy_irq_type = {
180}; 180};
181 181
182static struct irq_chip io7_lsi_irq_type = { 182static struct irq_chip io7_lsi_irq_type = {
183 .typename = "LSI", 183 .name = "LSI",
184 .startup = io7_startup_irq, 184 .startup = io7_startup_irq,
185 .shutdown = io7_disable_irq, 185 .shutdown = io7_disable_irq,
186 .enable = io7_enable_irq, 186 .enable = io7_enable_irq,
@@ -190,7 +190,7 @@ static struct irq_chip io7_lsi_irq_type = {
190}; 190};
191 191
192static struct irq_chip io7_msi_irq_type = { 192static struct irq_chip io7_msi_irq_type = {
193 .typename = "MSI", 193 .name = "MSI",
194 .startup = io7_startup_irq, 194 .startup = io7_startup_irq,
195 .shutdown = io7_disable_irq, 195 .shutdown = io7_disable_irq,
196 .enable = io7_enable_irq, 196 .enable = io7_enable_irq,
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index 4e366641a08e..ee8865169811 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -69,7 +69,7 @@ mikasa_end_irq(unsigned int irq)
69} 69}
70 70
71static struct irq_chip mikasa_irq_type = { 71static struct irq_chip mikasa_irq_type = {
72 .typename = "MIKASA", 72 .name = "MIKASA",
73 .startup = mikasa_startup_irq, 73 .startup = mikasa_startup_irq,
74 .shutdown = mikasa_disable_irq, 74 .shutdown = mikasa_disable_irq,
75 .enable = mikasa_enable_irq, 75 .enable = mikasa_enable_irq,
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 35753a173bac..86503fe73a88 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -74,7 +74,7 @@ noritake_end_irq(unsigned int irq)
74} 74}
75 75
76static struct irq_chip noritake_irq_type = { 76static struct irq_chip noritake_irq_type = {
77 .typename = "NORITAKE", 77 .name = "NORITAKE",
78 .startup = noritake_startup_irq, 78 .startup = noritake_startup_irq,
79 .shutdown = noritake_disable_irq, 79 .shutdown = noritake_disable_irq,
80 .enable = noritake_enable_irq, 80 .enable = noritake_enable_irq,
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index f3aec7e085c8..26c322bf89ee 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -136,7 +136,7 @@ rawhide_end_irq(unsigned int irq)
136} 136}
137 137
138static struct irq_chip rawhide_irq_type = { 138static struct irq_chip rawhide_irq_type = {
139 .typename = "RAWHIDE", 139 .name = "RAWHIDE",
140 .startup = rawhide_startup_irq, 140 .startup = rawhide_startup_irq,
141 .shutdown = rawhide_disable_irq, 141 .shutdown = rawhide_disable_irq,
142 .enable = rawhide_enable_irq, 142 .enable = rawhide_enable_irq,
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index d9f9cfeb9931..8de1046fe91e 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -66,7 +66,7 @@ ruffian_init_irq(void)
66 common_init_isa_dma(); 66 common_init_isa_dma();
67} 67}
68 68
69#define RUFFIAN_LATCH ((PIT_TICK_RATE + HZ / 2) / HZ) 69#define RUFFIAN_LATCH DIV_ROUND_CLOSEST(PIT_TICK_RATE, HZ)
70 70
71static void __init 71static void __init
72ruffian_init_rtc(void) 72ruffian_init_rtc(void)
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index fc9246373452..be161129eab9 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -73,7 +73,7 @@ rx164_end_irq(unsigned int irq)
73} 73}
74 74
75static struct irq_chip rx164_irq_type = { 75static struct irq_chip rx164_irq_type = {
76 .typename = "RX164", 76 .name = "RX164",
77 .startup = rx164_startup_irq, 77 .startup = rx164_startup_irq,
78 .shutdown = rx164_disable_irq, 78 .shutdown = rx164_disable_irq,
79 .enable = rx164_enable_irq, 79 .enable = rx164_enable_irq,
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 426eb6906d01..b2abe27a23cf 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -502,7 +502,7 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
502} 502}
503 503
504static struct irq_chip sable_lynx_irq_type = { 504static struct irq_chip sable_lynx_irq_type = {
505 .typename = "SABLE/LYNX", 505 .name = "SABLE/LYNX",
506 .startup = sable_lynx_startup_irq, 506 .startup = sable_lynx_startup_irq,
507 .shutdown = sable_lynx_disable_irq, 507 .shutdown = sable_lynx_disable_irq,
508 .enable = sable_lynx_enable_irq, 508 .enable = sable_lynx_enable_irq,
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index 830318c21661..230464885b5c 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -75,7 +75,7 @@ takara_end_irq(unsigned int irq)
75} 75}
76 76
77static struct irq_chip takara_irq_type = { 77static struct irq_chip takara_irq_type = {
78 .typename = "TAKARA", 78 .name = "TAKARA",
79 .startup = takara_startup_irq, 79 .startup = takara_startup_irq,
80 .shutdown = takara_disable_irq, 80 .shutdown = takara_disable_irq,
81 .enable = takara_enable_irq, 81 .enable = takara_enable_irq,
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 88978fc60f83..288053342c83 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -195,7 +195,7 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
195} 195}
196 196
197static struct irq_chip titan_irq_type = { 197static struct irq_chip titan_irq_type = {
198 .typename = "TITAN", 198 .name = "TITAN",
199 .startup = titan_startup_irq, 199 .startup = titan_startup_irq,
200 .shutdown = titan_disable_irq, 200 .shutdown = titan_disable_irq,
201 .enable = titan_enable_irq, 201 .enable = titan_enable_irq,
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index e91b4c3838a8..62fd972e18ef 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -158,7 +158,7 @@ wildfire_end_irq(unsigned int irq)
158} 158}
159 159
160static struct irq_chip wildfire_irq_type = { 160static struct irq_chip wildfire_irq_type = {
161 .typename = "WILDFIRE", 161 .name = "WILDFIRE",
162 .startup = wildfire_startup_irq, 162 .startup = wildfire_startup_irq,
163 .shutdown = wildfire_disable_irq, 163 .shutdown = wildfire_disable_irq,
164 .enable = wildfire_enable_irq, 164 .enable = wildfire_enable_irq,
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index d16ec97ec9a9..c019949a5189 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -22,4 +22,10 @@ enum km_type {
22 KM_TYPE_NR 22 KM_TYPE_NR
23}; 23};
24 24
25#ifdef CONFIG_DEBUG_HIGHMEM
26#define KM_NMI (-1)
27#define KM_NMI_PTE (-1)
28#define KM_IRQ_PTE (-1)
29#endif
30
25#endif 31#endif
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 2a573d4fea24..e7714f367eb8 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -662,8 +662,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
662 regs->ARM_sp -= 4; 662 regs->ARM_sp -= 4;
663 usp = (u32 __user *)regs->ARM_sp; 663 usp = (u32 __user *)regs->ARM_sp;
664 664
665 put_user(regs->ARM_pc, usp); 665 if (put_user(regs->ARM_pc, usp) == 0) {
666 regs->ARM_pc = KERN_RESTART_CODE; 666 regs->ARM_pc = KERN_RESTART_CODE;
667 } else {
668 regs->ARM_sp += 4;
669 force_sigsegv(0, current);
670 }
667#endif 671#endif
668 } 672 }
669 } 673 }
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index e35d54d43e70..2fd88437348b 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -289,13 +289,6 @@ config MACH_NEOCORE926
289 help 289 help
290 Select this if you are using the Adeneo Neocore 926 board. 290 Select this if you are using the Adeneo Neocore 926 board.
291 291
292config MACH_AT91SAM9G20EK_2MMC
293 bool "Atmel AT91SAM9G20-EK Evaluation Kit modified for 2 MMC Slots"
294 depends on ARCH_AT91SAM9G20
295 help
296 Select this if you are using an Atmel AT91SAM9G20-EK Evaluation Kit
297 Rev A or B modified for 2 MMC Slots.
298
299endif 292endif
300 293
301# ---------------------------------------------------------- 294# ----------------------------------------------------------
@@ -322,7 +315,16 @@ config MACH_AT91SAM9G20EK
322 bool "Atmel AT91SAM9G20-EK Evaluation Kit" 315 bool "Atmel AT91SAM9G20-EK Evaluation Kit"
323 depends on ARCH_AT91SAM9G20 316 depends on ARCH_AT91SAM9G20
324 help 317 help
325 Select this if you are using Atmel's AT91SAM9G20-EK Evaluation Kit. 318 Select this if you are using Atmel's AT91SAM9G20-EK Evaluation Kit
319 that embeds only one SD/MMC slot.
320
321config MACH_AT91SAM9G20EK_2MMC
322 bool "Atmel AT91SAM9G20-EK Evaluation Kit with 2 SD/MMC Slots"
323 depends on ARCH_AT91SAM9G20
324 help
325 Select this if you are using an Atmel AT91SAM9G20-EK Evaluation Kit
326 with 2 SD/MMC Slots. This is the case for AT91SAM9G20-EK rev. C and
327 onwards.
326 328
327config MACH_CPU9G20 329config MACH_CPU9G20
328 bool "Eukrea CPU9G20 board" 330 bool "Eukrea CPU9G20 board"
@@ -392,7 +394,7 @@ config MTD_AT91_DATAFLASH_CARD
392 394
393config MTD_NAND_ATMEL_BUSWIDTH_16 395config MTD_NAND_ATMEL_BUSWIDTH_16
394 bool "Enable 16-bit data bus interface to NAND flash" 396 bool "Enable 16-bit data bus interface to NAND flash"
395 depends on (MACH_AT91SAM9260EK || MACH_AT91SAM9261EK || MACH_AT91SAM9G10EK || MACH_AT91SAM9263EK || MACH_AT91SAM9G20EK || MACH_AT91SAM9G45EKES || MACH_AT91CAP9ADK) 397 depends on (MACH_AT91SAM9260EK || MACH_AT91SAM9261EK || MACH_AT91SAM9G10EK || MACH_AT91SAM9263EK || MACH_AT91SAM9G20EK || MACH_AT91SAM9G20EK_2MMC || MACH_AT91SAM9G45EKES || MACH_AT91CAP9ADK)
396 help 398 help
397 On AT91SAM926x boards both types of NAND flash can be present 399 On AT91SAM926x boards both types of NAND flash can be present
398 (8 and 16 bit data bus width). 400 (8 and 16 bit data bus width).
diff --git a/arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c b/arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c
index a28e53faf71d..a4102d72cc9b 100644
--- a/arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c
+++ b/arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c
@@ -90,7 +90,7 @@ static struct at91_udc_data __initdata ek_udc_data = {
90 * SPI devices. 90 * SPI devices.
91 */ 91 */
92static struct spi_board_info ek_spi_devices[] = { 92static struct spi_board_info ek_spi_devices[] = {
93#if !defined(CONFIG_MMC_ATMELMCI) 93#if !(defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_AT91))
94 { /* DataFlash chip */ 94 { /* DataFlash chip */
95 .modalias = "mtd_dataflash", 95 .modalias = "mtd_dataflash",
96 .chip_select = 1, 96 .chip_select = 1,
@@ -113,7 +113,7 @@ static struct spi_board_info ek_spi_devices[] = {
113 * MACB Ethernet device 113 * MACB Ethernet device
114 */ 114 */
115static struct at91_eth_data __initdata ek_macb_data = { 115static struct at91_eth_data __initdata ek_macb_data = {
116 .phy_irq_pin = AT91_PIN_PC12, 116 .phy_irq_pin = AT91_PIN_PB0,
117 .is_rmii = 1, 117 .is_rmii = 1,
118}; 118};
119 119
@@ -194,24 +194,27 @@ static void __init ek_add_device_nand(void)
194 194
195/* 195/*
196 * MCI (SD/MMC) 196 * MCI (SD/MMC)
197 * det_pin and wp_pin are not connected 197 * wp_pin is not connected
198 */ 198 */
199#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) 199#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
200static struct mci_platform_data __initdata ek_mmc_data = { 200static struct mci_platform_data __initdata ek_mmc_data = {
201 .slot[0] = { 201 .slot[0] = {
202 .bus_width = 4, 202 .bus_width = 4,
203 .detect_pin = -ENODEV, 203 .detect_pin = AT91_PIN_PC2,
204 .wp_pin = -ENODEV, 204 .wp_pin = -ENODEV,
205 }, 205 },
206 .slot[1] = { 206 .slot[1] = {
207 .bus_width = 4, 207 .bus_width = 4,
208 .detect_pin = -ENODEV, 208 .detect_pin = AT91_PIN_PC9,
209 .wp_pin = -ENODEV, 209 .wp_pin = -ENODEV,
210 }, 210 },
211 211
212}; 212};
213#else 213#else
214static struct amci_platform_data __initdata ek_mmc_data = { 214static struct at91_mmc_data __initdata ek_mmc_data = {
215 .slot_b = 1, /* Only one slot so use slot B */
216 .wire4 = 1,
217 .det_pin = AT91_PIN_PC9,
215}; 218};
216#endif 219#endif
217 220
@@ -221,13 +224,13 @@ static struct amci_platform_data __initdata ek_mmc_data = {
221static struct gpio_led ek_leds[] = { 224static struct gpio_led ek_leds[] = {
222 { /* "bottom" led, green, userled1 to be defined */ 225 { /* "bottom" led, green, userled1 to be defined */
223 .name = "ds5", 226 .name = "ds5",
224 .gpio = AT91_PIN_PB12, 227 .gpio = AT91_PIN_PB8,
225 .active_low = 1, 228 .active_low = 1,
226 .default_trigger = "none", 229 .default_trigger = "none",
227 }, 230 },
228 { /* "power" led, yellow */ 231 { /* "power" led, yellow */
229 .name = "ds1", 232 .name = "ds1",
230 .gpio = AT91_PIN_PB13, 233 .gpio = AT91_PIN_PB9,
231 .default_trigger = "heartbeat", 234 .default_trigger = "heartbeat",
232 } 235 }
233}; 236};
@@ -254,7 +257,11 @@ static void __init ek_board_init(void)
254 /* Ethernet */ 257 /* Ethernet */
255 at91_add_device_eth(&ek_macb_data); 258 at91_add_device_eth(&ek_macb_data);
256 /* MMC */ 259 /* MMC */
260#if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE)
257 at91_add_device_mci(0, &ek_mmc_data); 261 at91_add_device_mci(0, &ek_mmc_data);
262#else
263 at91_add_device_mmc(0, &ek_mmc_data);
264#endif
258 /* I2C */ 265 /* I2C */
259 at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices)); 266 at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices));
260 /* LEDs */ 267 /* LEDs */
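
The board file above selects one of two MMC stacks at compile time and uses -ENODEV as a "no pin" sentinel for unconnected detect/write-protect lines. A hedged sketch (not the actual atmel-mci driver code) of how a host driver typically consumes such a field; a negative value fails gpio_is_valid(), so the optional GPIO is simply skipped:

#include <linux/gpio.h>

/* Illustrative helper only: detect_pin comes from the platform data above. */
static int slot_setup_detect_gpio(int detect_pin)
{
	if (!gpio_is_valid(detect_pin))
		return 0;	/* no card-detect line wired up, e.g. -ENODEV */

	return gpio_request(detect_pin, "mmc_detect");
}
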
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 7177c4aa6342..242dd0775343 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -915,6 +915,14 @@ void __init kirkwood_init(void)
915 kirkwood_uart0_data[0].uartclk = kirkwood_tclk; 915 kirkwood_uart0_data[0].uartclk = kirkwood_tclk;
916 kirkwood_uart1_data[0].uartclk = kirkwood_tclk; 916 kirkwood_uart1_data[0].uartclk = kirkwood_tclk;
917 917
918 /*
919 * Disable propagation of mbus errors to the CPU local bus,
920 * as this causes mbus errors (which can occur for example
921 * for PCI aborts) to throw CPU aborts, which we're not set
922 * up to deal with.
923 */
924 writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG);
925
918 kirkwood_setup_cpu_mbus(); 926 kirkwood_setup_cpu_mbus();
919 927
920#ifdef CONFIG_CACHE_FEROCEON_L2 928#ifdef CONFIG_CACHE_FEROCEON_L2
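
The new lines clear a single bit in the bridge CPU configuration register with a read-modify-write so that mbus errors stop being raised as CPU aborts. A minimal sketch of that pattern, isolated into a hypothetical helper (the real change is inline in kirkwood_init(); CPU_CONFIG and CPU_CONFIG_ERROR_PROP are the macros added to bridge-regs.h in the next hunk):

#include <linux/io.h>
#include <mach/bridge-regs.h>

static void __init kirkwood_disable_mbus_error_propagation(void)
{
	u32 val = readl(CPU_CONFIG);

	val &= ~CPU_CONFIG_ERROR_PROP;	/* clear only this bit, keep the rest */
	writel(val, CPU_CONFIG);
}
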
diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
index 9e80d9232c83..418f5017c50e 100644
--- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
+++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
@@ -13,6 +13,9 @@
13 13
14#include <mach/kirkwood.h> 14#include <mach/kirkwood.h>
15 15
16#define CPU_CONFIG (BRIDGE_VIRT_BASE | 0x0100)
17#define CPU_CONFIG_ERROR_PROP 0x00000004
18
16#define CPU_CONTROL (BRIDGE_VIRT_BASE | 0x0104) 19#define CPU_CONTROL (BRIDGE_VIRT_BASE | 0x0104)
17#define CPU_RESET 0x00000002 20#define CPU_RESET 0x00000002
18 21
diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa910.h b/arch/arm/mach-mmp/include/mach/mfp-pxa910.h
index bf1189ff9a34..7e8a80f25ddc 100644
--- a/arch/arm/mach-mmp/include/mach/mfp-pxa910.h
+++ b/arch/arm/mach-mmp/include/mach/mfp-pxa910.h
@@ -160,7 +160,7 @@
160#define MMC1_WP_MMC1_WP MFP_CFG_DRV(MMC1_WP, AF0, MEDIUM) 160#define MMC1_WP_MMC1_WP MFP_CFG_DRV(MMC1_WP, AF0, MEDIUM)
161 161
162/* PWM */ 162/* PWM */
163#define GPIO27 PWM3 AF2 MFP_CFG(GPIO27, AF2) 163#define GPIO27_PWM3_AF2 MFP_CFG(GPIO27, AF2)
164#define GPIO51_PWM2_OUT MFP_CFG(GPIO51, AF2) 164#define GPIO51_PWM2_OUT MFP_CFG(GPIO51, AF2)
165#define GPIO117_PWM1_OUT MFP_CFG(GPIO117, AF2) 165#define GPIO117_PWM1_OUT MFP_CFG(GPIO117, AF2)
166#define GPIO118_PWM2_OUT MFP_CFG(GPIO118, AF2) 166#define GPIO118_PWM2_OUT MFP_CFG(GPIO118, AF2)
diff --git a/arch/arm/mach-omap2/board-zoom2.c b/arch/arm/mach-omap2/board-zoom2.c
index ea00486a5e53..51e0b3ba5f3a 100644
--- a/arch/arm/mach-omap2/board-zoom2.c
+++ b/arch/arm/mach-omap2/board-zoom2.c
@@ -30,57 +30,56 @@
30/* Zoom2 has Qwerty keyboard*/ 30/* Zoom2 has Qwerty keyboard*/
31static int board_keymap[] = { 31static int board_keymap[] = {
32 KEY(0, 0, KEY_E), 32 KEY(0, 0, KEY_E),
33 KEY(1, 0, KEY_R), 33 KEY(0, 1, KEY_R),
34 KEY(2, 0, KEY_T), 34 KEY(0, 2, KEY_T),
35 KEY(3, 0, KEY_HOME), 35 KEY(0, 3, KEY_HOME),
36 KEY(6, 0, KEY_I), 36 KEY(0, 6, KEY_I),
37 KEY(7, 0, KEY_LEFTSHIFT), 37 KEY(0, 7, KEY_LEFTSHIFT),
38 KEY(0, 1, KEY_D), 38 KEY(1, 0, KEY_D),
39 KEY(1, 1, KEY_F), 39 KEY(1, 1, KEY_F),
40 KEY(2, 1, KEY_G), 40 KEY(1, 2, KEY_G),
41 KEY(3, 1, KEY_SEND), 41 KEY(1, 3, KEY_SEND),
42 KEY(6, 1, KEY_K), 42 KEY(1, 6, KEY_K),
43 KEY(7, 1, KEY_ENTER), 43 KEY(1, 7, KEY_ENTER),
44 KEY(0, 2, KEY_X), 44 KEY(2, 0, KEY_X),
45 KEY(1, 2, KEY_C), 45 KEY(2, 1, KEY_C),
46 KEY(2, 2, KEY_V), 46 KEY(2, 2, KEY_V),
47 KEY(3, 2, KEY_END), 47 KEY(2, 3, KEY_END),
48 KEY(6, 2, KEY_DOT), 48 KEY(2, 6, KEY_DOT),
49 KEY(7, 2, KEY_CAPSLOCK), 49 KEY(2, 7, KEY_CAPSLOCK),
50 KEY(0, 3, KEY_Z), 50 KEY(3, 0, KEY_Z),
51 KEY(1, 3, KEY_KPPLUS), 51 KEY(3, 1, KEY_KPPLUS),
52 KEY(2, 3, KEY_B), 52 KEY(3, 2, KEY_B),
53 KEY(3, 3, KEY_F1), 53 KEY(3, 3, KEY_F1),
54 KEY(6, 3, KEY_O), 54 KEY(3, 6, KEY_O),
55 KEY(7, 3, KEY_SPACE), 55 KEY(3, 7, KEY_SPACE),
56 KEY(0, 4, KEY_W), 56 KEY(4, 0, KEY_W),
57 KEY(1, 4, KEY_Y), 57 KEY(4, 1, KEY_Y),
58 KEY(2, 4, KEY_U), 58 KEY(4, 2, KEY_U),
59 KEY(3, 4, KEY_F2), 59 KEY(4, 3, KEY_F2),
60 KEY(4, 4, KEY_VOLUMEUP), 60 KEY(4, 4, KEY_VOLUMEUP),
61 KEY(6, 4, KEY_L), 61 KEY(4, 6, KEY_L),
62 KEY(7, 4, KEY_LEFT), 62 KEY(4, 7, KEY_LEFT),
63 KEY(0, 5, KEY_S), 63 KEY(5, 0, KEY_S),
64 KEY(1, 5, KEY_H), 64 KEY(5, 1, KEY_H),
65 KEY(2, 5, KEY_J), 65 KEY(5, 2, KEY_J),
66 KEY(3, 5, KEY_F3), 66 KEY(5, 3, KEY_F3),
67 KEY(5, 5, KEY_VOLUMEDOWN), 67 KEY(5, 5, KEY_VOLUMEDOWN),
68 KEY(6, 5, KEY_M), 68 KEY(5, 6, KEY_M),
69 KEY(4, 5, KEY_ENTER), 69 KEY(5, 7, KEY_ENTER),
70 KEY(7, 5, KEY_RIGHT), 70 KEY(6, 0, KEY_Q),
71 KEY(0, 6, KEY_Q), 71 KEY(6, 1, KEY_A),
72 KEY(1, 6, KEY_A), 72 KEY(6, 2, KEY_N),
73 KEY(2, 6, KEY_N), 73 KEY(6, 3, KEY_BACKSPACE),
74 KEY(3, 6, KEY_BACKSPACE),
75 KEY(6, 6, KEY_P), 74 KEY(6, 6, KEY_P),
76 KEY(7, 6, KEY_UP),
77 KEY(6, 7, KEY_SELECT), 75 KEY(6, 7, KEY_SELECT),
78 KEY(7, 7, KEY_DOWN), 76 KEY(7, 0, KEY_PROG1), /*MACRO 1 <User defined> */
79 KEY(0, 7, KEY_PROG1), /*MACRO 1 <User defined> */ 77 KEY(7, 1, KEY_PROG2), /*MACRO 2 <User defined> */
80 KEY(1, 7, KEY_PROG2), /*MACRO 2 <User defined> */ 78 KEY(7, 2, KEY_PROG3), /*MACRO 3 <User defined> */
81 KEY(2, 7, KEY_PROG3), /*MACRO 3 <User defined> */ 79 KEY(7, 3, KEY_PROG4), /*MACRO 4 <User defined> */
82 KEY(3, 7, KEY_PROG4), /*MACRO 4 <User defined> */ 80 KEY(7, 5, KEY_RIGHT),
83 0 81 KEY(7, 6, KEY_UP),
82 KEY(7, 7, KEY_DOWN)
84}; 83};
85 84
86static struct matrix_keymap_data board_map_data = { 85static struct matrix_keymap_data board_map_data = {
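
The keymap rework puts the first two KEY() arguments into (row, column, keycode) order. As a hedged reference, the matrix-keypad KEY() helper packs its arguments roughly as (((row) << 24) | ((col) << 16) | (keycode)), so the position and code can be recovered like this (illustrative decode helper, not kernel code):

static void decode_matrix_entry(int entry, int *row, int *col, int *keycode)
{
	*row     = (entry >> 24) & 0xff;
	*col     = (entry >> 16) & 0xff;
	*keycode = entry & 0xffff;
}

Getting the row and column swapped therefore maps every key to the wrong matrix position, which is what the reordered table corrects.
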
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 489556eecbd1..7c5c00df3c70 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -473,7 +473,7 @@ static u16 _omap3_dpll_compute_freqsel(struct clk *clk, u8 n)
473 unsigned long fint; 473 unsigned long fint;
474 u16 f = 0; 474 u16 f = 0;
475 475
476 fint = clk->dpll_data->clk_ref->rate / (n + 1); 476 fint = clk->dpll_data->clk_ref->rate / n;
477 477
478 pr_debug("clock: fint is %lu\n", fint); 478 pr_debug("clock: fint is %lu\n", fint);
479 479
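
The fix stops dividing the DPLL reference by (n + 1) when the value passed in is already the real divider. Illustrative arithmetic only (the numbers are made up, not taken from an OMAP3 data sheet):

	/* With a 19.2 MHz reference and a divider of 12: */
	unsigned long fint = 19200000UL / 12;	/* 1,600,000 Hz */
	/* Dividing by (n + 1) again would report about 1,476,923 Hz and
	 * could pick the wrong FREQSEL band for the PLL. */
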
diff --git a/arch/arm/mach-omap2/clock34xx.h b/arch/arm/mach-omap2/clock34xx.h
index c8119781e00a..9565c05bebd2 100644
--- a/arch/arm/mach-omap2/clock34xx.h
+++ b/arch/arm/mach-omap2/clock34xx.h
@@ -489,9 +489,9 @@ static struct clk core_ck = {
489static struct clk dpll3_m2x2_ck = { 489static struct clk dpll3_m2x2_ck = {
490 .name = "dpll3_m2x2_ck", 490 .name = "dpll3_m2x2_ck",
491 .ops = &clkops_null, 491 .ops = &clkops_null,
492 .parent = &dpll3_x2_ck, 492 .parent = &dpll3_m2_ck,
493 .clkdm_name = "dpll3_clkdm", 493 .clkdm_name = "dpll3_clkdm",
494 .recalc = &followparent_recalc, 494 .recalc = &omap3_clkoutx2_recalc,
495}; 495};
496 496
497/* The PWRDN bit is apparently only available on 3430ES2 and above */ 497/* The PWRDN bit is apparently only available on 3430ES2 and above */
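
With the new parent and recalc hook, dpll3_m2x2_ck reports twice the rate of the post-M2 divider instead of blindly inheriting its parent's rate. A hedged sketch of what an "x2" recalc hook does (not the real omap3_clkoutx2_recalc(), which also has to handle DPLL bypass):

static unsigned long clkoutx2_recalc_sketch(struct clk *clk)
{
	/* followparent_recalc() would return clk->parent->rate unchanged;
	 * an x2 output runs at double that. */
	return clk->parent->rate * 2;
}
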
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index f8657568b1ba..f3c992e29651 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -378,7 +378,7 @@ EXPORT_SYMBOL(gpmc_cs_request);
378void gpmc_cs_free(int cs) 378void gpmc_cs_free(int cs)
379{ 379{
380 spin_lock(&gpmc_mem_lock); 380 spin_lock(&gpmc_mem_lock);
381 if (cs >= GPMC_CS_NUM || !gpmc_cs_reserved(cs)) { 381 if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
382 printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs); 382 printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
383 BUG(); 383 BUG();
384 spin_unlock(&gpmc_mem_lock); 384 spin_unlock(&gpmc_mem_lock);
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 494572825c7d..ec0e14b96682 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -27,6 +27,7 @@
27#include <mach/colibri.h> 27#include <mach/colibri.h>
28#include <mach/pxafb.h> 28#include <mach/pxafb.h>
29#include <mach/ohci.h> 29#include <mach/ohci.h>
30#include <mach/audio.h>
30 31
31#include "generic.h" 32#include "generic.h"
32#include "devices.h" 33#include "devices.h"
@@ -145,7 +146,8 @@ static void __init colibri_pxa320_init_lcd(void)
145static inline void colibri_pxa320_init_lcd(void) {} 146static inline void colibri_pxa320_init_lcd(void) {}
146#endif 147#endif
147 148
148#if defined(SND_AC97_CODEC) || defined(SND_AC97_CODEC_MODULE) 149#if defined(CONFIG_SND_AC97_CODEC) || \
150 defined(CONFIG_SND_AC97_CODEC_MODULE)
149static mfp_cfg_t colibri_pxa320_ac97_pin_config[] __initdata = { 151static mfp_cfg_t colibri_pxa320_ac97_pin_config[] __initdata = {
150 GPIO34_AC97_SYSCLK, 152 GPIO34_AC97_SYSCLK,
151 GPIO35_AC97_SDATA_IN_0, 153 GPIO35_AC97_SDATA_IN_0,
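
The AC97 guard was testing SND_AC97_CODEC without the CONFIG_ prefix. Kconfig options are only visible to the preprocessor as CONFIG_* defines, and an unknown identifier inside #if defined() silently evaluates to 0 rather than producing an error, so the block was never built. Sketch of the wrong and the corrected built-in-or-module test:

#if defined(SND_AC97_CODEC)			/* wrong: never defined, code silently dropped */
#endif

#if defined(CONFIG_SND_AC97_CODEC) || \
    defined(CONFIG_SND_AC97_CODEC_MODULE)	/* right: true for built-in or modular driver */
/* AC97 pin setup goes here */
#endif
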
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
index 983cc8c20081..9e4d9816726a 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa2xx.c
@@ -447,6 +447,7 @@ static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
447 pxa27x_freq_table[i].frequency = freq; 447 pxa27x_freq_table[i].frequency = freq;
448 pxa27x_freq_table[i].index = i; 448 pxa27x_freq_table[i].index = i;
449 } 449 }
450 pxa27x_freq_table[i].index = i;
450 pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END; 451 pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
451 452
452 /* 453 /*
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
index 67f34a8d8e60..149cdd9aee4d 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ b/arch/arm/mach-pxa/cpufreq-pxa3xx.c
@@ -102,7 +102,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
102 table[i].index = i; 102 table[i].index = i;
103 table[i].frequency = freqs[i].cpufreq_mhz * 1000; 103 table[i].frequency = freqs[i].cpufreq_mhz * 1000;
104 } 104 }
105 table[num].frequency = i; 105 table[num].index = i;
106 table[num].frequency = CPUFREQ_TABLE_END; 106 table[num].frequency = CPUFREQ_TABLE_END;
107 107
108 pxa3xx_freqs = freqs; 108 pxa3xx_freqs = freqs;
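
Both PXA cpufreq fixes concern the sentinel entry of the frequency table: the terminating slot must get a valid .index, and its .frequency must be CPUFREQ_TABLE_END. The pxa2xx code left the terminator's .index unset, and the pxa3xx code wrote the loop counter into .frequency instead of .index. A hedged sketch of a correctly built and terminated table (frequencies are illustrative):

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table example_freq_table[] = {
	{ .index = 0, .frequency = 104000 },	/* kHz */
	{ .index = 1, .frequency = 208000 },
	{ .index = 2, .frequency = 416000 },
	{ .index = 3, .frequency = CPUFREQ_TABLE_END },
};
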
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index abff9e132749..83bd3c6e3884 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -604,7 +604,7 @@ static struct platform_device gpio_vbus = {
604static const struct ads7846_platform_data tsc2046_info = { 604static const struct ads7846_platform_data tsc2046_info = {
605 .model = 7846, 605 .model = 7846,
606 .vref_delay_usecs = 100, 606 .vref_delay_usecs = 100,
607 .pressure_max = 512, 607 .pressure_max = 1024,
608 .debounce_max = 10, 608 .debounce_max = 10,
609 .debounce_tol = 3, 609 .debounce_tol = 3,
610 .debounce_rep = 1, 610 .debounce_rep = 1,
diff --git a/arch/arm/mach-pxa/include/mach/entry-macro.S b/arch/arm/mach-pxa/include/mach/entry-macro.S
index 241880608ac6..a73bc86a3c26 100644
--- a/arch/arm/mach-pxa/include/mach/entry-macro.S
+++ b/arch/arm/mach-pxa/include/mach/entry-macro.S
@@ -46,5 +46,6 @@
46 beq 1001f 46 beq 1001f
47 bic \irqstat, \irqstat, #0x80000000 47 bic \irqstat, \irqstat, #0x80000000
48 mov \irqnr, \irqstat, lsr #16 48 mov \irqnr, \irqstat, lsr #16
49 add \irqnr, \irqnr, #(PXA_IRQ(0))
491001: 501001:
50 .endm 51 .endm
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 3da45d051743..d98023f55503 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -802,10 +802,12 @@ static void __init spitz_init(void)
802{ 802{
803 spitz_ficp_platform_data.gpio_pwdown = SPITZ_GPIO_IR_ON; 803 spitz_ficp_platform_data.gpio_pwdown = SPITZ_GPIO_IR_ON;
804 804
805#ifdef CONFIG_MACH_BORZOI
805 if (machine_is_borzoi()) { 806 if (machine_is_borzoi()) {
806 sharpsl_nand_platform_data.badblock_pattern = &sharpsl_akita_bbt; 807 sharpsl_nand_platform_data.badblock_pattern = &sharpsl_akita_bbt;
807 sharpsl_nand_platform_data.ecc_layout = &akita_oobinfo; 808 sharpsl_nand_platform_data.ecc_layout = &akita_oobinfo;
808 } 809 }
810#endif
809 811
810 platform_scoop_config = &spitz_pcmcia_config; 812 platform_scoop_config = &spitz_pcmcia_config;
811 813
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index be60d6deee8b..653e25be3dd8 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -408,7 +408,7 @@ static struct platform_device keypad_device = {
408}; 408};
409 409
410static struct platform_device rtc_device = { 410static struct platform_device rtc_device = {
411 .name = "rtc0", 411 .name = "rtc-coh901331",
412 .id = -1, 412 .id = -1,
413 .num_resources = ARRAY_SIZE(rtc_resources), 413 .num_resources = ARRAY_SIZE(rtc_resources),
414 .resource = rtc_resources, 414 .resource = rtc_resources,
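
The rename matters because the platform bus binds devices to drivers purely by name: the board's platform_device must carry the same string the driver registers with (assumed here to be "rtc-coh901331"). The "rtc0" name seen under /dev is assigned separately by the RTC class core and plays no part in driver matching. A hedged sketch of the driver side of that match:

#include <linux/platform_device.h>

static struct platform_driver coh901331_driver_sketch = {
	.driver = {
		.name = "rtc-coh901331",	/* must equal the device .name above */
	},
};
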
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 71ebd7fcfea1..7c345b757df1 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -373,7 +373,7 @@ static inline int gpio_valid(int gpio)
373 373
374static int check_gpio(int gpio) 374static int check_gpio(int gpio)
375{ 375{
376 if (unlikely(gpio_valid(gpio)) < 0) { 376 if (unlikely(gpio_valid(gpio) < 0)) {
377 printk(KERN_ERR "omap-gpio: invalid GPIO %d\n", gpio); 377 printk(KERN_ERR "omap-gpio: invalid GPIO %d\n", gpio);
378 dump_stack(); 378 dump_stack();
379 return -1; 379 return -1;
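
The one-character move fixes an operator-precedence trap: unlikely(x) is only a branch-prediction hint and evaluates to the truth value of x (0 or 1), so unlikely(gpio_valid(gpio)) < 0 compared a 0/1 result with < 0 and was never true, letting bad GPIO numbers through. Sketch of the corrected form (gpio_valid() is this file's own helper):

static int check_gpio_sketch(int gpio)
{
	if (unlikely(gpio_valid(gpio) < 0))	/* whole comparison inside the hint */
		return -1;
	return 0;
}
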
diff --git a/arch/arm/plat-pxa/include/plat/mfp.h b/arch/arm/plat-pxa/include/plat/mfp.h
index 22086e696e8e..857a6839071c 100644
--- a/arch/arm/plat-pxa/include/plat/mfp.h
+++ b/arch/arm/plat-pxa/include/plat/mfp.h
@@ -16,7 +16,7 @@
16#ifndef __ASM_PLAT_MFP_H 16#ifndef __ASM_PLAT_MFP_H
17#define __ASM_PLAT_MFP_H 17#define __ASM_PLAT_MFP_H
18 18
19#define mfp_to_gpio(m) ((m) % 128) 19#define mfp_to_gpio(m) ((m) % 256)
20 20
21/* list of all the configurable MFP pins */ 21/* list of all the configurable MFP pins */
22enum { 22enum {
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c
index 9405d0379c85..be58f9fe65b0 100644
--- a/arch/arm/plat-pxa/mfp.c
+++ b/arch/arm/plat-pxa/mfp.c
@@ -207,7 +207,7 @@ unsigned long mfp_read(int mfp)
207{ 207{
208 unsigned long val, flags; 208 unsigned long val, flags;
209 209
210 BUG_ON(mfp >= MFP_PIN_MAX); 210 BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX);
211 211
212 spin_lock_irqsave(&mfp_spin_lock, flags); 212 spin_lock_irqsave(&mfp_spin_lock, flags);
213 val = mfpr_readl(mfp_table[mfp].mfpr_off); 213 val = mfpr_readl(mfp_table[mfp].mfpr_off);
@@ -220,7 +220,7 @@ void mfp_write(int mfp, unsigned long val)
220{ 220{
221 unsigned long flags; 221 unsigned long flags;
222 222
223 BUG_ON(mfp >= MFP_PIN_MAX); 223 BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX);
224 224
225 spin_lock_irqsave(&mfp_spin_lock, flags); 225 spin_lock_irqsave(&mfp_spin_lock, flags);
226 mfpr_writel(mfp_table[mfp].mfpr_off, val); 226 mfpr_writel(mfp_table[mfp].mfpr_off, val);
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 94be7bb6cb9a..07b976da6174 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
12# 12#
13# http://www.arm.linux.org.uk/developer/machines/?action=new 13# http://www.arm.linux.org.uk/developer/machines/?action=new
14# 14#
15# Last update: Fri Sep 18 21:42:00 2009 15# Last update: Wed Nov 25 22:14:58 2009
16# 16#
17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number 17# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
18# 18#
@@ -928,7 +928,7 @@ palmt5 MACH_PALMT5 PALMT5 917
928palmtc MACH_PALMTC PALMTC 918 928palmtc MACH_PALMTC PALMTC 918
929omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919 929omap_apollon MACH_OMAP_APOLLON OMAP_APOLLON 919
930mxc30030evb MACH_MXC30030EVB MXC30030EVB 920 930mxc30030evb MACH_MXC30030EVB MXC30030EVB 920
931rea_2d MACH_REA_2D REA_2D 921 931rea_cpu2 MACH_REA_2D REA_2D 921
932eti3e524 MACH_TI3E524 TI3E524 922 932eti3e524 MACH_TI3E524 TI3E524 922
933ateb9200 MACH_ATEB9200 ATEB9200 923 933ateb9200 MACH_ATEB9200 ATEB9200 923
934auckland MACH_AUCKLAND AUCKLAND 924 934auckland MACH_AUCKLAND AUCKLAND 924
@@ -2421,3 +2421,118 @@ liberty MACH_LIBERTY LIBERTY 2434
2421mh355 MACH_MH355 MH355 2435 2421mh355 MACH_MH355 MH355 2435
2422pc7802 MACH_PC7802 PC7802 2436 2422pc7802 MACH_PC7802 PC7802 2436
2423gnet_sgc MACH_GNET_SGC GNET_SGC 2437 2423gnet_sgc MACH_GNET_SGC GNET_SGC 2437
2424einstein15 MACH_EINSTEIN15 EINSTEIN15 2438
2425cmpd MACH_CMPD CMPD 2439
2426davinci_hase1 MACH_DAVINCI_HASE1 DAVINCI_HASE1 2440
2427lgeincitephone MACH_LGEINCITEPHONE LGEINCITEPHONE 2441
2428ea313x MACH_EA313X EA313X 2442
2429fwbd_39064 MACH_FWBD_39064 FWBD_39064 2443
2430fwbd_390128 MACH_FWBD_390128 FWBD_390128 2444
2431pelco_moe MACH_PELCO_MOE PELCO_MOE 2445
2432minimix27 MACH_MINIMIX27 MINIMIX27 2446
2433omap3_thunder MACH_OMAP3_THUNDER OMAP3_THUNDER 2447
2434passionc MACH_PASSIONC PASSIONC 2448
2435mx27amata MACH_MX27AMATA MX27AMATA 2449
2436bgat1 MACH_BGAT1 BGAT1 2450
2437buzz MACH_BUZZ BUZZ 2451
2438mb9g20 MACH_MB9G20 MB9G20 2452
2439yushan MACH_YUSHAN YUSHAN 2453
2440lizard MACH_LIZARD LIZARD 2454
2441omap3polycom MACH_OMAP3POLYCOM OMAP3POLYCOM 2455
2442smdkv210 MACH_SMDKV210 SMDKV210 2456
2443bravo MACH_BRAVO BRAVO 2457
2444siogentoo1 MACH_SIOGENTOO1 SIOGENTOO1 2458
2445siogentoo2 MACH_SIOGENTOO2 SIOGENTOO2 2459
2446sm3k MACH_SM3K SM3K 2460
2447acer_tempo_f900 MACH_ACER_TEMPO_F900 ACER_TEMPO_F900 2461
2448sst61vc010_dev MACH_SST61VC010_DEV SST61VC010_DEV 2462
2449glittertind MACH_GLITTERTIND GLITTERTIND 2463
2450omap_zoom3 MACH_OMAP_ZOOM3 OMAP_ZOOM3 2464
2451omap_3630sdp MACH_OMAP_3630SDP OMAP_3630SDP 2465
2452cybook2440 MACH_CYBOOK2440 CYBOOK2440 2466
2453torino_s MACH_TORINO_S TORINO_S 2467
2454havana MACH_HAVANA HAVANA 2468
2455beaumont_11 MACH_BEAUMONT_11 BEAUMONT_11 2469
2456vanguard MACH_VANGUARD VANGUARD 2470
2457s5pc110_draco MACH_S5PC110_DRACO S5PC110_DRACO 2471
2458cartesio_two MACH_CARTESIO_TWO CARTESIO_TWO 2472
2459aster MACH_ASTER ASTER 2473
2460voguesv210 MACH_VOGUESV210 VOGUESV210 2474
2461acm500x MACH_ACM500X ACM500X 2475
2462km9260 MACH_KM9260 KM9260 2476
2463nideflexg1 MACH_NIDEFLEXG1 NIDEFLEXG1 2477
2464ctera_plug_io MACH_CTERA_PLUG_IO CTERA_PLUG_IO 2478
2465smartq7 MACH_SMARTQ7 SMARTQ7 2479
2466at91sam9g10ek2 MACH_AT91SAM9G10EK2 AT91SAM9G10EK2 2480
2467asusp527 MACH_ASUSP527 ASUSP527 2481
2468at91sam9g20mpm2 MACH_AT91SAM9G20MPM2 AT91SAM9G20MPM2 2482
2469topasa900 MACH_TOPASA900 TOPASA900 2483
2470electrum_100 MACH_ELECTRUM_100 ELECTRUM_100 2484
2471mx51grb MACH_MX51GRB MX51GRB 2485
2472xea300 MACH_XEA300 XEA300 2486
2473htcstartrek MACH_HTCSTARTREK HTCSTARTREK 2487
2474lima MACH_LIMA LIMA 2488
2475csb740 MACH_CSB740 CSB740 2489
2476usb_s8815 MACH_USB_S8815 USB_S8815 2490
2477watson_efm_plugin MACH_WATSON_EFM_PLUGIN WATSON_EFM_PLUGIN 2491
2478milkyway MACH_MILKYWAY MILKYWAY 2492
2479g4evm MACH_G4EVM G4EVM 2493
2480picomod6 MACH_PICOMOD6 PICOMOD6 2494
2481omapl138_hawkboard MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD 2495
2482ip6000 MACH_IP6000 IP6000 2496
2483ip6010 MACH_IP6010 IP6010 2497
2484utm400 MACH_UTM400 UTM400 2498
2485omap3_zybex MACH_OMAP3_ZYBEX OMAP3_ZYBEX 2499
2486wireless_space MACH_WIRELESS_SPACE WIRELESS_SPACE 2500
2487sx560 MACH_SX560 SX560 2501
2488ts41x MACH_TS41X TS41X 2502
2489elphel10373 MACH_ELPHEL10373 ELPHEL10373 2503
2490rhobot MACH_RHOBOT RHOBOT 2504
2491mx51_refresh MACH_MX51_REFRESH MX51_REFRESH 2505
2492ls9260 MACH_LS9260 LS9260 2506
2493shank MACH_SHANK SHANK 2507
2494qsd8x50_st1 MACH_QSD8X50_ST1 QSD8X50_ST1 2508
2495at91sam9m10ekes MACH_AT91SAM9M10EKES AT91SAM9M10EKES 2509
2496hiram MACH_HIRAM HIRAM 2510
2497phy3250 MACH_PHY3250 PHY3250 2511
2498ea3250 MACH_EA3250 EA3250 2512
2499fdi3250 MACH_FDI3250 FDI3250 2513
2500whitestone MACH_WHITESTONE WHITESTONE 2514
2501at91sam9263nit MACH_AT91SAM9263NIT AT91SAM9263NIT 2515
2502ccmx51 MACH_CCMX51 CCMX51 2516
2503ccmx51js MACH_CCMX51JS CCMX51JS 2517
2504ccwmx51 MACH_CCWMX51 CCWMX51 2518
2505ccwmx51js MACH_CCWMX51JS CCWMX51JS 2519
2506mini6410 MACH_MINI6410 MINI6410 2520
2507tiny6410 MACH_TINY6410 TINY6410 2521
2508nano6410 MACH_NANO6410 NANO6410 2522
2509at572d940hfnldb MACH_AT572D940HFNLDB AT572D940HFNLDB 2523
2510htcleo MACH_HTCLEO HTCLEO 2524
2511avp13 MACH_AVP13 AVP13 2525
2512xxsvideod MACH_XXSVIDEOD XXSVIDEOD 2526
2513vpnext MACH_VPNEXT VPNEXT 2527
2514swarco_itc3 MACH_SWARCO_ITC3 SWARCO_ITC3 2528
2515tx51 MACH_TX51 TX51 2529
2516dolby_cat1021 MACH_DOLBY_CAT1021 DOLBY_CAT1021 2530
2517mx28evk MACH_MX28EVK MX28EVK 2531
2518phoenix260 MACH_PHOENIX260 PHOENIX260 2532
2519uvaca_stork MACH_UVACA_STORK UVACA_STORK 2533
2520smartq5 MACH_SMARTQ5 SMARTQ5 2534
2521all3078 MACH_ALL3078 ALL3078 2535
2522ctera_2bay_ds MACH_CTERA_2BAY_DS CTERA_2BAY_DS 2536
2523siogentoo3 MACH_SIOGENTOO3 SIOGENTOO3 2537
2524epb5000 MACH_EPB5000 EPB5000 2538
2525hy9263 MACH_HY9263 HY9263 2539
2526acer_tempo_m900 MACH_ACER_TEMPO_M900 ACER_TEMPO_M900 2540
2527acer_tempo_dx650 MACH_ACER_TEMPO_DX900 ACER_TEMPO_DX900 2541
2528acer_tempo_x960 MACH_ACER_TEMPO_X960 ACER_TEMPO_X960 2542
2529acer_eten_v900 MACH_ACER_ETEN_V900 ACER_ETEN_V900 2543
2530acer_eten_x900 MACH_ACER_ETEN_X900 ACER_ETEN_X900 2544
2531bonnell MACH_BONNELL BONNELL 2545
2532oht_mx27 MACH_OHT_MX27 OHT_MX27 2546
2533htcquartz MACH_HTCQUARTZ HTCQUARTZ 2547
2534davinci_dm6467tevm MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM 2548
2535c3ax03 MACH_C3AX03 C3AX03 2549
2536mxt_td60 MACH_MXT_TD60 MXT_TD60 2550
2537esyx MACH_ESYX ESYX 2551
2538bulldog MACH_BULLDOG BULLDOG 2553
diff --git a/arch/avr32/include/asm/bug.h b/arch/avr32/include/asm/bug.h
index 331d45bab18f..2aa373cc61b5 100644
--- a/arch/avr32/include/asm/bug.h
+++ b/arch/avr32/include/asm/bug.h
@@ -52,7 +52,7 @@
52#define BUG() \ 52#define BUG() \
53 do { \ 53 do { \
54 _BUG_OR_WARN(0); \ 54 _BUG_OR_WARN(0); \
55 for (;;); \ 55 unreachable(); \
56 } while (0) 56 } while (0)
57 57
58#define WARN_ON(condition) \ 58#define WARN_ON(condition) \
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 1f170216d2f9..3946aff4f414 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -225,8 +225,13 @@ int blackfin_dma_suspend(void)
225void blackfin_dma_resume(void) 225void blackfin_dma_resume(void)
226{ 226{
227 int i; 227 int i;
228 for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i) 228
229 dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map; 229 for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
230 dma_ch[i].regs->cfg = 0;
231
232 if (i < MAX_DMA_SUSPEND_CHANNELS)
233 dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
234 }
230} 235}
231#endif 236#endif
232 237
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index f7b9cdce8239..b52c1f8c4bc0 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -38,7 +38,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
38 38
39#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE 39#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
40 d_cache = CPLB_L1_CHBL; 40 d_cache = CPLB_L1_CHBL;
41#ifdef CONFIG_BFIN_EXTMEM_WRITETROUGH 41#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
42 d_cache |= CPLB_L1_AOW | CPLB_WT; 42 d_cache |= CPLB_L1_AOW | CPLB_WT;
43#endif 43#endif
44#endif 44#endif
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 430ae39456e8..5cc7e2e9e415 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -151,7 +151,7 @@ void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_
151 regs->pc = new_ip; 151 regs->pc = new_ip;
152 if (current->mm) 152 if (current->mm)
153 regs->p5 = current->mm->start_data; 153 regs->p5 = current->mm->start_data;
154#ifdef CONFIG_SMP 154#ifndef CONFIG_SMP
155 task_thread_info(current)->l1_task_info.stack_start = 155 task_thread_info(current)->l1_task_info.stack_start =
156 (void *)current->mm->context.stack_start; 156 (void *)current->mm->context.stack_start;
157 task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp; 157 task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 0982b5d5af10..56b0ba12175f 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -315,7 +315,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
315 case BFIN_MEM_ACCESS_CORE: 315 case BFIN_MEM_ACCESS_CORE:
316 case BFIN_MEM_ACCESS_CORE_ONLY: 316 case BFIN_MEM_ACCESS_CORE_ONLY:
317 copied = access_process_vm(child, addr, &data, 317 copied = access_process_vm(child, addr, &data,
318 to_copy, 0); 318 to_copy, 1);
319 if (copied) 319 if (copied)
320 break; 320 break;
321 321
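
The API detail behind this one-character fix: the last argument of access_process_vm() selects the direction, 0 to read from the traced task and 1 to write into it, and a poke request (which this path evidently services) needs the write form. A hedged sketch of a minimal poke helper built on the same call:

#include <linux/mm.h>
#include <linux/sched.h>

static int poke_child_word(struct task_struct *child, unsigned long addr,
			   unsigned long data)
{
	int copied = access_process_vm(child, addr, &data, sizeof(data), 1);

	return copied == sizeof(data) ? 0 : -EIO;
}
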
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index e9c65390edd1..2829dd0400f1 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -1,9 +1,13 @@
1/* 1/*
2 * File: include/asm-blackfin/mach-bf518/anomaly.h 2 * DO NOT EDIT THIS FILE
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * This file is under version control at
4 * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE
4 * 7 *
5 * Copyright (C) 2004-2009 Analog Devices Inc. 8 * Copyright 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
7 */ 11 */
8 12
9/* This file should be up to date with: 13/* This file should be up to date with:
@@ -70,6 +74,10 @@
70#define ANOMALY_05000461 (1) 74#define ANOMALY_05000461 (1)
71/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */ 75/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
72#define ANOMALY_05000462 (1) 76#define ANOMALY_05000462 (1)
77/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
78#define ANOMALY_05000473 (1)
79/* TESTSET Instruction Cannot Be Interrupted */
80#define ANOMALY_05000477 (1)
73 81
74/* Anomalies that don't exist on this proc */ 82/* Anomalies that don't exist on this proc */
75#define ANOMALY_05000099 (0) 83#define ANOMALY_05000099 (0)
@@ -133,5 +141,7 @@
133#define ANOMALY_05000450 (0) 141#define ANOMALY_05000450 (0)
134#define ANOMALY_05000465 (0) 142#define ANOMALY_05000465 (0)
135#define ANOMALY_05000467 (0) 143#define ANOMALY_05000467 (0)
144#define ANOMALY_05000474 (0)
145#define ANOMALY_05000475 (0)
136 146
137#endif 147#endif
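
These generated headers define every ANOMALY_* symbol unconditionally to 0 or 1, including anomalies the part does not have, so workaround code elsewhere can use them as plain C conditions and let the compiler drop the dead branch. A hedged, illustrative consumer of the new 05000473 define (not actual driver code; the real SPORT drivers have their own workaround paths):

#include <linux/io.h>
#include <linux/irqflags.h>
#include <mach/anomaly.h>

static inline void sport_tx_write_sketch(void __iomem *reg, u32 val)
{
	if (ANOMALY_05000473) {
		/* 32-bit SPORT data register access must not be interrupted */
		unsigned long flags;

		local_irq_save(flags);
		__raw_writel(val, reg);
		local_irq_restore(flags);
	} else {
		__raw_writel(val, reg);
	}
}
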
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index 3f9052687fa8..02040df8ec80 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -1,14 +1,18 @@
1/* 1/*
2 * File: include/asm-blackfin/mach-bf527/anomaly.h 2 * DO NOT EDIT THIS FILE
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * This file is under version control at
4 * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE
4 * 7 *
5 * Copyright (C) 2004-2009 Analog Devices Inc. 8 * Copyright 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
7 */ 11 */
8 12
9/* This file should be up to date with: 13/* This file should be up to date with:
10 * - Revision D, 08/14/2009; ADSP-BF526 Blackfin Processor Anomaly List 14 * - Revision D, 08/14/2009; ADSP-BF526 Blackfin Processor Anomaly List
11 * - Revision F, 03/03/2009; ADSP-BF527 Blackfin Processor Anomaly List 15 * - Revision G, 08/25/2009; ADSP-BF527 Blackfin Processor Anomaly List
12 */ 16 */
13 17
14#ifndef _MACH_ANOMALY_H_ 18#ifndef _MACH_ANOMALY_H_
@@ -200,6 +204,10 @@
200#define ANOMALY_05000467 (1) 204#define ANOMALY_05000467 (1)
201/* PLL Latches Incorrect Settings During Reset */ 205/* PLL Latches Incorrect Settings During Reset */
202#define ANOMALY_05000469 (1) 206#define ANOMALY_05000469 (1)
207/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
208#define ANOMALY_05000473 (1)
209/* TESTSET Instruction Cannot Be Interrupted */
210#define ANOMALY_05000477 (1)
203 211
204/* Anomalies that don't exist on this proc */ 212/* Anomalies that don't exist on this proc */
205#define ANOMALY_05000099 (0) 213#define ANOMALY_05000099 (0)
@@ -250,5 +258,7 @@
250#define ANOMALY_05000412 (0) 258#define ANOMALY_05000412 (0)
251#define ANOMALY_05000447 (0) 259#define ANOMALY_05000447 (0)
252#define ANOMALY_05000448 (0) 260#define ANOMALY_05000448 (0)
261#define ANOMALY_05000474 (0)
262#define ANOMALY_05000475 (0)
253 263
254#endif 264#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index cd83db2fb1a1..9b3f7a27714d 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -1,9 +1,13 @@
1/* 1/*
2 * File: include/asm-blackfin/mach-bf533/anomaly.h 2 * DO NOT EDIT THIS FILE
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * This file is under version control at
4 * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE
4 * 7 *
5 * Copyright (C) 2004-2009 Analog Devices Inc. 8 * Copyright 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
7 */ 11 */
8 12
9/* This file should be up to date with: 13/* This file should be up to date with:
@@ -202,6 +206,10 @@
202#define ANOMALY_05000443 (1) 206#define ANOMALY_05000443 (1)
203/* False Hardware Error when RETI Points to Invalid Memory */ 207/* False Hardware Error when RETI Points to Invalid Memory */
204#define ANOMALY_05000461 (1) 208#define ANOMALY_05000461 (1)
209/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
210#define ANOMALY_05000473 (1)
211/* TESTSET Instruction Cannot Be Interrupted */
212#define ANOMALY_05000477 (1)
205 213
206/* These anomalies have been "phased" out of analog.com anomaly sheets and are 214/* These anomalies have been "phased" out of analog.com anomaly sheets and are
207 * here to show running on older silicon just isn't feasible. 215 * here to show running on older silicon just isn't feasible.
@@ -349,5 +357,7 @@
349#define ANOMALY_05000450 (0) 357#define ANOMALY_05000450 (0)
350#define ANOMALY_05000465 (0) 358#define ANOMALY_05000465 (0)
351#define ANOMALY_05000467 (0) 359#define ANOMALY_05000467 (0)
360#define ANOMALY_05000474 (0)
361#define ANOMALY_05000475 (0)
352 362
353#endif 363#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index f091ad2d8ea8..d2c427bc6656 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -1,9 +1,13 @@
1/* 1/*
2 * File: include/asm-blackfin/mach-bf537/anomaly.h 2 * DO NOT EDIT THIS FILE
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * This file is under version control at
4 * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE
4 * 7 *
5 * Copyright (C) 2004-2009 Analog Devices Inc. 8 * Copyright 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
7 */ 11 */
8 12
9/* This file should be up to date with: 13/* This file should be up to date with:
@@ -156,6 +160,10 @@
156#define ANOMALY_05000443 (1) 160#define ANOMALY_05000443 (1)
157/* False Hardware Error when RETI Points to Invalid Memory */ 161/* False Hardware Error when RETI Points to Invalid Memory */
158#define ANOMALY_05000461 (1) 162#define ANOMALY_05000461 (1)
163/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
164#define ANOMALY_05000473 (1)
165/* TESTSET Instruction Cannot Be Interrupted */
166#define ANOMALY_05000477 (1)
159 167
160/* Anomalies that don't exist on this proc */ 168/* Anomalies that don't exist on this proc */
161#define ANOMALY_05000099 (0) 169#define ANOMALY_05000099 (0)
@@ -202,5 +210,7 @@
202#define ANOMALY_05000450 (0) 210#define ANOMALY_05000450 (0)
203#define ANOMALY_05000465 (0) 211#define ANOMALY_05000465 (0)
204#define ANOMALY_05000467 (0) 212#define ANOMALY_05000467 (0)
213#define ANOMALY_05000474 (0)
214#define ANOMALY_05000475 (0)
205 215
206#endif 216#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 26b76083e14c..d882b7e6f59b 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -1,9 +1,13 @@
1/* 1/*
2 * File: include/asm-blackfin/mach-bf538/anomaly.h 2 * DO NOT EDIT THIS FILE
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * This file is under version control at
4 * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE
4 * 7 *
5 * Copyright (C) 2004-2009 Analog Devices Inc. 8 * Copyright 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
7 */ 11 */
8 12
9/* This file should be up to date with: 13/* This file should be up to date with:
@@ -128,6 +132,10 @@
128#define ANOMALY_05000443 (1) 132#define ANOMALY_05000443 (1)
129/* False Hardware Error when RETI Points to Invalid Memory */ 133/* False Hardware Error when RETI Points to Invalid Memory */
130#define ANOMALY_05000461 (1) 134#define ANOMALY_05000461 (1)
135/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
136#define ANOMALY_05000473 (1)
137/* TESTSET Instruction Cannot Be Interrupted */
138#define ANOMALY_05000477 (1)
131 139
132/* Anomalies that don't exist on this proc */ 140/* Anomalies that don't exist on this proc */
133#define ANOMALY_05000099 (0) 141#define ANOMALY_05000099 (0)
@@ -176,5 +184,7 @@
176#define ANOMALY_05000450 (0) 184#define ANOMALY_05000450 (0)
177#define ANOMALY_05000465 (0) 185#define ANOMALY_05000465 (0)
178#define ANOMALY_05000467 (0) 186#define ANOMALY_05000467 (0)
187#define ANOMALY_05000474 (0)
188#define ANOMALY_05000475 (0)
179 189
180#endif 190#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index 52b116ae522a..7d08c7524498 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -1,9 +1,13 @@
1/* 1/*
2 * File: include/asm-blackfin/mach-bf548/anomaly.h 2 * DO NOT EDIT THIS FILE
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * This file is under version control at
4 * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE
4 * 7 *
5 * Copyright (C) 2004-2009 Analog Devices Inc. 8 * Copyright 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
7 */ 11 */
8 12
9/* This file should be up to date with: 13/* This file should be up to date with:
@@ -24,6 +28,8 @@
24#define ANOMALY_05000119 (1) 28#define ANOMALY_05000119 (1)
25/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */ 29/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
26#define ANOMALY_05000122 (1) 30#define ANOMALY_05000122 (1)
31/* Data Corruption with Cached External Memory and Non-Cached On-Chip L2 Memory */
32#define ANOMALY_05000220 (1)
27/* False Hardware Error from an Access in the Shadow of a Conditional Branch */ 33/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
28#define ANOMALY_05000245 (1) 34#define ANOMALY_05000245 (1)
29/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */ 35/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
@@ -200,6 +206,14 @@
200#define ANOMALY_05000466 (1) 206#define ANOMALY_05000466 (1)
201/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */ 207/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
202#define ANOMALY_05000467 (1) 208#define ANOMALY_05000467 (1)
209/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
210#define ANOMALY_05000473 (1)
211/* Access to DDR-SDRAM causes system hang under certain PLL/VR settings */
212#define ANOMALY_05000474 (1)
213/* Core Hang With L2/L3 Configured in Writeback Cache Mode */
214#define ANOMALY_05000475 (1)
215/* TESTSET Instruction Cannot Be Interrupted */
216#define ANOMALY_05000477 (1)
203 217
204/* Anomalies that don't exist on this proc */ 218/* Anomalies that don't exist on this proc */
205#define ANOMALY_05000099 (0) 219#define ANOMALY_05000099 (0)
@@ -215,7 +229,6 @@
215#define ANOMALY_05000198 (0) 229#define ANOMALY_05000198 (0)
216#define ANOMALY_05000202 (0) 230#define ANOMALY_05000202 (0)
217#define ANOMALY_05000215 (0) 231#define ANOMALY_05000215 (0)
218#define ANOMALY_05000220 (0)
219#define ANOMALY_05000227 (0) 232#define ANOMALY_05000227 (0)
220#define ANOMALY_05000230 (0) 233#define ANOMALY_05000230 (0)
221#define ANOMALY_05000231 (0) 234#define ANOMALY_05000231 (0)
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 0261a5e751b3..f99f174b129f 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -19,6 +19,16 @@
19 \reg\().h = _corelock; 19 \reg\().h = _corelock;
20.endm 20.endm
21 21
22.macro safe_testset addr:req, scratch:req
23#if ANOMALY_05000477
24 cli \scratch;
25 testset (\addr);
26 sti \scratch;
27#else
28 testset (\addr);
29#endif
30.endm
31
22/* 32/*
23 * r0 = address of atomic data to flush and invalidate (32bit). 33 * r0 = address of atomic data to flush and invalidate (32bit).
24 * 34 *
@@ -33,7 +43,7 @@ ENTRY(_get_core_lock)
33 cli r0; 43 cli r0;
34 coreslot_loadaddr p0; 44 coreslot_loadaddr p0;
35.Lretry_corelock: 45.Lretry_corelock:
36 testset (p0); 46 safe_testset p0, r2;
37 if cc jump .Ldone_corelock; 47 if cc jump .Ldone_corelock;
38 SSYNC(r2); 48 SSYNC(r2);
39 jump .Lretry_corelock 49 jump .Lretry_corelock
@@ -56,7 +66,7 @@ ENTRY(_get_core_lock_noflush)
56 cli r0; 66 cli r0;
57 coreslot_loadaddr p0; 67 coreslot_loadaddr p0;
58.Lretry_corelock_noflush: 68.Lretry_corelock_noflush:
59 testset (p0); 69 safe_testset p0, r2;
60 if cc jump .Ldone_corelock_noflush; 70 if cc jump .Ldone_corelock_noflush;
61 SSYNC(r2); 71 SSYNC(r2);
62 jump .Lretry_corelock_noflush 72 jump .Lretry_corelock_noflush
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 70da495c9665..5ddc981e9937 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -1,9 +1,13 @@
1/* 1/*
2 * File: include/asm-blackfin/mach-bf561/anomaly.h 2 * DO NOT EDIT THIS FILE
3 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 3 * This file is under version control at
4 * svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
5 * and can be replaced with that version at any time
6 * DO NOT EDIT THIS FILE
4 * 7 *
5 * Copyright (C) 2004-2009 Analog Devices Inc. 8 * Copyright 2004-2009 Analog Devices Inc.
6 * Licensed under the GPL-2 or later. 9 * Licensed under the ADI BSD license.
10 * https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
7 */ 11 */
8 12
9/* This file should be up to date with: 13/* This file should be up to date with:
@@ -213,7 +217,11 @@
213/* Disabling Peripherals with DMA Running May Cause DMA System Instability */ 217/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
214#define ANOMALY_05000278 (__SILICON_REVISION__ < 5) 218#define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
215/* False Hardware Error Exception when ISR Context Is Not Restored */ 219/* False Hardware Error Exception when ISR Context Is Not Restored */
216#define ANOMALY_05000281 (__SILICON_REVISION__ < 5) 220/* Temporarily walk around for bug 5423 till this issue is confirmed by
221 * official anomaly document. It looks 05000281 still exists on bf561
222 * v0.5.
223 */
224#define ANOMALY_05000281 (__SILICON_REVISION__ <= 5)
217/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */ 225/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
218#define ANOMALY_05000283 (1) 226#define ANOMALY_05000283 (1)
219/* Reads Will Receive Incorrect Data under Certain Conditions */ 227/* Reads Will Receive Incorrect Data under Certain Conditions */
@@ -280,6 +288,12 @@
280#define ANOMALY_05000443 (1) 288#define ANOMALY_05000443 (1)
281/* False Hardware Error when RETI Points to Invalid Memory */ 289/* False Hardware Error when RETI Points to Invalid Memory */
282#define ANOMALY_05000461 (1) 290#define ANOMALY_05000461 (1)
291/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
292#define ANOMALY_05000473 (1)
293/* Core Hang With L2/L3 Configured in Writeback Cache Mode */
294#define ANOMALY_05000475 (__SILICON_REVISION__ < 4)
295/* TESTSET Instruction Cannot Be Interrupted */
296#define ANOMALY_05000477 (1)
283 297
284/* Anomalies that don't exist on this proc */ 298/* Anomalies that don't exist on this proc */
285#define ANOMALY_05000119 (0) 299#define ANOMALY_05000119 (0)
@@ -304,5 +318,6 @@
304#define ANOMALY_05000450 (0) 318#define ANOMALY_05000450 (0)
305#define ANOMALY_05000465 (0) 319#define ANOMALY_05000465 (0)
306#define ANOMALY_05000467 (0) 320#define ANOMALY_05000467 (0)
321#define ANOMALY_05000474 (0)
307 322
308#endif 323#endif
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index 9dbafcdcf479..f2ca211a76a0 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -57,3 +57,8 @@
57 (!defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) && defined(CONFIG_BFIN_L2_WRITEBACK))) 57 (!defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) && defined(CONFIG_BFIN_L2_WRITEBACK)))
58# error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB. 58# error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB.
59#endif 59#endif
60
61#if ANOMALY_05000475 && \
62 (defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK))
63# error "Anomaly 475 does not allow you to use Write Back cache with L2 or External Memory"
64#endif
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index d98585f3237d..d92b168c8328 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -276,10 +276,9 @@ void smp_send_reschedule(int cpu)
276 if (cpu_is_offline(cpu)) 276 if (cpu_is_offline(cpu))
277 return; 277 return;
278 278
279 msg = kmalloc(sizeof(*msg), GFP_ATOMIC); 279 msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
280 if (!msg) 280 if (!msg)
281 return; 281 return;
282 memset(msg, 0, sizeof(msg));
283 INIT_LIST_HEAD(&msg->list); 282 INIT_LIST_HEAD(&msg->list);
284 msg->type = BFIN_IPI_RESCHEDULE; 283 msg->type = BFIN_IPI_RESCHEDULE;
285 284
@@ -305,10 +304,9 @@ void smp_send_stop(void)
305 if (cpus_empty(callmap)) 304 if (cpus_empty(callmap))
306 return; 305 return;
307 306
308 msg = kmalloc(sizeof(*msg), GFP_ATOMIC); 307 msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
309 if (!msg) 308 if (!msg)
310 return; 309 return;
311 memset(msg, 0, sizeof(msg));
312 INIT_LIST_HEAD(&msg->list); 310 INIT_LIST_HEAD(&msg->list);
313 msg->type = BFIN_IPI_CPU_STOP; 311 msg->type = BFIN_IPI_CPU_STOP;
314 312
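
Collapsing kmalloc() plus memset() into kzalloc() also fixes a latent bug in the removed lines: memset(msg, 0, sizeof(msg)) zeroed only pointer-sized bytes, since msg is a pointer; sizeof(*msg), or kzalloc(), covers the whole structure. A hedged sketch of the pattern with an illustrative message type:

#include <linux/list.h>
#include <linux/slab.h>

struct ipi_message_sketch {		/* stand-in for the real IPI message */
	struct list_head list;
	unsigned long type;
};

static struct ipi_message_sketch *alloc_ipi_msg(void)
{
	/* allocated and fully zeroed in one call, safe in atomic context */
	return kzalloc(sizeof(struct ipi_message_sketch), GFP_ATOMIC);
}
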
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
index dcbaea7ce128..f0acde68aaea 100644
--- a/arch/ia64/include/asm/swiotlb.h
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -4,8 +4,6 @@
4#include <linux/dma-mapping.h> 4#include <linux/dma-mapping.h>
5#include <linux/swiotlb.h> 5#include <linux/swiotlb.h>
6 6
7extern int swiotlb_force;
8
9#ifdef CONFIG_SWIOTLB 7#ifdef CONFIG_SWIOTLB
10extern int swiotlb; 8extern int swiotlb;
11extern void pci_swiotlb_init(void); 9extern void pci_swiotlb_init(void);
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 285aae8431c6..53292abf846c 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -41,7 +41,7 @@ struct dma_map_ops swiotlb_dma_ops = {
41void __init swiotlb_dma_init(void) 41void __init swiotlb_dma_init(void)
42{ 42{
43 dma_ops = &swiotlb_dma_ops; 43 dma_ops = &swiotlb_dma_ops;
44 swiotlb_init(); 44 swiotlb_init(1);
45} 45}
46 46
47void __init pci_swiotlb_init(void) 47void __init pci_swiotlb_init(void)
@@ -51,7 +51,7 @@ void __init pci_swiotlb_init(void)
51 swiotlb = 1; 51 swiotlb = 1;
52 printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); 52 printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
53 machvec_init("dig"); 53 machvec_init("dig");
54 swiotlb_init(); 54 swiotlb_init(1);
55 dma_ops = &swiotlb_dma_ops; 55 dma_ops = &swiotlb_dma_ops;
56#else 56#else
57 panic("Unable to find Intel IOMMU"); 57 panic("Unable to find Intel IOMMU");
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 03bd56a2fb6e..fd7620f025fa 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1,6 +1,7 @@
1config MIPS 1config MIPS
2 bool 2 bool
3 default y 3 default y
4 select HAVE_GENERIC_DMA_COHERENT
4 select HAVE_IDE 5 select HAVE_IDE
5 select HAVE_OPROFILE 6 select HAVE_OPROFILE
6 select HAVE_ARCH_KGDB 7 select HAVE_ARCH_KGDB
@@ -357,7 +358,14 @@ config SGI_IP22
357 select SWAP_IO_SPACE 358 select SWAP_IO_SPACE
358 select SYS_HAS_CPU_R4X00 359 select SYS_HAS_CPU_R4X00
359 select SYS_HAS_CPU_R5000 360 select SYS_HAS_CPU_R5000
360 select SYS_HAS_EARLY_PRINTK 361 #
362 # Disable EARLY_PRINTK for now since it leads to overwritten prom
363 # memory during early boot on some machines.
364 #
365 # See http://www.linux-mips.org/cgi-bin/mesg.cgi?a=linux-mips&i=20091119164009.GA15038%40deprecation.cyrius.com
366 # for a more details discussion
367 #
368 # select SYS_HAS_EARLY_PRINTK
361 select SYS_SUPPORTS_32BIT_KERNEL 369 select SYS_SUPPORTS_32BIT_KERNEL
362 select SYS_SUPPORTS_64BIT_KERNEL 370 select SYS_SUPPORTS_64BIT_KERNEL
363 select SYS_SUPPORTS_BIG_ENDIAN 371 select SYS_SUPPORTS_BIG_ENDIAN
@@ -409,7 +417,14 @@ config SGI_IP28
409 select SGI_HAS_ZILOG 417 select SGI_HAS_ZILOG
410 select SWAP_IO_SPACE 418 select SWAP_IO_SPACE
411 select SYS_HAS_CPU_R10000 419 select SYS_HAS_CPU_R10000
412 select SYS_HAS_EARLY_PRINTK 420 #
421 # Disable EARLY_PRINTK for now since it leads to overwritten prom
422 # memory during early boot on some machines.
423 #
424 # See http://www.linux-mips.org/cgi-bin/mesg.cgi?a=linux-mips&i=20091119164009.GA15038%40deprecation.cyrius.com
425 # for a more details discussion
426 #
427 # select SYS_HAS_EARLY_PRINTK
413 select SYS_SUPPORTS_64BIT_KERNEL 428 select SYS_SUPPORTS_64BIT_KERNEL
414 select SYS_SUPPORTS_BIG_ENDIAN 429 select SYS_SUPPORTS_BIG_ENDIAN
415 help 430 help
@@ -1438,6 +1453,7 @@ choice
1438 1453
1439config PAGE_SIZE_4KB 1454config PAGE_SIZE_4KB
1440 bool "4kB" 1455 bool "4kB"
1456 depends on !CPU_LOONGSON2
1441 help 1457 help
1442 This option select the standard 4kB Linux page size. On some 1458 This option select the standard 4kB Linux page size. On some
1443 R3000-family processors this is the only available page size. Using 1459 R3000-family processors this is the only available page size. Using
@@ -1762,7 +1778,7 @@ config SYS_SUPPORTS_SMARTMIPS
1762 1778
1763config ARCH_FLATMEM_ENABLE 1779config ARCH_FLATMEM_ENABLE
1764 def_bool y 1780 def_bool y
1765 depends on !NUMA 1781 depends on !NUMA && !CPU_LOONGSON2
1766 1782
1767config ARCH_DISCONTIGMEM_ENABLE 1783config ARCH_DISCONTIGMEM_ENABLE
1768 bool 1784 bool
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index 079e33d52783..fb284c3b2cff 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -100,7 +100,7 @@ static __init void prom_init_console(void)
100 100
101static __init void prom_init_cmdline(void) 101static __init void prom_init_cmdline(void)
102{ 102{
103 char buf[CL_SIZE]; 103 static char buf[CL_SIZE] __initdata;
104 104
105 /* Get the kernel command line from CFE */ 105 /* Get the kernel command line from CFE */
106 if (cfe_getenv("LINUX_CMDLINE", buf, CL_SIZE) >= 0) { 106 if (cfe_getenv("LINUX_CMDLINE", buf, CL_SIZE) >= 0) {
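
The command-line buffer moves off the stack: CL_SIZE is a few hundred bytes on most configurations, which is a lot to burn on an early-boot stack frame. Declaring it static __initdata keeps it out of the frame and lets the memory be reclaimed with the rest of the init sections after boot. A hedged sketch of the same pattern with an illustrative size (512 is not the real CL_SIZE):

#include <linux/init.h>
#include <linux/string.h>

static char early_cmdline_buf[512] __initdata;	/* off the boot stack, freed after init */

static void __init copy_cmdline_sketch(const char *src)
{
	strlcpy(early_cmdline_buf, src, sizeof(early_cmdline_buf));
}
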
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index c69813b8488c..6c6a19aebe1f 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc7 3# Linux kernel version: 2.6.32-rc6
4# Wed Mar 4 23:08:06 2009 4# Sun Nov 8 22:59:47 2009
5# 5#
6CONFIG_MIPS=y 6CONFIG_MIPS=y
7 7
@@ -9,16 +9,18 @@ CONFIG_MIPS=y
9# Machine selection 9# Machine selection
10# 10#
11# CONFIG_MACH_ALCHEMY is not set 11# CONFIG_MACH_ALCHEMY is not set
12# CONFIG_AR7 is not set
12# CONFIG_BASLER_EXCITE is not set 13# CONFIG_BASLER_EXCITE is not set
13# CONFIG_BCM47XX is not set 14# CONFIG_BCM47XX is not set
15# CONFIG_BCM63XX is not set
14# CONFIG_MIPS_COBALT is not set 16# CONFIG_MIPS_COBALT is not set
15# CONFIG_MACH_DECSTATION is not set 17# CONFIG_MACH_DECSTATION is not set
16# CONFIG_MACH_JAZZ is not set 18# CONFIG_MACH_JAZZ is not set
17# CONFIG_LASAT is not set 19# CONFIG_LASAT is not set
18# CONFIG_LEMOTE_FULONG is not set 20# CONFIG_MACH_LOONGSON is not set
19# CONFIG_MIPS_MALTA is not set 21# CONFIG_MIPS_MALTA is not set
20# CONFIG_MIPS_SIM is not set 22# CONFIG_MIPS_SIM is not set
21# CONFIG_MACH_EMMA is not set 23# CONFIG_NEC_MARKEINS is not set
22# CONFIG_MACH_VR41XX is not set 24# CONFIG_MACH_VR41XX is not set
23# CONFIG_NXP_STB220 is not set 25# CONFIG_NXP_STB220 is not set
24# CONFIG_NXP_STB225 is not set 26# CONFIG_NXP_STB225 is not set
@@ -45,6 +47,7 @@ CONFIG_MACH_TX49XX=y
45# CONFIG_WR_PPMC is not set 47# CONFIG_WR_PPMC is not set
46# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set 48# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
47# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set 49# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
50# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
48CONFIG_MACH_TXX9=y 51CONFIG_MACH_TXX9=y
49CONFIG_TOSHIBA_RBTX4927=y 52CONFIG_TOSHIBA_RBTX4927=y
50CONFIG_TOSHIBA_RBTX4938=y 53CONFIG_TOSHIBA_RBTX4938=y
@@ -86,7 +89,6 @@ CONFIG_DMA_NONCOHERENT=y
86CONFIG_DMA_NEED_PCI_MAP_STATE=y 89CONFIG_DMA_NEED_PCI_MAP_STATE=y
87CONFIG_EARLY_PRINTK=y 90CONFIG_EARLY_PRINTK=y
88CONFIG_SYS_HAS_EARLY_PRINTK=y 91CONFIG_SYS_HAS_EARLY_PRINTK=y
89# CONFIG_HOTPLUG_CPU is not set
90# CONFIG_NO_IOPORT is not set 92# CONFIG_NO_IOPORT is not set
91CONFIG_GENERIC_GPIO=y 93CONFIG_GENERIC_GPIO=y
92CONFIG_CPU_BIG_ENDIAN=y 94CONFIG_CPU_BIG_ENDIAN=y
@@ -101,7 +103,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
101# 103#
102# CPU selection 104# CPU selection
103# 105#
104# CONFIG_CPU_LOONGSON2 is not set 106# CONFIG_CPU_LOONGSON2E is not set
105# CONFIG_CPU_MIPS32_R1 is not set 107# CONFIG_CPU_MIPS32_R1 is not set
106# CONFIG_CPU_MIPS32_R2 is not set 108# CONFIG_CPU_MIPS32_R2 is not set
107# CONFIG_CPU_MIPS64_R1 is not set 109# CONFIG_CPU_MIPS64_R1 is not set
@@ -137,6 +139,7 @@ CONFIG_32BIT=y
137CONFIG_PAGE_SIZE_4KB=y 139CONFIG_PAGE_SIZE_4KB=y
138# CONFIG_PAGE_SIZE_8KB is not set 140# CONFIG_PAGE_SIZE_8KB is not set
139# CONFIG_PAGE_SIZE_16KB is not set 141# CONFIG_PAGE_SIZE_16KB is not set
142# CONFIG_PAGE_SIZE_32KB is not set
140# CONFIG_PAGE_SIZE_64KB is not set 143# CONFIG_PAGE_SIZE_64KB is not set
141CONFIG_CPU_HAS_PREFETCH=y 144CONFIG_CPU_HAS_PREFETCH=y
142CONFIG_MIPS_MT_DISABLED=y 145CONFIG_MIPS_MT_DISABLED=y
@@ -154,7 +157,10 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
154# CONFIG_PHYS_ADDR_T_64BIT is not set 157# CONFIG_PHYS_ADDR_T_64BIT is not set
155CONFIG_ZONE_DMA_FLAG=0 158CONFIG_ZONE_DMA_FLAG=0
156CONFIG_VIRT_TO_BUS=y 159CONFIG_VIRT_TO_BUS=y
157CONFIG_UNEVICTABLE_LRU=y 160CONFIG_HAVE_MLOCK=y
161CONFIG_HAVE_MLOCKED_PAGE_BIT=y
162# CONFIG_KSM is not set
163CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
158CONFIG_TICK_ONESHOT=y 164CONFIG_TICK_ONESHOT=y
159CONFIG_NO_HZ=y 165CONFIG_NO_HZ=y
160CONFIG_HIGH_RES_TIMERS=y 166CONFIG_HIGH_RES_TIMERS=y
@@ -175,6 +181,7 @@ CONFIG_PREEMPT_NONE=y
175CONFIG_LOCKDEP_SUPPORT=y 181CONFIG_LOCKDEP_SUPPORT=y
176CONFIG_STACKTRACE_SUPPORT=y 182CONFIG_STACKTRACE_SUPPORT=y
177CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 183CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
184CONFIG_CONSTRUCTORS=y
178 185
179# 186#
180# General setup 187# General setup
@@ -194,11 +201,12 @@ CONFIG_SYSVIPC_SYSCTL=y
194# 201#
195# RCU Subsystem 202# RCU Subsystem
196# 203#
197CONFIG_CLASSIC_RCU=y 204CONFIG_TREE_RCU=y
198# CONFIG_TREE_RCU is not set 205# CONFIG_TREE_PREEMPT_RCU is not set
199# CONFIG_PREEMPT_RCU is not set 206# CONFIG_RCU_TRACE is not set
207CONFIG_RCU_FANOUT=32
208# CONFIG_RCU_FANOUT_EXACT is not set
200# CONFIG_TREE_RCU_TRACE is not set 209# CONFIG_TREE_RCU_TRACE is not set
201# CONFIG_PREEMPT_RCU_TRACE is not set
202CONFIG_IKCONFIG=y 210CONFIG_IKCONFIG=y
203CONFIG_IKCONFIG_PROC=y 211CONFIG_IKCONFIG_PROC=y
204CONFIG_LOG_BUF_SHIFT=14 212CONFIG_LOG_BUF_SHIFT=14
@@ -209,8 +217,12 @@ CONFIG_SYSFS_DEPRECATED_V2=y
209# CONFIG_NAMESPACES is not set 217# CONFIG_NAMESPACES is not set
210CONFIG_BLK_DEV_INITRD=y 218CONFIG_BLK_DEV_INITRD=y
211CONFIG_INITRAMFS_SOURCE="" 219CONFIG_INITRAMFS_SOURCE=""
220CONFIG_RD_GZIP=y
221# CONFIG_RD_BZIP2 is not set
222# CONFIG_RD_LZMA is not set
212CONFIG_CC_OPTIMIZE_FOR_SIZE=y 223CONFIG_CC_OPTIMIZE_FOR_SIZE=y
213CONFIG_SYSCTL=y 224CONFIG_SYSCTL=y
225CONFIG_ANON_INODES=y
214CONFIG_EMBEDDED=y 226CONFIG_EMBEDDED=y
215CONFIG_SYSCTL_SYSCALL=y 227CONFIG_SYSCTL_SYSCALL=y
216CONFIG_KALLSYMS=y 228CONFIG_KALLSYMS=y
@@ -220,25 +232,35 @@ CONFIG_PRINTK=y
220CONFIG_BUG=y 232CONFIG_BUG=y
221CONFIG_ELF_CORE=y 233CONFIG_ELF_CORE=y
222# CONFIG_PCSPKR_PLATFORM is not set 234# CONFIG_PCSPKR_PLATFORM is not set
223CONFIG_COMPAT_BRK=y
224CONFIG_BASE_FULL=y 235CONFIG_BASE_FULL=y
225# CONFIG_FUTEX is not set 236CONFIG_FUTEX=y
226CONFIG_ANON_INODES=y
227# CONFIG_EPOLL is not set 237# CONFIG_EPOLL is not set
228CONFIG_SIGNALFD=y 238CONFIG_SIGNALFD=y
229CONFIG_TIMERFD=y 239CONFIG_TIMERFD=y
230CONFIG_EVENTFD=y 240CONFIG_EVENTFD=y
231CONFIG_SHMEM=y 241CONFIG_SHMEM=y
232CONFIG_AIO=y 242CONFIG_AIO=y
243
244#
245# Kernel Performance Events And Counters
246#
233CONFIG_VM_EVENT_COUNTERS=y 247CONFIG_VM_EVENT_COUNTERS=y
234CONFIG_PCI_QUIRKS=y 248CONFIG_PCI_QUIRKS=y
249CONFIG_COMPAT_BRK=y
235CONFIG_SLAB=y 250CONFIG_SLAB=y
236# CONFIG_SLUB is not set 251# CONFIG_SLUB is not set
237# CONFIG_SLOB is not set 252# CONFIG_SLOB is not set
238# CONFIG_PROFILING is not set 253# CONFIG_PROFILING is not set
239CONFIG_HAVE_OPROFILE=y 254CONFIG_HAVE_OPROFILE=y
240# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set 255
256#
257# GCOV-based kernel profiling
258#
259# CONFIG_GCOV_KERNEL is not set
260# CONFIG_SLOW_WORK is not set
261CONFIG_HAVE_GENERIC_DMA_COHERENT=y
241CONFIG_SLABINFO=y 262CONFIG_SLABINFO=y
263CONFIG_RT_MUTEXES=y
242CONFIG_BASE_SMALL=0 264CONFIG_BASE_SMALL=0
243CONFIG_MODULES=y 265CONFIG_MODULES=y
244# CONFIG_MODULE_FORCE_LOAD is not set 266# CONFIG_MODULE_FORCE_LOAD is not set
@@ -246,8 +268,8 @@ CONFIG_MODULE_UNLOAD=y
246# CONFIG_MODVERSIONS is not set 268# CONFIG_MODVERSIONS is not set
247# CONFIG_MODULE_SRCVERSION_ALL is not set 269# CONFIG_MODULE_SRCVERSION_ALL is not set
248CONFIG_BLOCK=y 270CONFIG_BLOCK=y
249# CONFIG_LBD is not set 271# CONFIG_LBDAF is not set
250# CONFIG_BLK_DEV_IO_TRACE is not set 272# CONFIG_BLK_DEV_BSG is not set
251# CONFIG_BLK_DEV_INTEGRITY is not set 273# CONFIG_BLK_DEV_INTEGRITY is not set
252 274
253# 275#
@@ -274,6 +296,7 @@ CONFIG_PCI_DOMAINS=y
274# CONFIG_ARCH_SUPPORTS_MSI is not set 296# CONFIG_ARCH_SUPPORTS_MSI is not set
275# CONFIG_PCI_LEGACY is not set 297# CONFIG_PCI_LEGACY is not set
276# CONFIG_PCI_STUB is not set 298# CONFIG_PCI_STUB is not set
299# CONFIG_PCI_IOV is not set
277CONFIG_MMU=y 300CONFIG_MMU=y
278 301
279# 302#
@@ -288,6 +311,7 @@ CONFIG_TRAD_SIGNALS=y
288# 311#
289# Power management options 312# Power management options
290# 313#
314CONFIG_ARCH_HIBERNATION_POSSIBLE=y
291CONFIG_ARCH_SUSPEND_POSSIBLE=y 315CONFIG_ARCH_SUSPEND_POSSIBLE=y
292# CONFIG_PM is not set 316# CONFIG_PM is not set
293CONFIG_NET=y 317CONFIG_NET=y
@@ -295,7 +319,6 @@ CONFIG_NET=y
295# 319#
296# Networking options 320# Networking options
297# 321#
298CONFIG_COMPAT_NET_DEV_OPS=y
299CONFIG_PACKET=y 322CONFIG_PACKET=y
300# CONFIG_PACKET_MMAP is not set 323# CONFIG_PACKET_MMAP is not set
301CONFIG_UNIX=y 324CONFIG_UNIX=y
@@ -311,6 +334,7 @@ CONFIG_IP_PNP=y
311# CONFIG_NET_IPIP is not set 334# CONFIG_NET_IPIP is not set
312# CONFIG_NET_IPGRE is not set 335# CONFIG_NET_IPGRE is not set
313# CONFIG_IP_MROUTE is not set 336# CONFIG_IP_MROUTE is not set
337# CONFIG_ARPD is not set
314# CONFIG_SYN_COOKIES is not set 338# CONFIG_SYN_COOKIES is not set
315# CONFIG_INET_AH is not set 339# CONFIG_INET_AH is not set
316# CONFIG_INET_ESP is not set 340# CONFIG_INET_ESP is not set
@@ -336,6 +360,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
336# CONFIG_LLC2 is not set 360# CONFIG_LLC2 is not set
337# CONFIG_IPX is not set 361# CONFIG_IPX is not set
338# CONFIG_ATALK is not set 362# CONFIG_ATALK is not set
363# CONFIG_PHONET is not set
339# CONFIG_NET_SCHED is not set 364# CONFIG_NET_SCHED is not set
340# CONFIG_DCB is not set 365# CONFIG_DCB is not set
341 366
@@ -347,7 +372,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
347# CONFIG_CAN is not set 372# CONFIG_CAN is not set
348# CONFIG_IRDA is not set 373# CONFIG_IRDA is not set
349# CONFIG_BT is not set 374# CONFIG_BT is not set
350# CONFIG_PHONET is not set
351# CONFIG_WIRELESS is not set 375# CONFIG_WIRELESS is not set
352# CONFIG_WIMAX is not set 376# CONFIG_WIMAX is not set
353# CONFIG_RFKILL is not set 377# CONFIG_RFKILL is not set
@@ -365,9 +389,9 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
365# CONFIG_CONNECTOR is not set 389# CONFIG_CONNECTOR is not set
366CONFIG_MTD=y 390CONFIG_MTD=y
367# CONFIG_MTD_DEBUG is not set 391# CONFIG_MTD_DEBUG is not set
392# CONFIG_MTD_TESTS is not set
368# CONFIG_MTD_CONCAT is not set 393# CONFIG_MTD_CONCAT is not set
369CONFIG_MTD_PARTITIONS=y 394CONFIG_MTD_PARTITIONS=y
370# CONFIG_MTD_TESTS is not set
371# CONFIG_MTD_REDBOOT_PARTS is not set 395# CONFIG_MTD_REDBOOT_PARTS is not set
372CONFIG_MTD_CMDLINE_PARTS=y 396CONFIG_MTD_CMDLINE_PARTS=y
373# CONFIG_MTD_AR7_PARTS is not set 397# CONFIG_MTD_AR7_PARTS is not set
@@ -376,9 +400,9 @@ CONFIG_MTD_CMDLINE_PARTS=y
376# User Modules And Translation Layers 400# User Modules And Translation Layers
377# 401#
378CONFIG_MTD_CHAR=y 402CONFIG_MTD_CHAR=y
379# CONFIG_MTD_BLKDEVS is not set 403CONFIG_MTD_BLKDEVS=m
380# CONFIG_MTD_BLOCK is not set 404CONFIG_MTD_BLOCK=m
381# CONFIG_MTD_BLOCK_RO is not set 405CONFIG_MTD_BLOCK_RO=m
382# CONFIG_FTL is not set 406# CONFIG_FTL is not set
383# CONFIG_NFTL is not set 407# CONFIG_NFTL is not set
384# CONFIG_INFTL is not set 408# CONFIG_INFTL is not set
@@ -414,16 +438,20 @@ CONFIG_MTD_CFI_UTIL=y
414# 438#
415# Mapping drivers for chip access 439# Mapping drivers for chip access
416# 440#
417# CONFIG_MTD_COMPLEX_MAPPINGS is not set 441CONFIG_MTD_COMPLEX_MAPPINGS=y
418CONFIG_MTD_PHYSMAP=y 442CONFIG_MTD_PHYSMAP=y
419# CONFIG_MTD_PHYSMAP_COMPAT is not set 443# CONFIG_MTD_PHYSMAP_COMPAT is not set
444# CONFIG_MTD_PCI is not set
445# CONFIG_MTD_GPIO_ADDR is not set
420# CONFIG_MTD_INTEL_VR_NOR is not set 446# CONFIG_MTD_INTEL_VR_NOR is not set
447CONFIG_MTD_RBTX4939=y
421# CONFIG_MTD_PLATRAM is not set 448# CONFIG_MTD_PLATRAM is not set
422 449
423# 450#
424# Self-contained MTD device drivers 451# Self-contained MTD device drivers
425# 452#
426# CONFIG_MTD_PMC551 is not set 453# CONFIG_MTD_PMC551 is not set
454# CONFIG_MTD_SST25L is not set
427# CONFIG_MTD_SLRAM is not set 455# CONFIG_MTD_SLRAM is not set
428# CONFIG_MTD_PHRAM is not set 456# CONFIG_MTD_PHRAM is not set
429# CONFIG_MTD_MTDRAM is not set 457# CONFIG_MTD_MTDRAM is not set
@@ -435,7 +463,15 @@ CONFIG_MTD_PHYSMAP=y
435# CONFIG_MTD_DOC2000 is not set 463# CONFIG_MTD_DOC2000 is not set
436# CONFIG_MTD_DOC2001 is not set 464# CONFIG_MTD_DOC2001 is not set
437# CONFIG_MTD_DOC2001PLUS is not set 465# CONFIG_MTD_DOC2001PLUS is not set
438# CONFIG_MTD_NAND is not set 466CONFIG_MTD_NAND=m
467# CONFIG_MTD_NAND_VERIFY_WRITE is not set
468# CONFIG_MTD_NAND_ECC_SMC is not set
469# CONFIG_MTD_NAND_MUSEUM_IDS is not set
470CONFIG_MTD_NAND_IDS=m
471# CONFIG_MTD_NAND_CAFE is not set
472# CONFIG_MTD_NAND_NANDSIM is not set
473# CONFIG_MTD_NAND_PLATFORM is not set
474CONFIG_MTD_NAND_TXX9NDFMC=m
439# CONFIG_MTD_ONENAND is not set 475# CONFIG_MTD_ONENAND is not set
440 476
441# 477#
@@ -471,6 +507,7 @@ CONFIG_IDE=y
471# 507#
472# Please see Documentation/ide/ide.txt for help/info on IDE drives 508# Please see Documentation/ide/ide.txt for help/info on IDE drives
473# 509#
510CONFIG_IDE_XFER_MODE=y
474CONFIG_IDE_TIMINGS=y 511CONFIG_IDE_TIMINGS=y
475# CONFIG_BLK_DEV_IDE_SATA is not set 512# CONFIG_BLK_DEV_IDE_SATA is not set
476CONFIG_IDE_GD=y 513CONFIG_IDE_GD=y
@@ -534,8 +571,13 @@ CONFIG_BLK_DEV_IDEDMA=y
534# 571#
535 572
536# 573#
537# A new alternative FireWire stack is available with EXPERIMENTAL=y 574# You can enable one or both FireWire driver stacks.
538# 575#
576
577#
578# See the help texts for more information.
579#
580# CONFIG_FIREWIRE is not set
539# CONFIG_IEEE1394 is not set 581# CONFIG_IEEE1394 is not set
540# CONFIG_I2O is not set 582# CONFIG_I2O is not set
541CONFIG_NETDEVICES=y 583CONFIG_NETDEVICES=y
@@ -574,6 +616,8 @@ CONFIG_MII=y
574# CONFIG_NET_VENDOR_3COM is not set 616# CONFIG_NET_VENDOR_3COM is not set
575CONFIG_SMC91X=y 617CONFIG_SMC91X=y
576# CONFIG_DM9000 is not set 618# CONFIG_DM9000 is not set
619# CONFIG_ETHOC is not set
620# CONFIG_DNET is not set
577# CONFIG_NET_TULIP is not set 621# CONFIG_NET_TULIP is not set
578# CONFIG_HP100 is not set 622# CONFIG_HP100 is not set
579CONFIG_NE2000=y 623CONFIG_NE2000=y
@@ -602,18 +646,15 @@ CONFIG_TC35815=y
602# CONFIG_SMSC9420 is not set 646# CONFIG_SMSC9420 is not set
603# CONFIG_SUNDANCE is not set 647# CONFIG_SUNDANCE is not set
604# CONFIG_TLAN is not set 648# CONFIG_TLAN is not set
649# CONFIG_KS8842 is not set
650# CONFIG_KS8851 is not set
651# CONFIG_KS8851_MLL is not set
605# CONFIG_VIA_RHINE is not set 652# CONFIG_VIA_RHINE is not set
606# CONFIG_ATL2 is not set 653# CONFIG_ATL2 is not set
607# CONFIG_NETDEV_1000 is not set 654# CONFIG_NETDEV_1000 is not set
608# CONFIG_NETDEV_10000 is not set 655# CONFIG_NETDEV_10000 is not set
609# CONFIG_TR is not set 656# CONFIG_TR is not set
610 657# CONFIG_WLAN is not set
611#
612# Wireless LAN
613#
614# CONFIG_WLAN_PRE80211 is not set
615# CONFIG_WLAN_80211 is not set
616# CONFIG_IWLWIFI_LEDS is not set
617 658
618# 659#
619# Enable WiMAX (Networking options) to see the WiMAX drivers 660# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -653,6 +694,7 @@ CONFIG_DEVKMEM=y
653# 694#
654# Non-8250 serial port support 695# Non-8250 serial port support
655# 696#
697# CONFIG_SERIAL_MAX3100 is not set
656CONFIG_SERIAL_CORE=y 698CONFIG_SERIAL_CORE=y
657CONFIG_SERIAL_CORE_CONSOLE=y 699CONFIG_SERIAL_CORE_CONSOLE=y
658CONFIG_SERIAL_TXX9=y 700CONFIG_SERIAL_TXX9=y
@@ -666,7 +708,9 @@ CONFIG_UNIX98_PTYS=y
666CONFIG_LEGACY_PTYS=y 708CONFIG_LEGACY_PTYS=y
667CONFIG_LEGACY_PTY_COUNT=256 709CONFIG_LEGACY_PTY_COUNT=256
668# CONFIG_IPMI_HANDLER is not set 710# CONFIG_IPMI_HANDLER is not set
669# CONFIG_HW_RANDOM is not set 711CONFIG_HW_RANDOM=m
712# CONFIG_HW_RANDOM_TIMERIOMEM is not set
713CONFIG_HW_RANDOM_TX4939=m
670# CONFIG_R3964 is not set 714# CONFIG_R3964 is not set
671# CONFIG_APPLICOM is not set 715# CONFIG_APPLICOM is not set
672# CONFIG_RAW_DRIVER is not set 716# CONFIG_RAW_DRIVER is not set
@@ -686,6 +730,10 @@ CONFIG_SPI_TXX9=y
686# SPI Protocol Masters 730# SPI Protocol Masters
687# 731#
688# CONFIG_SPI_TLE62X0 is not set 732# CONFIG_SPI_TLE62X0 is not set
733
734#
735# PPS support
736#
689CONFIG_ARCH_REQUIRE_GPIOLIB=y 737CONFIG_ARCH_REQUIRE_GPIOLIB=y
690CONFIG_GPIOLIB=y 738CONFIG_GPIOLIB=y
691 739
@@ -701,17 +749,22 @@ CONFIG_GPIOLIB=y
701# PCI GPIO expanders: 749# PCI GPIO expanders:
702# 750#
703# CONFIG_GPIO_BT8XX is not set 751# CONFIG_GPIO_BT8XX is not set
752# CONFIG_GPIO_LANGWELL is not set
704 753
705# 754#
706# SPI GPIO expanders: 755# SPI GPIO expanders:
707# 756#
708# CONFIG_GPIO_MAX7301 is not set 757# CONFIG_GPIO_MAX7301 is not set
709# CONFIG_GPIO_MCP23S08 is not set 758# CONFIG_GPIO_MCP23S08 is not set
759# CONFIG_GPIO_MC33880 is not set
760
761#
762# AC97 GPIO expanders:
763#
710# CONFIG_W1 is not set 764# CONFIG_W1 is not set
711# CONFIG_POWER_SUPPLY is not set 765# CONFIG_POWER_SUPPLY is not set
712# CONFIG_HWMON is not set 766# CONFIG_HWMON is not set
713# CONFIG_THERMAL is not set 767# CONFIG_THERMAL is not set
714# CONFIG_THERMAL_HWMON is not set
715CONFIG_WATCHDOG=y 768CONFIG_WATCHDOG=y
716# CONFIG_WATCHDOG_NOWAYOUT is not set 769# CONFIG_WATCHDOG_NOWAYOUT is not set
717 770
@@ -740,28 +793,17 @@ CONFIG_SSB_POSSIBLE=y
740# CONFIG_MFD_CORE is not set 793# CONFIG_MFD_CORE is not set
741# CONFIG_MFD_SM501 is not set 794# CONFIG_MFD_SM501 is not set
742# CONFIG_HTC_PASIC3 is not set 795# CONFIG_HTC_PASIC3 is not set
796# CONFIG_UCB1400_CORE is not set
743# CONFIG_MFD_TMIO is not set 797# CONFIG_MFD_TMIO is not set
798# CONFIG_MFD_MC13783 is not set
799# CONFIG_EZX_PCAP is not set
744# CONFIG_REGULATOR is not set 800# CONFIG_REGULATOR is not set
745 801# CONFIG_MEDIA_SUPPORT is not set
746#
747# Multimedia devices
748#
749
750#
751# Multimedia core support
752#
753# CONFIG_VIDEO_DEV is not set
754# CONFIG_DVB_CORE is not set
755# CONFIG_VIDEO_MEDIA is not set
756
757#
758# Multimedia drivers
759#
760# CONFIG_DAB is not set
761 802
762# 803#
763# Graphics support 804# Graphics support
764# 805#
806# CONFIG_VGA_ARB is not set
765# CONFIG_DRM is not set 807# CONFIG_DRM is not set
766# CONFIG_VGASTATE is not set 808# CONFIG_VGASTATE is not set
767# CONFIG_VIDEO_OUTPUT_CONTROL is not set 809# CONFIG_VIDEO_OUTPUT_CONTROL is not set
@@ -772,7 +814,42 @@ CONFIG_SSB_POSSIBLE=y
772# Display device support 814# Display device support
773# 815#
774# CONFIG_DISPLAY_SUPPORT is not set 816# CONFIG_DISPLAY_SUPPORT is not set
775# CONFIG_SOUND is not set 817CONFIG_SOUND=m
818# CONFIG_SOUND_OSS_CORE is not set
819CONFIG_SND=m
820CONFIG_SND_TIMER=m
821CONFIG_SND_PCM=m
822# CONFIG_SND_SEQUENCER is not set
823# CONFIG_SND_MIXER_OSS is not set
824# CONFIG_SND_PCM_OSS is not set
825# CONFIG_SND_HRTIMER is not set
826# CONFIG_SND_DYNAMIC_MINORS is not set
827# CONFIG_SND_SUPPORT_OLD_API is not set
828# CONFIG_SND_VERBOSE_PROCFS is not set
829# CONFIG_SND_VERBOSE_PRINTK is not set
830# CONFIG_SND_DEBUG is not set
831CONFIG_SND_VMASTER=y
832# CONFIG_SND_RAWMIDI_SEQ is not set
833# CONFIG_SND_OPL3_LIB_SEQ is not set
834# CONFIG_SND_OPL4_LIB_SEQ is not set
835# CONFIG_SND_SBAWE_SEQ is not set
836# CONFIG_SND_EMU10K1_SEQ is not set
837CONFIG_SND_AC97_CODEC=m
838# CONFIG_SND_DRIVERS is not set
839# CONFIG_SND_PCI is not set
840# CONFIG_SND_SPI is not set
841# CONFIG_SND_MIPS is not set
842CONFIG_SND_SOC=m
843CONFIG_SND_SOC_AC97_BUS=y
844CONFIG_SND_SOC_TXX9ACLC=m
845CONFIG_HAS_TXX9_ACLC=y
846CONFIG_SND_SOC_TXX9ACLC_AC97=m
847CONFIG_SND_SOC_TXX9ACLC_GENERIC=m
848CONFIG_SND_SOC_I2C_AND_SPI=m
849# CONFIG_SND_SOC_ALL_CODECS is not set
850CONFIG_SND_SOC_AC97_CODEC=m
851# CONFIG_SOUND_PRIME is not set
852CONFIG_AC97_BUS=m
776# CONFIG_USB_SUPPORT is not set 853# CONFIG_USB_SUPPORT is not set
777# CONFIG_MMC is not set 854# CONFIG_MMC is not set
778# CONFIG_MEMSTICK is not set 855# CONFIG_MEMSTICK is not set
@@ -783,6 +860,8 @@ CONFIG_LEDS_CLASS=y
783# LED drivers 860# LED drivers
784# 861#
785CONFIG_LEDS_GPIO=y 862CONFIG_LEDS_GPIO=y
863CONFIG_LEDS_GPIO_PLATFORM=y
864# CONFIG_LEDS_DAC124S085 is not set
786 865
787# 866#
788# LED Triggers 867# LED Triggers
@@ -792,7 +871,12 @@ CONFIG_LEDS_TRIGGERS=y
792CONFIG_LEDS_TRIGGER_IDE_DISK=y 871CONFIG_LEDS_TRIGGER_IDE_DISK=y
793CONFIG_LEDS_TRIGGER_HEARTBEAT=y 872CONFIG_LEDS_TRIGGER_HEARTBEAT=y
794# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set 873# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
874# CONFIG_LEDS_TRIGGER_GPIO is not set
795# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set 875# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
876
877#
878# iptables trigger is under Netfilter config (LED target)
879#
796# CONFIG_ACCESSIBILITY is not set 880# CONFIG_ACCESSIBILITY is not set
797# CONFIG_INFINIBAND is not set 881# CONFIG_INFINIBAND is not set
798CONFIG_RTC_LIB=y 882CONFIG_RTC_LIB=y
@@ -820,6 +904,7 @@ CONFIG_RTC_INTF_DEV_UIE_EMUL=y
820# CONFIG_RTC_DRV_R9701 is not set 904# CONFIG_RTC_DRV_R9701 is not set
821CONFIG_RTC_DRV_RS5C348=y 905CONFIG_RTC_DRV_RS5C348=y
822# CONFIG_RTC_DRV_DS3234 is not set 906# CONFIG_RTC_DRV_DS3234 is not set
907# CONFIG_RTC_DRV_PCF2123 is not set
823 908
824# 909#
825# Platform RTC drivers 910# Platform RTC drivers
@@ -840,8 +925,26 @@ CONFIG_RTC_DRV_DS1742=y
840# on-CPU RTC drivers 925# on-CPU RTC drivers
841# 926#
842CONFIG_RTC_DRV_TX4939=y 927CONFIG_RTC_DRV_TX4939=y
843# CONFIG_DMADEVICES is not set 928CONFIG_DMADEVICES=y
929
930#
931# DMA Devices
932#
933CONFIG_TXX9_DMAC=m
934CONFIG_DMA_ENGINE=y
935
936#
937# DMA Clients
938#
939# CONFIG_NET_DMA is not set
940# CONFIG_ASYNC_TX_DMA is not set
941# CONFIG_DMATEST is not set
942# CONFIG_AUXDISPLAY is not set
844# CONFIG_UIO is not set 943# CONFIG_UIO is not set
944
945#
946# TI VLYNQ
947#
845# CONFIG_STAGING is not set 948# CONFIG_STAGING is not set
846 949
847# 950#
@@ -853,9 +956,10 @@ CONFIG_RTC_DRV_TX4939=y
853# CONFIG_REISERFS_FS is not set 956# CONFIG_REISERFS_FS is not set
854# CONFIG_JFS_FS is not set 957# CONFIG_JFS_FS is not set
855CONFIG_FS_POSIX_ACL=y 958CONFIG_FS_POSIX_ACL=y
856CONFIG_FILE_LOCKING=y
857# CONFIG_XFS_FS is not set 959# CONFIG_XFS_FS is not set
858# CONFIG_OCFS2_FS is not set 960# CONFIG_OCFS2_FS is not set
961CONFIG_FILE_LOCKING=y
962CONFIG_FSNOTIFY=y
859# CONFIG_DNOTIFY is not set 963# CONFIG_DNOTIFY is not set
860CONFIG_INOTIFY=y 964CONFIG_INOTIFY=y
861CONFIG_INOTIFY_USER=y 965CONFIG_INOTIFY_USER=y
@@ -866,6 +970,10 @@ CONFIG_INOTIFY_USER=y
866CONFIG_GENERIC_ACL=y 970CONFIG_GENERIC_ACL=y
867 971
868# 972#
973# Caches
974#
975
976#
869# CD-ROM/DVD Filesystems 977# CD-ROM/DVD Filesystems
870# 978#
871# CONFIG_ISO9660_FS is not set 979# CONFIG_ISO9660_FS is not set
@@ -890,7 +998,27 @@ CONFIG_TMPFS=y
890CONFIG_TMPFS_POSIX_ACL=y 998CONFIG_TMPFS_POSIX_ACL=y
891# CONFIG_HUGETLB_PAGE is not set 999# CONFIG_HUGETLB_PAGE is not set
892# CONFIG_CONFIGFS_FS is not set 1000# CONFIG_CONFIGFS_FS is not set
893# CONFIG_MISC_FILESYSTEMS is not set 1001CONFIG_MISC_FILESYSTEMS=y
1002# CONFIG_HFSPLUS_FS is not set
1003CONFIG_JFFS2_FS=m
1004CONFIG_JFFS2_FS_DEBUG=0
1005CONFIG_JFFS2_FS_WRITEBUFFER=y
1006# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
1007# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
1008CONFIG_JFFS2_ZLIB=y
1009# CONFIG_JFFS2_LZO is not set
1010CONFIG_JFFS2_RTIME=y
1011# CONFIG_JFFS2_RUBIN is not set
1012# CONFIG_CRAMFS is not set
1013# CONFIG_SQUASHFS is not set
1014# CONFIG_VXFS_FS is not set
1015# CONFIG_MINIX_FS is not set
1016# CONFIG_OMFS_FS is not set
1017# CONFIG_HPFS_FS is not set
1018# CONFIG_QNX4FS_FS is not set
1019# CONFIG_ROMFS_FS is not set
1020# CONFIG_SYSV_FS is not set
1021# CONFIG_UFS_FS is not set
894CONFIG_NETWORK_FILESYSTEMS=y 1022CONFIG_NETWORK_FILESYSTEMS=y
895CONFIG_NFS_FS=y 1023CONFIG_NFS_FS=y
896CONFIG_NFS_V3=y 1024CONFIG_NFS_V3=y
@@ -922,6 +1050,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
922CONFIG_ENABLE_MUST_CHECK=y 1050CONFIG_ENABLE_MUST_CHECK=y
923CONFIG_FRAME_WARN=1024 1051CONFIG_FRAME_WARN=1024
924# CONFIG_MAGIC_SYSRQ is not set 1052# CONFIG_MAGIC_SYSRQ is not set
1053CONFIG_STRIP_ASM_SYMS=y
925# CONFIG_UNUSED_SYMBOLS is not set 1054# CONFIG_UNUSED_SYMBOLS is not set
926CONFIG_DEBUG_FS=y 1055CONFIG_DEBUG_FS=y
927# CONFIG_HEADERS_CHECK is not set 1056# CONFIG_HEADERS_CHECK is not set
@@ -929,11 +1058,9 @@ CONFIG_DEBUG_FS=y
929# CONFIG_DEBUG_MEMORY_INIT is not set 1058# CONFIG_DEBUG_MEMORY_INIT is not set
930# CONFIG_RCU_CPU_STALL_DETECTOR is not set 1059# CONFIG_RCU_CPU_STALL_DETECTOR is not set
931CONFIG_SYSCTL_SYSCALL_CHECK=y 1060CONFIG_SYSCTL_SYSCALL_CHECK=y
932 1061CONFIG_TRACING_SUPPORT=y
933# 1062# CONFIG_FTRACE is not set
934# Tracers 1063# CONFIG_DYNAMIC_DEBUG is not set
935#
936# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
937# CONFIG_SAMPLES is not set 1064# CONFIG_SAMPLES is not set
938CONFIG_HAVE_ARCH_KGDB=y 1065CONFIG_HAVE_ARCH_KGDB=y
939CONFIG_CMDLINE="" 1066CONFIG_CMDLINE=""
@@ -946,6 +1073,7 @@ CONFIG_CMDLINE=""
946# CONFIG_SECURITYFS is not set 1073# CONFIG_SECURITYFS is not set
947# CONFIG_SECURITY_FILE_CAPABILITIES is not set 1074# CONFIG_SECURITY_FILE_CAPABILITIES is not set
948# CONFIG_CRYPTO is not set 1075# CONFIG_CRYPTO is not set
1076# CONFIG_BINARY_PRINTF is not set
949 1077
950# 1078#
951# Library routines 1079# Library routines
@@ -959,6 +1087,10 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
959CONFIG_CRC32=y 1087CONFIG_CRC32=y
960# CONFIG_CRC7 is not set 1088# CONFIG_CRC7 is not set
961# CONFIG_LIBCRC32C is not set 1089# CONFIG_LIBCRC32C is not set
1090CONFIG_ZLIB_INFLATE=y
1091CONFIG_ZLIB_DEFLATE=m
1092CONFIG_DECOMPRESS_GZIP=y
962CONFIG_HAS_IOMEM=y 1093CONFIG_HAS_IOMEM=y
963CONFIG_HAS_IOPORT=y 1094CONFIG_HAS_IOPORT=y
964CONFIG_HAS_DMA=y 1095CONFIG_HAS_DMA=y
1096CONFIG_NLATTR=y
diff --git a/arch/mips/include/asm/bug.h b/arch/mips/include/asm/bug.h
index 6cf29c26e873..540c98a810d1 100644
--- a/arch/mips/include/asm/bug.h
+++ b/arch/mips/include/asm/bug.h
@@ -11,9 +11,7 @@
 static inline void __noreturn BUG(void)
 {
 	__asm__ __volatile__("break %0" : : "i" (BRK_BUG));
-	/* Fool GCC into thinking the function doesn't return. */
-	while (1)
-		;
+	unreachable();
 }
 
 #define HAVE_ARCH_BUG
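
A note on the BUG() changes in this series (the same substitution appears further down for s390 and x86): the hand-rolled infinite loop that "fooled GCC" into accepting a __noreturn function is replaced by the new unreachable() annotation. A minimal sketch of what that macro is expected to expand to, assuming the compiler.h/compiler-gcc4.h definitions that accompany this work (GCC 4.5 and later get the builtin, older compilers keep an empty loop); demo_bug() is an invented name for illustration:

    /* Sketch only; the real definitions live in linux/compiler*.h. */
    #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
    # define unreachable()  __builtin_unreachable()
    #else
    # define unreachable()  do { } while (1)
    #endif

    static inline void __attribute__((noreturn)) demo_bug(void)
    {
            /* the architecture's trap instruction would go here */
            unreachable();  /* tells the optimizer control never gets past the trap */
    }

With the builtin variant the compiler can drop the dead loop and the "function does return" warning at the same time.
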
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index d16afddb09a9..664ba53dc32a 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -3,6 +3,7 @@
3 3
4#include <asm/scatterlist.h> 4#include <asm/scatterlist.h>
5#include <asm/cache.h> 5#include <asm/cache.h>
6#include <asm-generic/dma-coherent.h>
6 7
7void *dma_alloc_noncoherent(struct device *dev, size_t size, 8void *dma_alloc_noncoherent(struct device *dev, size_t size,
8 dma_addr_t *dma_handle, gfp_t flag); 9 dma_addr_t *dma_handle, gfp_t flag);
@@ -73,14 +74,4 @@ extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
73extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 74extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
74 enum dma_data_direction direction); 75 enum dma_data_direction direction);
75 76
76#if 0
77#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
78
79extern int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
80 dma_addr_t device_addr, size_t size, int flags);
81extern void dma_release_declared_memory(struct device *dev);
82extern void * dma_mark_declared_memory_occupied(struct device *dev,
83 dma_addr_t device_addr, size_t size);
84#endif
85
86#endif /* _ASM_DMA_MAPPING_H */ 77#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/mips/include/asm/mman.h b/arch/mips/include/asm/mman.h
index a2250f390a29..c892bfb3e2c1 100644
--- a/arch/mips/include/asm/mman.h
+++ b/arch/mips/include/asm/mman.h
@@ -75,6 +75,7 @@
75 75
76#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ 76#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
77#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ 77#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
78#define MADV_HWPOISON 100 /* poison a page for testing */
78 79
79/* compatibility flags */ 80/* compatibility flags */
80#define MAP_FILE 0 81#define MAP_FILE 0
diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
index fcf5f98d90cc..83b5509e09e8 100644
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -12,6 +12,7 @@
12#ifndef _ASM_SYSTEM_H 12#ifndef _ASM_SYSTEM_H
13#define _ASM_SYSTEM_H 13#define _ASM_SYSTEM_H
14 14
15#include <linux/kernel.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/irqflags.h> 17#include <linux/irqflags.h>
17 18
@@ -193,10 +194,6 @@ extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 v
193#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels 194#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
194#endif 195#endif
195 196
196/* This function doesn't exist, so you'll get a linker error
197 if something tries to do an invalid xchg(). */
198extern void __xchg_called_with_bad_pointer(void);
199
200static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) 197static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
201{ 198{
202 switch (size) { 199 switch (size) {
@@ -205,11 +202,17 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
205 case 8: 202 case 8:
206 return __xchg_u64(ptr, x); 203 return __xchg_u64(ptr, x);
207 } 204 }
208 __xchg_called_with_bad_pointer(); 205
209 return x; 206 return x;
210} 207}
211 208
212#define xchg(ptr, x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) 209#define xchg(ptr, x) \
210({ \
211 BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \
212 \
213 ((__typeof__(*(ptr))) \
214 __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
215})
213 216
214extern void set_handler(unsigned long offset, void *addr, unsigned long len); 217extern void set_handler(unsigned long offset, void *addr, unsigned long len);
215extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len); 218extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
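
The xchg() rework above trades a link-time failure (the deliberately undefined __xchg_called_with_bad_pointer()) for a compile-time one: BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc) fires whenever the operand size has a bit set outside 0x4 | 0x8, i.e. anything but the 4- and 8-byte cases __xchg() actually handles. A small user-space sketch of the size test; XCHG_SIZE_REJECTED and the use of <assert.h> are illustrative, not from the patch:

    #include <assert.h>
    #include <stdint.h>

    /* 0xc == 0b1100: only the "4" and "8" bits may be set in the size. */
    #define XCHG_SIZE_REJECTED(n)   ((n) & ~(unsigned long)0xc)

    int main(void)
    {
            assert(XCHG_SIZE_REJECTED(sizeof(uint8_t)));    /* 1 -> BUILD_BUG_ON fires   */
            assert(XCHG_SIZE_REJECTED(sizeof(uint16_t)));   /* 2 -> BUILD_BUG_ON fires   */
            assert(!XCHG_SIZE_REJECTED(sizeof(uint32_t)));  /* 4 -> __xchg_u32() handles */
            assert(!XCHG_SIZE_REJECTED(sizeof(uint64_t)));  /* 8 -> __xchg_u64() handles */
            return 0;
    }

Since this is only a mask test, an odd-sized operand such as a 12-byte struct would still pass it (12 & ~0xc == 0) and __xchg() would fall through and return x unchanged, exactly as before; the practical gain is that 1- and 2-byte operands now fail at compile time rather than at link time.
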
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 98bd7de75778..b102e4f1630e 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -173,11 +173,12 @@ void smtc_distribute_timer(int vpe)
173 unsigned int mtflags; 173 unsigned int mtflags;
174 int cpu; 174 int cpu;
175 struct clock_event_device *cd; 175 struct clock_event_device *cd;
176 unsigned long nextstamp = 0L; 176 unsigned long nextstamp;
177 unsigned long reference; 177 unsigned long reference;
178 178
179 179
180repeat: 180repeat:
181 nextstamp = 0L;
181 for_each_online_cpu(cpu) { 182 for_each_online_cpu(cpu) {
182 /* 183 /*
183 * Find virtual CPUs within the current VPE who have 184 * Find virtual CPUs within the current VPE who have
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 3fe1fcfa2e73..fe0d79805603 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -306,6 +306,7 @@ static inline int mips_atomic_set(struct pt_regs *regs,
306 306
307 if (cpu_has_llsc && R10000_LLSC_WAR) { 307 if (cpu_has_llsc && R10000_LLSC_WAR) {
308 __asm__ __volatile__ ( 308 __asm__ __volatile__ (
309 " .set mips3 \n"
309 " li %[err], 0 \n" 310 " li %[err], 0 \n"
310 "1: ll %[old], (%[addr]) \n" 311 "1: ll %[old], (%[addr]) \n"
311 " move %[tmp], %[new] \n" 312 " move %[tmp], %[new] \n"
@@ -320,6 +321,7 @@ static inline int mips_atomic_set(struct pt_regs *regs,
320 " "STR(PTR)" 1b, 4b \n" 321 " "STR(PTR)" 1b, 4b \n"
321 " "STR(PTR)" 2b, 4b \n" 322 " "STR(PTR)" 2b, 4b \n"
322 " .previous \n" 323 " .previous \n"
324 " .set mips0 \n"
323 : [old] "=&r" (old), 325 : [old] "=&r" (old),
324 [err] "=&r" (err), 326 [err] "=&r" (err),
325 [tmp] "=&r" (tmp) 327 [tmp] "=&r" (tmp)
@@ -329,6 +331,7 @@ static inline int mips_atomic_set(struct pt_regs *regs,
329 : "memory"); 331 : "memory");
330 } else if (cpu_has_llsc) { 332 } else if (cpu_has_llsc) {
331 __asm__ __volatile__ ( 333 __asm__ __volatile__ (
334 " .set mips3 \n"
332 " li %[err], 0 \n" 335 " li %[err], 0 \n"
333 "1: ll %[old], (%[addr]) \n" 336 "1: ll %[old], (%[addr]) \n"
334 " move %[tmp], %[new] \n" 337 " move %[tmp], %[new] \n"
@@ -347,6 +350,7 @@ static inline int mips_atomic_set(struct pt_regs *regs,
347 " "STR(PTR)" 1b, 5b \n" 350 " "STR(PTR)" 1b, 5b \n"
348 " "STR(PTR)" 2b, 5b \n" 351 " "STR(PTR)" 2b, 5b \n"
349 " .previous \n" 352 " .previous \n"
353 " .set mips0 \n"
350 : [old] "=&r" (old), 354 : [old] "=&r" (old),
351 [err] "=&r" (err), 355 [err] "=&r" (err),
352 [tmp] "=&r" (tmp) 356 [tmp] "=&r" (tmp)
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 890f77927d62..454b53924490 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -163,33 +163,34 @@ static int isBranchInstr(mips_instruction * i)
 
 /*
  * In the Linux kernel, we support selection of FPR format on the
- * basis of the Status.FR bit. This does imply that, if a full 32
- * FPRs are desired, there needs to be a flip-flop that can be written
- * to one at that bit position. In any case, O32 MIPS ABI uses
- * only the even FPRs (Status.FR = 0).
+ * basis of the Status.FR bit. If an FPU is not present, the FR bit
+ * is hardwired to zero, which would imply a 32-bit FPU even for
+ * 64-bit CPUs. For 64-bit kernels with no FPU we use TIF_32BIT_REGS
+ * as a proxy for the FR bit so that a 64-bit FPU is emulated. In any
+ * case, for a 32-bit kernel which uses the O32 MIPS ABI, only the
+ * even FPRs are used (Status.FR = 0).
  */
-
-#define CP0_STATUS_FR_SUPPORT
-
-#ifdef CP0_STATUS_FR_SUPPORT
-#define FR_BIT ST0_FR
+static inline int cop1_64bit(struct pt_regs *xcp)
+{
+	if (cpu_has_fpu)
+		return xcp->cp0_status & ST0_FR;
+#ifdef CONFIG_64BIT
+	return !test_thread_flag(TIF_32BIT_REGS);
 #else
-#define FR_BIT 0
+	return 0;
 #endif
+}
+
+#define SIFROMREG(si, x) ((si) = cop1_64bit(xcp) || !(x & 1) ? \
+			(int)ctx->fpr[x] : (int)(ctx->fpr[x & ~1] >> 32))
 
-#define SIFROMREG(si, x) ((si) = \
-			(xcp->cp0_status & FR_BIT) || !(x & 1) ? \
-			(int)ctx->fpr[x] : \
-			(int)(ctx->fpr[x & ~1] >> 32 ))
-#define SITOREG(si, x) (ctx->fpr[x & ~((xcp->cp0_status & FR_BIT) == 0)] = \
-			(xcp->cp0_status & FR_BIT) || !(x & 1) ? \
+#define SITOREG(si, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = \
+			cop1_64bit(xcp) || !(x & 1) ? \
 			ctx->fpr[x & ~1] >> 32 << 32 | (u32)(si) : \
 			ctx->fpr[x & ~1] << 32 >> 32 | (u64)(si) << 32)
 
-#define DIFROMREG(di, x) ((di) = \
-			ctx->fpr[x & ~((xcp->cp0_status & FR_BIT) == 0)])
-#define DITOREG(di, x) (ctx->fpr[x & ~((xcp->cp0_status & FR_BIT) == 0)] \
-			= (di))
+#define DIFROMREG(di, x) ((di) = ctx->fpr[x & ~(cop1_64bit(xcp) == 0)])
+#define DITOREG(di, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = (di))
 
 #define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
 #define SPTOREG(sp, x) SITOREG((sp).bits, x)
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 7e48e76148aa..9367e33fbd18 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -90,6 +90,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
90{ 90{
91 void *ret; 91 void *ret;
92 92
93 if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
94 return ret;
95
93 gfp = massage_gfp_flags(dev, gfp); 96 gfp = massage_gfp_flags(dev, gfp);
94 97
95 ret = (void *) __get_free_pages(gfp, get_order(size)); 98 ret = (void *) __get_free_pages(gfp, get_order(size));
@@ -122,6 +125,10 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
122 dma_addr_t dma_handle) 125 dma_addr_t dma_handle)
123{ 126{
124 unsigned long addr = (unsigned long) vaddr; 127 unsigned long addr = (unsigned long) vaddr;
128 int order = get_order(size);
129
130 if (dma_release_from_coherent(dev, order, vaddr))
131 return;
125 132
126 plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); 133 plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
127 134
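
Together with the asm-generic/dma-coherent.h include added to dma-mapping.h earlier in this diff, these two hooks make MIPS honour memory that a driver has declared with dma_declare_coherent_memory(), the API the removed "#if 0" block only advertised: dma_alloc_coherent() now tries dma_alloc_from_coherent() before falling back to __get_free_pages(), and dma_free_coherent() gives pool memory back through dma_release_from_coherent(). A hedged sketch of the resulting call flow in a platform driver; the device, the physical address and the sizes are invented for illustration:

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>
    #include <linux/gfp.h>

    /* probe() of a hypothetical device with 1 MiB of on-board RAM that
     * must service all of its DMA. */
    static int demo_probe(struct platform_device *pdev)
    {
            dma_addr_t handle;
            void *buf;

            /* Carve the on-board RAM into a per-device coherent pool. */
            if (!dma_declare_coherent_memory(&pdev->dev, 0x1f400000, 0x1f400000,
                                             1 << 20, DMA_MEMORY_MAP))
                    return -ENOMEM;

            /* Now served from the pool via dma_alloc_from_coherent(); only
             * devices without a pool hit the normal page allocator. */
            buf = dma_alloc_coherent(&pdev->dev, 4096, &handle, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            dma_free_coherent(&pdev->dev, 4096, buf, handle);  /* returns to the pool */
            return 0;
    }
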
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 61888ff72c87..9035c64bc5ed 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -54,7 +54,8 @@ static struct prom_pmemblock * __init prom_getmdesc(void)
54{ 54{
55 char *memsize_str; 55 char *memsize_str;
56 unsigned int memsize; 56 unsigned int memsize;
57 char cmdline[CL_SIZE], *ptr; 57 char *ptr;
58 static char cmdline[CL_SIZE] __initdata;
58 59
59 /* otherwise look in the environment */ 60 /* otherwise look in the environment */
60 memsize_str = prom_getenv("memsize"); 61 memsize_str = prom_getenv("memsize");
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 9f40e1ff9b4f..041fc1afc3f4 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -110,7 +110,6 @@ static struct korina_device korina_dev0_data = {
110static struct platform_device korina_dev0 = { 110static struct platform_device korina_dev0 = {
111 .id = -1, 111 .id = -1,
112 .name = "korina", 112 .name = "korina",
113 .dev.driver_data = &korina_dev0_data,
114 .resource = korina_dev0_res, 113 .resource = korina_dev0_res,
115 .num_resources = ARRAY_SIZE(korina_dev0_res), 114 .num_resources = ARRAY_SIZE(korina_dev0_res),
116}; 115};
@@ -332,6 +331,8 @@ static int __init plat_setup_devices(void)
332 /* set the uart clock to the current cpu frequency */ 331 /* set the uart clock to the current cpu frequency */
333 rb532_uart_res[0].uartclk = idt_cpu_freq; 332 rb532_uart_res[0].uartclk = idt_cpu_freq;
334 333
334 dev_set_drvdata(&korina_dev0.dev, &korina_dev0_data);
335
335 return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs)); 336 return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
336} 337}
337 338
diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c
index 46ca24dbcc2d..ad5bd1097974 100644
--- a/arch/mips/rb532/prom.c
+++ b/arch/mips/rb532/prom.c
@@ -69,7 +69,7 @@ static inline unsigned long tag2ul(char *arg, const char *tag)
69 69
70void __init prom_setup_cmdline(void) 70void __init prom_setup_cmdline(void)
71{ 71{
72 char cmd_line[CL_SIZE]; 72 static char cmd_line[CL_SIZE] __initdata;
73 char *cp, *board; 73 char *cp, *board;
74 int prom_argc; 74 int prom_argc;
75 char **prom_argv, **prom_envp; 75 char **prom_argv, **prom_envp;
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index e10184c1b3e1..d66802edebb2 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -160,7 +160,7 @@ static void __init prom_init_cmdline(void)
160 int argc; 160 int argc;
161 int *argv32; 161 int *argv32;
162 int i; /* Always ignore the "-c" at argv[0] */ 162 int i; /* Always ignore the "-c" at argv[0] */
163 char builtin[CL_SIZE]; 163 static char builtin[CL_SIZE] __initdata;
164 164
165 if (fw_arg0 >= CKSEG0 || fw_arg1 < CKSEG0) { 165 if (fw_arg0 >= CKSEG0 || fw_arg1 < CKSEG0) {
166 /* 166 /*
@@ -315,7 +315,7 @@ static inline void txx9_cache_fixup(void)
315 315
316static void __init preprocess_cmdline(void) 316static void __init preprocess_cmdline(void)
317{ 317{
318 char cmdline[CL_SIZE]; 318 static char cmdline[CL_SIZE] __initdata;
319 char *s; 319 char *s;
320 320
321 strcpy(cmdline, arcs_cmdline); 321 strcpy(cmdline, arcs_cmdline);
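
The Malta, rb532 and txx9 command-line helpers above all get the same treatment: a CL_SIZE scratch array (typically a few hundred bytes) that used to live in an early-boot stack frame becomes a function-local static marked __initdata, so it sits in .init.data, is reclaimed once the init sections are freed, and can no longer threaten the small boot stack. A sketch of the pattern with an invented helper; COMMAND_LINE_SIZE stands in for CL_SIZE and the appended argument is made up:

    #include <linux/init.h>
    #include <linux/string.h>
    #include <asm/setup.h>          /* COMMAND_LINE_SIZE */

    static void __init demo_build_cmdline(const char *from_prom)
    {
            /* Was "char buf[COMMAND_LINE_SIZE];" on the stack.  As an
             * __initdata static the buffer lives in .init.data instead
             * and the memory is freed after boot. */
            static char buf[COMMAND_LINE_SIZE] __initdata;

            strlcpy(buf, from_prom, sizeof(buf));
            strlcat(buf, " earlyprintk", sizeof(buf));      /* invented extra argument */
    }
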
diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h
index 5f39d5597ced..1e1c824764ee 100644
--- a/arch/parisc/include/asm/fcntl.h
+++ b/arch/parisc/include/asm/fcntl.h
@@ -28,8 +28,6 @@
28#define F_SETOWN 12 /* for sockets. */ 28#define F_SETOWN 12 /* for sockets. */
29#define F_SETSIG 13 /* for sockets. */ 29#define F_SETSIG 13 /* for sockets. */
30#define F_GETSIG 14 /* for sockets. */ 30#define F_GETSIG 14 /* for sockets. */
31#define F_GETOWN_EX 15
32#define F_SETOWN_EX 16
33 31
34/* for posix fcntl() and lockf() */ 32/* for posix fcntl() and lockf() */
35#define F_RDLCK 01 33#define F_RDLCK 01
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 69dad5a850a8..a36799e85693 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -28,7 +28,7 @@
28#define dbg(x...) 28#define dbg(x...)
29#endif 29#endif
30 30
31#define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000) 31#define KERNEL_START (KERNEL_BINARY_TEXT_START)
32 32
33extern struct unwind_table_entry __start___unwind[]; 33extern struct unwind_table_entry __start___unwind[];
34extern struct unwind_table_entry __stop___unwind[]; 34extern struct unwind_table_entry __stop___unwind[];
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index fda4baa059b5..9dab4a4e09f7 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -78,9 +78,6 @@ SECTIONS
78 */ 78 */
79 . = ALIGN(PAGE_SIZE); 79 . = ALIGN(PAGE_SIZE);
80 data_start = .; 80 data_start = .;
81 EXCEPTION_TABLE(16)
82
83 NOTES
84 81
85 /* unwind info */ 82 /* unwind info */
86 .PARISC.unwind : { 83 .PARISC.unwind : {
@@ -89,6 +86,9 @@ SECTIONS
89 __stop___unwind = .; 86 __stop___unwind = .;
90 } 87 }
91 88
89 EXCEPTION_TABLE(16)
90 NOTES
91
92 /* Data */ 92 /* Data */
93 RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) 93 RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
94 94
diff --git a/arch/powerpc/boot/addRamDisk.c b/arch/powerpc/boot/addRamDisk.c
index c02a99952be7..893f446cbd22 100644
--- a/arch/powerpc/boot/addRamDisk.c
+++ b/arch/powerpc/boot/addRamDisk.c
@@ -58,7 +58,7 @@ static int check_elf64(void *p, int size, struct addr_range *r)
58 58
59 return 64; 59 return 64;
60} 60}
61void get4k(FILE *file, char *buf ) 61static void get4k(FILE *file, char *buf )
62{ 62{
63 unsigned j; 63 unsigned j;
64 unsigned num = fread(buf, 1, 4096, file); 64 unsigned num = fread(buf, 1, 4096, file);
@@ -66,12 +66,12 @@ void get4k(FILE *file, char *buf )
66 buf[j] = 0; 66 buf[j] = 0;
67} 67}
68 68
69void put4k(FILE *file, char *buf ) 69static void put4k(FILE *file, char *buf )
70{ 70{
71 fwrite(buf, 1, 4096, file); 71 fwrite(buf, 1, 4096, file);
72} 72}
73 73
74void death(const char *msg, FILE *fdesc, const char *fname) 74static void death(const char *msg, FILE *fdesc, const char *fname)
75{ 75{
76 fprintf(stderr, msg); 76 fprintf(stderr, msg);
77 fclose(fdesc); 77 fclose(fdesc);
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
index b6bac6f61c16..916369575c97 100644
--- a/arch/powerpc/include/asm/kmap_types.h
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -29,5 +29,16 @@ enum km_type {
29 KM_TYPE_NR 29 KM_TYPE_NR
30}; 30};
31 31
32/*
33 * This is a temporary build fix that (so they say on lkml....) should no longer
34 * be required after 2.6.33, because of changes planned to the kmap code.
35 * Let's try to remove this cruft then.
36 */
37#ifdef CONFIG_DEBUG_HIGHMEM
38#define KM_NMI (-1)
39#define KM_NMI_PTE (-1)
40#define KM_IRQ_PTE (-1)
41#endif
42
32#endif /* __KERNEL__ */ 43#endif /* __KERNEL__ */
33#endif /* _ASM_POWERPC_KMAP_TYPES_H */ 44#endif /* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 53bcf3d792db..b152de3e64d4 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -345,7 +345,7 @@ void __init setup_arch(char **cmdline_p)
345 345
346#ifdef CONFIG_SWIOTLB 346#ifdef CONFIG_SWIOTLB
347 if (ppc_swiotlb_enable) 347 if (ppc_swiotlb_enable)
348 swiotlb_init(); 348 swiotlb_init(1);
349#endif 349#endif
350 350
351 paging_init(); 351 paging_init();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 04f638d82fb3..df2c9e932b37 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -550,7 +550,7 @@ void __init setup_arch(char **cmdline_p)
550 550
551#ifdef CONFIG_SWIOTLB 551#ifdef CONFIG_SWIOTLB
552 if (ppc_swiotlb_enable) 552 if (ppc_swiotlb_enable)
553 swiotlb_init(); 553 swiotlb_init(1);
554#endif 554#endif
555 555
556 paging_init(); 556 paging_init();
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 43c0acad7160..16c673096a22 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -95,6 +95,34 @@ config S390
95 select HAVE_ARCH_TRACEHOOK 95 select HAVE_ARCH_TRACEHOOK
96 select INIT_ALL_POSSIBLE 96 select INIT_ALL_POSSIBLE
97 select HAVE_PERF_EVENTS 97 select HAVE_PERF_EVENTS
98 select ARCH_INLINE_SPIN_TRYLOCK
99 select ARCH_INLINE_SPIN_TRYLOCK_BH
100 select ARCH_INLINE_SPIN_LOCK
101 select ARCH_INLINE_SPIN_LOCK_BH
102 select ARCH_INLINE_SPIN_LOCK_IRQ
103 select ARCH_INLINE_SPIN_LOCK_IRQSAVE
104 select ARCH_INLINE_SPIN_UNLOCK
105 select ARCH_INLINE_SPIN_UNLOCK_BH
106 select ARCH_INLINE_SPIN_UNLOCK_IRQ
107 select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
108 select ARCH_INLINE_READ_TRYLOCK
109 select ARCH_INLINE_READ_LOCK
110 select ARCH_INLINE_READ_LOCK_BH
111 select ARCH_INLINE_READ_LOCK_IRQ
112 select ARCH_INLINE_READ_LOCK_IRQSAVE
113 select ARCH_INLINE_READ_UNLOCK
114 select ARCH_INLINE_READ_UNLOCK_BH
115 select ARCH_INLINE_READ_UNLOCK_IRQ
116 select ARCH_INLINE_READ_UNLOCK_IRQRESTORE
117 select ARCH_INLINE_WRITE_TRYLOCK
118 select ARCH_INLINE_WRITE_LOCK
119 select ARCH_INLINE_WRITE_LOCK_BH
120 select ARCH_INLINE_WRITE_LOCK_IRQ
121 select ARCH_INLINE_WRITE_LOCK_IRQSAVE
122 select ARCH_INLINE_WRITE_UNLOCK
123 select ARCH_INLINE_WRITE_UNLOCK_BH
124 select ARCH_INLINE_WRITE_UNLOCK_IRQ
125 select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
98 126
99config SCHED_OMIT_FRAME_POINTER 127config SCHED_OMIT_FRAME_POINTER
100 bool 128 bool
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 7efd0abe8887..efb74fd5156e 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -49,7 +49,7 @@
49 49
50#define BUG() do { \ 50#define BUG() do { \
51 __EMIT_BUG(0); \ 51 __EMIT_BUG(0); \
52 for (;;); \ 52 unreachable(); \
53} while (0) 53} while (0)
54 54
55#define WARN_ON(x) ({ \ 55#define WARN_ON(x) ({ \
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 41ce6861174e..c9af0d19c7ab 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -191,33 +191,4 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
191#define _raw_read_relax(lock) cpu_relax() 191#define _raw_read_relax(lock) cpu_relax()
192#define _raw_write_relax(lock) cpu_relax() 192#define _raw_write_relax(lock) cpu_relax()
193 193
194#define __always_inline__spin_lock
195#define __always_inline__read_lock
196#define __always_inline__write_lock
197#define __always_inline__spin_lock_bh
198#define __always_inline__read_lock_bh
199#define __always_inline__write_lock_bh
200#define __always_inline__spin_lock_irq
201#define __always_inline__read_lock_irq
202#define __always_inline__write_lock_irq
203#define __always_inline__spin_lock_irqsave
204#define __always_inline__read_lock_irqsave
205#define __always_inline__write_lock_irqsave
206#define __always_inline__spin_trylock
207#define __always_inline__read_trylock
208#define __always_inline__write_trylock
209#define __always_inline__spin_trylock_bh
210#define __always_inline__spin_unlock
211#define __always_inline__read_unlock
212#define __always_inline__write_unlock
213#define __always_inline__spin_unlock_bh
214#define __always_inline__read_unlock_bh
215#define __always_inline__write_unlock_bh
216#define __always_inline__spin_unlock_irq
217#define __always_inline__read_unlock_irq
218#define __always_inline__write_unlock_irq
219#define __always_inline__spin_unlock_irqrestore
220#define __always_inline__read_unlock_irqrestore
221#define __always_inline__write_unlock_irqrestore
222
223#endif /* __ASM_SPINLOCK_H */ 194#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index bf8b4ae7ff2d..e49e9e0c69fd 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -55,6 +55,7 @@ static void __init reset_tod_clock(void)
55 disabled_wait(0); 55 disabled_wait(0);
56 56
57 sched_clock_base_cc = TOD_UNIX_EPOCH; 57 sched_clock_base_cc = TOD_UNIX_EPOCH;
58 S390_lowcore.last_update_clock = sched_clock_base_cc;
58} 59}
59 60
60#ifdef CONFIG_SHARED_KERNEL 61#ifdef CONFIG_SHARED_KERNEL
@@ -167,6 +168,14 @@ static noinline __init void create_kernel_nss(void)
167 return; 168 return;
168 } 169 }
169 170
171 /* re-initialize cputime accounting. */
172 sched_clock_base_cc = get_clock();
173 S390_lowcore.last_update_clock = sched_clock_base_cc;
174 S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
175 S390_lowcore.user_timer = 0;
176 S390_lowcore.system_timer = 0;
177 asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));
178
170 /* re-setup boot command line with new ipl vm parms */ 179 /* re-setup boot command line with new ipl vm parms */
171 ipl_update_parameters(); 180 ipl_update_parameters();
172 setup_boot_command_line(); 181 setup_boot_command_line();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f43d2ee54464..48215d15762b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -565,10 +565,10 @@ pgm_svcper:
565 lh %r7,0x8a # get svc number from lowcore 565 lh %r7,0x8a # get svc number from lowcore
566 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 566 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
567 TRACE_IRQS_OFF 567 TRACE_IRQS_OFF
568 l %r1,__TI_task(%r9) 568 l %r8,__TI_task(%r9)
569 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 569 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
570 mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS 570 mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS
571 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 571 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
572 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 572 oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
573 TRACE_IRQS_ON 573 TRACE_IRQS_ON
574 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 574 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index a6f7b20df616..9aff1d449b6e 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -543,10 +543,10 @@ pgm_svcper:
543 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 543 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
544 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore 544 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore
545 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 545 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
546 lg %r1,__TI_task(%r9) 546 lg %r8,__TI_task(%r9)
547 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 547 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
548 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS 548 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
549 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 549 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
550 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 550 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
551 TRACE_IRQS_ON 551 TRACE_IRQS_ON
552 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 552 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c
index 6b5d191eec3a..a351ed84eec5 100644
--- a/arch/sh/kernel/cpu/irq/imask.c
+++ b/arch/sh/kernel/cpu/irq/imask.c
@@ -68,7 +68,7 @@ static void unmask_imask_irq(unsigned int irq)
68} 68}
69 69
70static struct irq_chip imask_irq_chip = { 70static struct irq_chip imask_irq_chip = {
71 .typename = "SR.IMASK", 71 .name = "SR.IMASK",
72 .mask = mask_imask_irq, 72 .mask = mask_imask_irq,
73 .unmask = unmask_imask_irq, 73 .unmask = unmask_imask_irq,
74 .mask_ack = mask_imask_irq, 74 .mask_ack = mask_imask_irq,
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
index 6c092f1f5557..06e7e2959b54 100644
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ b/arch/sh/kernel/cpu/irq/intc-sh5.c
@@ -85,7 +85,7 @@ static void mask_and_ack_intc(unsigned int);
85static void end_intc_irq(unsigned int irq); 85static void end_intc_irq(unsigned int irq);
86 86
87static struct irq_chip intc_irq_type = { 87static struct irq_chip intc_irq_type = {
88 .typename = "INTC", 88 .name = "INTC",
89 .startup = startup_intc_irq, 89 .startup = startup_intc_irq,
90 .shutdown = shutdown_intc_irq, 90 .shutdown = shutdown_intc_irq,
91 .enable = enable_intc_irq, 91 .enable = enable_intc_irq,
diff --git a/arch/sparc/boot/btfixupprep.c b/arch/sparc/boot/btfixupprep.c
index 52a4208fe4f0..bbf91b9c3d39 100644
--- a/arch/sparc/boot/btfixupprep.c
+++ b/arch/sparc/boot/btfixupprep.c
@@ -61,14 +61,14 @@ unsigned long lastfoffset = -1;
61unsigned long lastfrelno; 61unsigned long lastfrelno;
62btfixup *lastf; 62btfixup *lastf;
63 63
64void fatal(void) __attribute__((noreturn)); 64static void fatal(void) __attribute__((noreturn));
65void fatal(void) 65static void fatal(void)
66{ 66{
67 fprintf(stderr, "Malformed output from objdump\n%s\n", buffer); 67 fprintf(stderr, "Malformed output from objdump\n%s\n", buffer);
68 exit(1); 68 exit(1);
69} 69}
70 70
71btfixup *find(int type, char *name) 71static btfixup *find(int type, char *name)
72{ 72{
73 int i; 73 int i;
74 for (i = 0; i < last; i++) { 74 for (i = 0; i < last; i++) {
@@ -88,7 +88,7 @@ btfixup *find(int type, char *name)
88 return array + last - 1; 88 return array + last - 1;
89} 89}
90 90
91void set_mode (char *buffer) 91static void set_mode (char *buffer)
92{ 92{
93 for (mode = 0;; mode++) 93 for (mode = 0;; mode++)
94 if (buffer[mode] < '0' || buffer[mode] > '9') 94 if (buffer[mode] < '0' || buffer[mode] > '9')
diff --git a/arch/sparc/boot/piggyback_32.c b/arch/sparc/boot/piggyback_32.c
index e8dc9adfcd61..ac944aec7301 100644
--- a/arch/sparc/boot/piggyback_32.c
+++ b/arch/sparc/boot/piggyback_32.c
@@ -35,17 +35,17 @@
35 * as PROM looks for a.out image only. 35 * as PROM looks for a.out image only.
36 */ 36 */
37 37
38unsigned short ld2(char *p) 38static unsigned short ld2(char *p)
39{ 39{
40 return (p[0] << 8) | p[1]; 40 return (p[0] << 8) | p[1];
41} 41}
42 42
43unsigned int ld4(char *p) 43static unsigned int ld4(char *p)
44{ 44{
45 return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3]; 45 return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
46} 46}
47 47
48void st4(char *p, unsigned int x) 48static void st4(char *p, unsigned int x)
49{ 49{
50 p[0] = x >> 24; 50 p[0] = x >> 24;
51 p[1] = x >> 16; 51 p[1] = x >> 16;
@@ -53,7 +53,7 @@ void st4(char *p, unsigned int x)
53 p[3] = x; 53 p[3] = x;
54} 54}
55 55
56void usage(void) 56static void usage(void)
57{ 57{
58 /* fs_img.gz is an image of initial ramdisk. */ 58 /* fs_img.gz is an image of initial ramdisk. */
59 fprintf(stderr, "Usage: piggyback vmlinux.aout System.map fs_img.gz\n"); 59 fprintf(stderr, "Usage: piggyback vmlinux.aout System.map fs_img.gz\n");
@@ -61,7 +61,7 @@ void usage(void)
61 exit(1); 61 exit(1);
62} 62}
63 63
64void die(char *str) 64static void die(char *str)
65{ 65{
66 perror (str); 66 perror (str);
67 exit(1); 67 exit(1);
diff --git a/arch/sparc/boot/piggyback_64.c b/arch/sparc/boot/piggyback_64.c
index c63fd1b6bdd4..a26a686cb5aa 100644
--- a/arch/sparc/boot/piggyback_64.c
+++ b/arch/sparc/boot/piggyback_64.c
@@ -32,7 +32,7 @@
32/* Note: run this on an a.out kernel (use elftoaout for it), as PROM looks for a.out image onlly 32/* Note: run this on an a.out kernel (use elftoaout for it), as PROM looks for a.out image onlly
33 usage: piggyback vmlinux System.map tail, where tail is gzipped fs of the initial ramdisk */ 33 usage: piggyback vmlinux System.map tail, where tail is gzipped fs of the initial ramdisk */
34 34
35void die(char *str) 35static void die(char *str)
36{ 36{
37 perror (str); 37 perror (str);
38 exit(1); 38 exit(1);
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
index c2f772dbd556..77d1b313e344 100644
--- a/arch/sparc/mm/init_64.h
+++ b/arch/sparc/mm/init_64.h
@@ -45,7 +45,7 @@ extern void free_initmem(void);
45#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK) 45#define VMEMMAP_ALIGN(x) (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
46 46
47#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \ 47#define VMEMMAP_SIZE ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
48 sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT) 48 sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
49extern unsigned long vmemmap_table[VMEMMAP_SIZE]; 49extern unsigned long vmemmap_table[VMEMMAP_SIZE];
50#endif 50#endif
51 51
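
The sparc64 change above is a straight sizing fix: vmemmap_table needs one entry per VMEMMAP_CHUNK of the virtual memmap, and the memmap occupies (number of possible pages) * sizeof(struct page) bytes, not sizeof(struct page *). Since struct page is several times larger than a pointer, the old expression under-sized the table. A sketch of the arithmetic; MAX_PHYSADDR_BITS, PAGE_SHIFT, the chunk shift and the struct size are assumed values for illustration only:

    #include <stdio.h>

    int main(void)
    {
            const unsigned long max_physaddr_bits = 42;     /* assumed */
            const unsigned long page_shift        = 13;     /* 8 KiB pages, assumed */
            const unsigned long chunk_shift       = 22;     /* 4 MiB chunks, assumed */
            const unsigned long sizeof_page       = 56;     /* typical struct page, assumed */
            const unsigned long sizeof_ptr        = 8;

            unsigned long pages = 1UL << (max_physaddr_bits - page_shift);

            printf("entries needed:          %lu\n", (pages * sizeof_page) >> chunk_shift);
            printf("entries before this fix: %lu\n", (pages * sizeof_ptr) >> chunk_shift);
            return 0;
    }

With these assumed values the table was roughly seven times too small, an out-of-bounds access waiting to happen when the vmemmap is populated.
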
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index 4b180897e6b5..5af2982133b5 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
@@ -23,19 +23,13 @@
23#include <linux/irqreturn.h> 23#include <linux/irqreturn.h>
24 24
25#ifdef CONFIG_AMD_IOMMU 25#ifdef CONFIG_AMD_IOMMU
26extern int amd_iommu_init(void); 26
27extern int amd_iommu_init_dma_ops(void);
28extern int amd_iommu_init_passthrough(void);
29extern void amd_iommu_detect(void); 27extern void amd_iommu_detect(void);
30extern irqreturn_t amd_iommu_int_handler(int irq, void *data); 28
31extern void amd_iommu_flush_all_domains(void);
32extern void amd_iommu_flush_all_devices(void);
33extern void amd_iommu_shutdown(void);
34extern void amd_iommu_apply_erratum_63(u16 devid);
35#else 29#else
36static inline int amd_iommu_init(void) { return -ENODEV; } 30
37static inline void amd_iommu_detect(void) { } 31static inline void amd_iommu_detect(void) { }
38static inline void amd_iommu_shutdown(void) { } 32
39#endif 33#endif
40 34
41#endif /* _ASM_X86_AMD_IOMMU_H */ 35#endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h
new file mode 100644
index 000000000000..84786fb9a23b
--- /dev/null
+++ b/arch/x86/include/asm/amd_iommu_proto.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (C) 2009 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ASM_X86_AMD_IOMMU_PROTO_H
20#define _ASM_X86_AMD_IOMMU_PROTO_H
21
22struct amd_iommu;
23
24extern int amd_iommu_init_dma_ops(void);
25extern int amd_iommu_init_passthrough(void);
26extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
27extern void amd_iommu_flush_all_domains(void);
28extern void amd_iommu_flush_all_devices(void);
29extern void amd_iommu_apply_erratum_63(u16 devid);
30extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
31
32#ifndef CONFIG_AMD_IOMMU_STATS
33
34static inline void amd_iommu_stats_init(void) { }
35
36#endif /* !CONFIG_AMD_IOMMU_STATS */
37
38#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 2a2cc7a78a81..ba19ad4c47d0 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
@@ -25,6 +25,11 @@
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26 26
27/* 27/*
28 * Maximum number of IOMMUs supported
29 */
30#define MAX_IOMMUS 32
31
32/*
28 * some size calculation constants 33 * some size calculation constants
29 */ 34 */
30#define DEV_TABLE_ENTRY_SIZE 32 35#define DEV_TABLE_ENTRY_SIZE 32
@@ -206,6 +211,9 @@ extern bool amd_iommu_dump;
206 printk(KERN_INFO "AMD-Vi: " format, ## arg); \ 211 printk(KERN_INFO "AMD-Vi: " format, ## arg); \
207 } while(0); 212 } while(0);
208 213
214/* global flag if IOMMUs cache non-present entries */
215extern bool amd_iommu_np_cache;
216
209/* 217/*
210 * Make iterating over all IOMMUs easier 218 * Make iterating over all IOMMUs easier
211 */ 219 */
@@ -226,6 +234,8 @@ extern bool amd_iommu_dump;
226 * independent of their use. 234 * independent of their use.
227 */ 235 */
228struct protection_domain { 236struct protection_domain {
237 struct list_head list; /* for list of all protection domains */
238 struct list_head dev_list; /* List of all devices in this domain */
229 spinlock_t lock; /* mostly used to lock the page table*/ 239 spinlock_t lock; /* mostly used to lock the page table*/
230 u16 id; /* the domain id written to the device table */ 240 u16 id; /* the domain id written to the device table */
231 int mode; /* paging mode (0-6 levels) */ 241 int mode; /* paging mode (0-6 levels) */
@@ -233,7 +243,20 @@ struct protection_domain {
233 unsigned long flags; /* flags to find out type of domain */ 243 unsigned long flags; /* flags to find out type of domain */
234 bool updated; /* complete domain flush required */ 244 bool updated; /* complete domain flush required */
235 unsigned dev_cnt; /* devices assigned to this domain */ 245 unsigned dev_cnt; /* devices assigned to this domain */
246 unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
236 void *priv; /* private data */ 247 void *priv; /* private data */
248
249};
250
251/*
252 * This struct contains device specific data for the IOMMU
253 */
254struct iommu_dev_data {
255 struct list_head list; /* For domain->dev_list */
256 struct device *dev; /* Device this data belong to */
257 struct device *alias; /* The Alias Device */
258 struct protection_domain *domain; /* Domain the device is bound to */
259 atomic_t bind; /* Domain attach reverent count */
237}; 260};
238 261
239/* 262/*
@@ -291,6 +314,9 @@ struct dma_ops_domain {
291struct amd_iommu { 314struct amd_iommu {
292 struct list_head list; 315 struct list_head list;
293 316
317 /* Index within the IOMMU array */
318 int index;
319
294 /* locks the accesses to the hardware */ 320 /* locks the accesses to the hardware */
295 spinlock_t lock; 321 spinlock_t lock;
296 322
@@ -357,6 +383,21 @@ struct amd_iommu {
357extern struct list_head amd_iommu_list; 383extern struct list_head amd_iommu_list;
358 384
359/* 385/*
386 * Array with pointers to each IOMMU struct
387 * The indices are referenced in the protection domains
388 */
389extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
390
391/* Number of IOMMUs present in the system */
392extern int amd_iommus_present;
393
394/*
395 * Declarations for the global list of all protection domains
396 */
397extern spinlock_t amd_iommu_pd_lock;
398extern struct list_head amd_iommu_pd_list;
399
400/*
360 * Structure defining one entry in the device table 401 * Structure defining one entry in the device table
361 */ 402 */
362struct dev_table_entry { 403struct dev_table_entry {
@@ -416,15 +457,9 @@ extern unsigned amd_iommu_aperture_order;
416/* largest PCI device id we expect translation requests for */ 457/* largest PCI device id we expect translation requests for */
417extern u16 amd_iommu_last_bdf; 458extern u16 amd_iommu_last_bdf;
418 459
419/* data structures for protection domain handling */
420extern struct protection_domain **amd_iommu_pd_table;
421
422/* allocation bitmap for domain ids */ 460/* allocation bitmap for domain ids */
423extern unsigned long *amd_iommu_pd_alloc_bitmap; 461extern unsigned long *amd_iommu_pd_alloc_bitmap;
424 462
425/* will be 1 if device isolation is enabled */
426extern bool amd_iommu_isolate;
427
428/* 463/*
429 * If true, the addresses will be flushed on unmap time, not when 464 * If true, the addresses will be flushed on unmap time, not when
430 * they are reused 465 * they are reused
@@ -462,11 +497,6 @@ struct __iommu_counter {
462#define ADD_STATS_COUNTER(name, x) 497#define ADD_STATS_COUNTER(name, x)
463#define SUB_STATS_COUNTER(name, x) 498#define SUB_STATS_COUNTER(name, x)
464 499
465static inline void amd_iommu_stats_init(void) { }
466
467#endif /* CONFIG_AMD_IOMMU_STATS */ 500#endif /* CONFIG_AMD_IOMMU_STATS */
468 501
469/* some function prototypes */
470extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
471
472#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ 502#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
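The per-IOMMU reference counters added to struct protection_domain are meant to be read together with the new amd_iommus[] array: code that has to touch the hardware can skip any IOMMU whose counter for the domain is zero. A minimal sketch of that pattern, assuming only the declarations above; queue_flush_on() is a hypothetical stand-in for the real command-queue helper:

static void flush_domain_on_active_iommus(struct protection_domain *domain)
{
	int i;

	for (i = 0; i < amd_iommus_present; ++i) {
		/* no device of this domain sits behind IOMMU i */
		if (!domain->dev_iommu[i])
			continue;

		/* hypothetical helper standing in for the command-queue call */
		queue_flush_on(amd_iommus[i], domain->id);
	}
}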
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index d9cf1cd156d2..f654d1bb17fb 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -22,14 +22,14 @@ do { \
22 ".popsection" \ 22 ".popsection" \
23 : : "i" (__FILE__), "i" (__LINE__), \ 23 : : "i" (__FILE__), "i" (__LINE__), \
24 "i" (sizeof(struct bug_entry))); \ 24 "i" (sizeof(struct bug_entry))); \
25 for (;;) ; \ 25 unreachable(); \
26} while (0) 26} while (0)
27 27
28#else 28#else
29#define BUG() \ 29#define BUG() \
30do { \ 30do { \
31 asm volatile("ud2"); \ 31 asm volatile("ud2"); \
32 for (;;) ; \ 32 unreachable(); \
33} while (0) 33} while (0)
34#endif 34#endif
35 35
diff --git a/arch/x86/include/asm/calgary.h b/arch/x86/include/asm/calgary.h
index b03bedb62aa7..0918654305af 100644
--- a/arch/x86/include/asm/calgary.h
+++ b/arch/x86/include/asm/calgary.h
@@ -62,10 +62,8 @@ struct cal_chipset_ops {
62extern int use_calgary; 62extern int use_calgary;
63 63
64#ifdef CONFIG_CALGARY_IOMMU 64#ifdef CONFIG_CALGARY_IOMMU
65extern int calgary_iommu_init(void);
66extern void detect_calgary(void); 65extern void detect_calgary(void);
67#else 66#else
68static inline int calgary_iommu_init(void) { return 1; }
69static inline void detect_calgary(void) { return; } 67static inline void detect_calgary(void) { return; }
70#endif 68#endif
71 69
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index cee34e9ca45b..029f230ab637 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -8,7 +8,7 @@ struct dev_archdata {
8#ifdef CONFIG_X86_64 8#ifdef CONFIG_X86_64
9struct dma_map_ops *dma_ops; 9struct dma_map_ops *dma_ops;
10#endif 10#endif
11#ifdef CONFIG_DMAR 11#if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
12 void *iommu; /* hook for IOMMU specific extension */ 12 void *iommu; /* hook for IOMMU specific extension */
13#endif 13#endif
14}; 14};
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 6a25d5d42836..0f6c02f3b7d4 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -20,7 +20,8 @@
20# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32) 20# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
21#endif 21#endif
22 22
23extern dma_addr_t bad_dma_address; 23#define DMA_ERROR_CODE 0
24
24extern int iommu_merge; 25extern int iommu_merge;
25extern struct device x86_dma_fallback_dev; 26extern struct device x86_dma_fallback_dev;
26extern int panic_on_overflow; 27extern int panic_on_overflow;
@@ -48,7 +49,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
48 if (ops->mapping_error) 49 if (ops->mapping_error)
49 return ops->mapping_error(dev, dma_addr); 50 return ops->mapping_error(dev, dma_addr);
50 51
51 return (dma_addr == bad_dma_address); 52 return (dma_addr == DMA_ERROR_CODE);
52} 53}
53 54
54#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 55#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
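With bad_dma_address gone, DMA_ERROR_CODE (0) is the only failure value the x86 mapping paths hand back, and dma_mapping_error() above remains the one place drivers should check for it. A hedged driver-side sketch; dev, buf and len are placeholders for whatever the driver already has:

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* handle equals DMA_ERROR_CODE here */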
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 6cfdafa409d8..4ac5b0f33fc1 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -35,8 +35,7 @@ extern int gart_iommu_aperture_allowed;
35extern int gart_iommu_aperture_disabled; 35extern int gart_iommu_aperture_disabled;
36 36
37extern void early_gart_iommu_check(void); 37extern void early_gart_iommu_check(void);
38extern void gart_iommu_init(void); 38extern int gart_iommu_init(void);
39extern void gart_iommu_shutdown(void);
40extern void __init gart_parse_options(char *); 39extern void __init gart_parse_options(char *);
41extern void gart_iommu_hole_init(void); 40extern void gart_iommu_hole_init(void);
42 41
@@ -48,12 +47,6 @@ extern void gart_iommu_hole_init(void);
48static inline void early_gart_iommu_check(void) 47static inline void early_gart_iommu_check(void)
49{ 48{
50} 49}
51static inline void gart_iommu_init(void)
52{
53}
54static inline void gart_iommu_shutdown(void)
55{
56}
57static inline void gart_parse_options(char *options) 50static inline void gart_parse_options(char *options)
58{ 51{
59} 52}
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index fd6d21bbee6c..345c99cef152 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -1,8 +1,6 @@
1#ifndef _ASM_X86_IOMMU_H 1#ifndef _ASM_X86_IOMMU_H
2#define _ASM_X86_IOMMU_H 2#define _ASM_X86_IOMMU_H
3 3
4extern void pci_iommu_shutdown(void);
5extern void no_iommu_init(void);
6extern struct dma_map_ops nommu_dma_ops; 4extern struct dma_map_ops nommu_dma_ops;
7extern int force_iommu, no_iommu; 5extern int force_iommu, no_iommu;
8extern int iommu_detected; 6extern int iommu_detected;
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
index b9e4e20174fb..87ffcb12a1b8 100644
--- a/arch/x86/include/asm/swiotlb.h
+++ b/arch/x86/include/asm/swiotlb.h
@@ -3,17 +3,14 @@
3 3
4#include <linux/swiotlb.h> 4#include <linux/swiotlb.h>
5 5
6/* SWIOTLB interface */
7
8extern int swiotlb_force;
9
10#ifdef CONFIG_SWIOTLB 6#ifdef CONFIG_SWIOTLB
11extern int swiotlb; 7extern int swiotlb;
12extern void pci_swiotlb_init(void); 8extern int pci_swiotlb_init(void);
13#else 9#else
14#define swiotlb 0 10#define swiotlb 0
15static inline void pci_swiotlb_init(void) 11static inline int pci_swiotlb_init(void)
16{ 12{
13 return 0;
17} 14}
18#endif 15#endif
19 16
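pci_swiotlb_init() now reports back whether it installed itself; the !CONFIG_SWIOTLB stub returning 0 suggests that a non-zero return means swiotlb is handling DMA. A sketch of a caller under that assumption (pick_iommu() is an invented name, not part of the patch):

static int __init pick_iommu(void)
{
	if (pci_swiotlb_init())
		return 0;	/* swiotlb installed its dma_ops, nothing more to do */

	/* otherwise probe for a hardware IOMMU here */
	return 0;
}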
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 2c756fd4ab0e..d8e71459f025 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -91,6 +91,14 @@ struct x86_init_timers {
91}; 91};
92 92
93/** 93/**
94 * struct x86_init_iommu - platform specific iommu setup
95 * @iommu_init: platform specific iommu setup
96 */
97struct x86_init_iommu {
98 int (*iommu_init)(void);
99};
100
101/**
94 * struct x86_init_ops - functions for platform specific setup 102 * struct x86_init_ops - functions for platform specific setup
95 * 103 *
96 */ 104 */
@@ -101,6 +109,7 @@ struct x86_init_ops {
101 struct x86_init_oem oem; 109 struct x86_init_oem oem;
102 struct x86_init_paging paging; 110 struct x86_init_paging paging;
103 struct x86_init_timers timers; 111 struct x86_init_timers timers;
112 struct x86_init_iommu iommu;
104}; 113};
105 114
106/** 115/**
@@ -121,6 +130,7 @@ struct x86_platform_ops {
121 unsigned long (*calibrate_tsc)(void); 130 unsigned long (*calibrate_tsc)(void);
122 unsigned long (*get_wallclock)(void); 131 unsigned long (*get_wallclock)(void);
123 int (*set_wallclock)(unsigned long nowtime); 132 int (*set_wallclock)(unsigned long nowtime);
133 void (*iommu_shutdown)(void);
124}; 134};
125 135
126extern struct x86_init_ops x86_init; 136extern struct x86_init_ops x86_init;
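The new x86_init.iommu.iommu_init hook and x86_platform.iommu_shutdown callback give IOMMU drivers a registration point instead of hard-coded calls. A sketch of how a driver might wire itself in; the my_iommu_* names are placeholders, and x86_platform is assumed to be the usual extern instance of struct x86_platform_ops:

static int __init my_iommu_init(void)
{
	/* detect and program the hardware, install dma_ops ... */
	return 0;
}

static void my_iommu_shutdown(void)
{
	/* disable translation before reboot/kexec */
}

static void __init my_iommu_detect(void)
{
	x86_init.iommu.iommu_init = my_iommu_init;
	x86_platform.iommu_shutdown = my_iommu_shutdown;
}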
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index d296f4a195c9..d85d1b2432ba 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -79,7 +79,8 @@ void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
79 struct cpuinfo_x86 *c = &cpu_data(pr->id); 79 struct cpuinfo_x86 *c = &cpu_data(pr->id);
80 80
81 pr->pdc = NULL; 81 pr->pdc = NULL;
82 if (c->x86_vendor == X86_VENDOR_INTEL) 82 if (c->x86_vendor == X86_VENDOR_INTEL ||
83 c->x86_vendor == X86_VENDOR_CENTAUR)
83 init_intel_pdc(pr, c); 84 init_intel_pdc(pr, c);
84 85
85 return; 86 return;
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 0285521e0a99..32fb09102a13 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
@@ -28,6 +28,7 @@
28#include <asm/proto.h> 28#include <asm/proto.h>
29#include <asm/iommu.h> 29#include <asm/iommu.h>
30#include <asm/gart.h> 30#include <asm/gart.h>
31#include <asm/amd_iommu_proto.h>
31#include <asm/amd_iommu_types.h> 32#include <asm/amd_iommu_types.h>
32#include <asm/amd_iommu.h> 33#include <asm/amd_iommu.h>
33 34
@@ -56,20 +57,115 @@ struct iommu_cmd {
56 u32 data[4]; 57 u32 data[4];
57}; 58};
58 59
59static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
60 struct unity_map_entry *e);
61static struct dma_ops_domain *find_protection_domain(u16 devid);
62static u64 *alloc_pte(struct protection_domain *domain,
63 unsigned long address, int end_lvl,
64 u64 **pte_page, gfp_t gfp);
65static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
66 unsigned long start_page,
67 unsigned int pages);
68static void reset_iommu_command_buffer(struct amd_iommu *iommu); 60static void reset_iommu_command_buffer(struct amd_iommu *iommu);
69static u64 *fetch_pte(struct protection_domain *domain,
70 unsigned long address, int map_size);
71static void update_domain(struct protection_domain *domain); 61static void update_domain(struct protection_domain *domain);
72 62
63/****************************************************************************
64 *
65 * Helper functions
66 *
67 ****************************************************************************/
68
69static inline u16 get_device_id(struct device *dev)
70{
71 struct pci_dev *pdev = to_pci_dev(dev);
72
73 return calc_devid(pdev->bus->number, pdev->devfn);
74}
75
76static struct iommu_dev_data *get_dev_data(struct device *dev)
77{
78 return dev->archdata.iommu;
79}
80
81/*
82 * In this function the list of preallocated protection domains is traversed to
83 * find the domain for a specific device
84 */
85static struct dma_ops_domain *find_protection_domain(u16 devid)
86{
87 struct dma_ops_domain *entry, *ret = NULL;
88 unsigned long flags;
89 u16 alias = amd_iommu_alias_table[devid];
90
91 if (list_empty(&iommu_pd_list))
92 return NULL;
93
94 spin_lock_irqsave(&iommu_pd_list_lock, flags);
95
96 list_for_each_entry(entry, &iommu_pd_list, list) {
97 if (entry->target_dev == devid ||
98 entry->target_dev == alias) {
99 ret = entry;
100 break;
101 }
102 }
103
104 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
105
106 return ret;
107}
108
109/*
110 * This function checks if the driver got a valid device from the caller to
111 * avoid dereferencing invalid pointers.
112 */
113static bool check_device(struct device *dev)
114{
115 u16 devid;
116
117 if (!dev || !dev->dma_mask)
118 return false;
119
120 /* No device or no PCI device */
121 if (!dev || dev->bus != &pci_bus_type)
122 return false;
123
124 devid = get_device_id(dev);
125
126 /* Out of our scope? */
127 if (devid > amd_iommu_last_bdf)
128 return false;
129
130 if (amd_iommu_rlookup_table[devid] == NULL)
131 return false;
132
133 return true;
134}
135
136static int iommu_init_device(struct device *dev)
137{
138 struct iommu_dev_data *dev_data;
139 struct pci_dev *pdev;
140 u16 devid, alias;
141
142 if (dev->archdata.iommu)
143 return 0;
144
145 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
146 if (!dev_data)
147 return -ENOMEM;
148
149 dev_data->dev = dev;
150
151 devid = get_device_id(dev);
152 alias = amd_iommu_alias_table[devid];
153 pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
154 if (pdev)
155 dev_data->alias = &pdev->dev;
156
157 atomic_set(&dev_data->bind, 0);
158
159 dev->archdata.iommu = dev_data;
160
161
162 return 0;
163}
164
165static void iommu_uninit_device(struct device *dev)
166{
167 kfree(dev->archdata.iommu);
168}
73#ifdef CONFIG_AMD_IOMMU_STATS 169#ifdef CONFIG_AMD_IOMMU_STATS
74 170
75/* 171/*
@@ -90,7 +186,6 @@ DECLARE_STATS_COUNTER(alloced_io_mem);
90DECLARE_STATS_COUNTER(total_map_requests); 186DECLARE_STATS_COUNTER(total_map_requests);
91 187
92static struct dentry *stats_dir; 188static struct dentry *stats_dir;
93static struct dentry *de_isolate;
94static struct dentry *de_fflush; 189static struct dentry *de_fflush;
95 190
96static void amd_iommu_stats_add(struct __iommu_counter *cnt) 191static void amd_iommu_stats_add(struct __iommu_counter *cnt)
@@ -108,9 +203,6 @@ static void amd_iommu_stats_init(void)
108 if (stats_dir == NULL) 203 if (stats_dir == NULL)
109 return; 204 return;
110 205
111 de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
112 (u32 *)&amd_iommu_isolate);
113
114 de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir, 206 de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
115 (u32 *)&amd_iommu_unmap_flush); 207 (u32 *)&amd_iommu_unmap_flush);
116 208
@@ -130,12 +222,6 @@ static void amd_iommu_stats_init(void)
130 222
131#endif 223#endif
132 224
133/* returns !0 if the IOMMU is caching non-present entries in its TLB */
134static int iommu_has_npcache(struct amd_iommu *iommu)
135{
136 return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
137}
138
139/**************************************************************************** 225/****************************************************************************
140 * 226 *
141 * Interrupt handling functions 227 * Interrupt handling functions
@@ -199,6 +285,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
199 break; 285 break;
200 case EVENT_TYPE_ILL_CMD: 286 case EVENT_TYPE_ILL_CMD:
201 printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); 287 printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
288 iommu->reset_in_progress = true;
202 reset_iommu_command_buffer(iommu); 289 reset_iommu_command_buffer(iommu);
203 dump_command(address); 290 dump_command(address);
204 break; 291 break;
@@ -321,11 +408,8 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu)
321 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; 408 status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
322 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); 409 writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
323 410
324 if (unlikely(i == EXIT_LOOP_COUNT)) { 411 if (unlikely(i == EXIT_LOOP_COUNT))
325 spin_unlock(&iommu->lock); 412 iommu->reset_in_progress = true;
326 reset_iommu_command_buffer(iommu);
327 spin_lock(&iommu->lock);
328 }
329} 413}
330 414
331/* 415/*
@@ -372,26 +456,46 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
372out: 456out:
373 spin_unlock_irqrestore(&iommu->lock, flags); 457 spin_unlock_irqrestore(&iommu->lock, flags);
374 458
459 if (iommu->reset_in_progress)
460 reset_iommu_command_buffer(iommu);
461
375 return 0; 462 return 0;
376} 463}
377 464
465static void iommu_flush_complete(struct protection_domain *domain)
466{
467 int i;
468
469 for (i = 0; i < amd_iommus_present; ++i) {
470 if (!domain->dev_iommu[i])
471 continue;
472
473 /*
474 * Devices of this domain are behind this IOMMU
475 * We need to wait for completion of all commands.
476 */
477 iommu_completion_wait(amd_iommus[i]);
478 }
479}
480
378/* 481/*
379 * Command send function for invalidating a device table entry 482 * Command send function for invalidating a device table entry
380 */ 483 */
381static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) 484static int iommu_flush_device(struct device *dev)
382{ 485{
486 struct amd_iommu *iommu;
383 struct iommu_cmd cmd; 487 struct iommu_cmd cmd;
384 int ret; 488 u16 devid;
385 489
386 BUG_ON(iommu == NULL); 490 devid = get_device_id(dev);
491 iommu = amd_iommu_rlookup_table[devid];
387 492
493 /* Build command */
388 memset(&cmd, 0, sizeof(cmd)); 494 memset(&cmd, 0, sizeof(cmd));
389 CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); 495 CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY);
390 cmd.data[0] = devid; 496 cmd.data[0] = devid;
391 497
392 ret = iommu_queue_command(iommu, &cmd); 498 return iommu_queue_command(iommu, &cmd);
393
394 return ret;
395} 499}
396 500
397static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, 501static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
@@ -430,11 +534,11 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
430 * It invalidates a single PTE if the range to flush is within a single 534 * It invalidates a single PTE if the range to flush is within a single
431 * page. Otherwise it flushes the whole TLB of the IOMMU. 535 * page. Otherwise it flushes the whole TLB of the IOMMU.
432 */ 536 */
433static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid, 537static void __iommu_flush_pages(struct protection_domain *domain,
434 u64 address, size_t size) 538 u64 address, size_t size, int pde)
435{ 539{
436 int s = 0; 540 int s = 0, i;
437 unsigned pages = iommu_num_pages(address, size, PAGE_SIZE); 541 unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);
438 542
439 address &= PAGE_MASK; 543 address &= PAGE_MASK;
440 544
@@ -447,142 +551,212 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
447 s = 1; 551 s = 1;
448 } 552 }
449 553
450 iommu_queue_inv_iommu_pages(iommu, address, domid, 0, s);
451 554
452 return 0; 555 for (i = 0; i < amd_iommus_present; ++i) {
556 if (!domain->dev_iommu[i])
557 continue;
558
559 /*
560 * Devices of this domain are behind this IOMMU
561 * We need a TLB flush
562 */
563 iommu_queue_inv_iommu_pages(amd_iommus[i], address,
564 domain->id, pde, s);
565 }
566
567 return;
453} 568}
454 569
455/* Flush the whole IO/TLB for a given protection domain */ 570static void iommu_flush_pages(struct protection_domain *domain,
456static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid) 571 u64 address, size_t size)
457{ 572{
458 u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; 573 __iommu_flush_pages(domain, address, size, 0);
459 574}
460 INC_STATS_COUNTER(domain_flush_single);
461 575
462 iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1); 576/* Flush the whole IO/TLB for a given protection domain */
577static void iommu_flush_tlb(struct protection_domain *domain)
578{
579 __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
463} 580}
464 581
465/* Flush the whole IO/TLB for a given protection domain - including PDE */ 582/* Flush the whole IO/TLB for a given protection domain - including PDE */
466static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid) 583static void iommu_flush_tlb_pde(struct protection_domain *domain)
467{ 584{
468 u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; 585 __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
469
470 INC_STATS_COUNTER(domain_flush_single);
471
472 iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
473} 586}
474 587
588
475/* 589/*
476 * This function flushes one domain on one IOMMU 590 * This function flushes the DTEs for all devices in the domain
477 */ 591 */
478static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid) 592static void iommu_flush_domain_devices(struct protection_domain *domain)
479{ 593{
480 struct iommu_cmd cmd; 594 struct iommu_dev_data *dev_data;
481 unsigned long flags; 595 unsigned long flags;
482 596
483 __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 597 spin_lock_irqsave(&domain->lock, flags);
484 domid, 1, 1);
485 598
486 spin_lock_irqsave(&iommu->lock, flags); 599 list_for_each_entry(dev_data, &domain->dev_list, list)
487 __iommu_queue_command(iommu, &cmd); 600 iommu_flush_device(dev_data->dev);
488 __iommu_completion_wait(iommu); 601
489 __iommu_wait_for_completion(iommu); 602 spin_unlock_irqrestore(&domain->lock, flags);
490 spin_unlock_irqrestore(&iommu->lock, flags);
491} 603}
492 604
493static void flush_all_domains_on_iommu(struct amd_iommu *iommu) 605static void iommu_flush_all_domain_devices(void)
494{ 606{
495 int i; 607 struct protection_domain *domain;
608 unsigned long flags;
496 609
497 for (i = 1; i < MAX_DOMAIN_ID; ++i) { 610 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
498 if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) 611
499 continue; 612 list_for_each_entry(domain, &amd_iommu_pd_list, list) {
500 flush_domain_on_iommu(iommu, i); 613 iommu_flush_domain_devices(domain);
614 iommu_flush_complete(domain);
501 } 615 }
502 616
617 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
618}
619
620void amd_iommu_flush_all_devices(void)
621{
622 iommu_flush_all_domain_devices();
503} 623}
504 624
505/* 625/*
506 * This function is used to flush the IO/TLB for a given protection domain 626 * This function uses heavy locking and may disable irqs for some time. But
507 * on every IOMMU in the system 627 * this is no issue because it is only called during resume.
508 */ 628 */
509static void iommu_flush_domain(u16 domid) 629void amd_iommu_flush_all_domains(void)
510{ 630{
511 struct amd_iommu *iommu; 631 struct protection_domain *domain;
632 unsigned long flags;
512 633
513 INC_STATS_COUNTER(domain_flush_all); 634 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
514 635
515 for_each_iommu(iommu) 636 list_for_each_entry(domain, &amd_iommu_pd_list, list) {
516 flush_domain_on_iommu(iommu, domid); 637 spin_lock(&domain->lock);
638 iommu_flush_tlb_pde(domain);
639 iommu_flush_complete(domain);
640 spin_unlock(&domain->lock);
641 }
642
643 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
517} 644}
518 645
519void amd_iommu_flush_all_domains(void) 646static void reset_iommu_command_buffer(struct amd_iommu *iommu)
520{ 647{
521 struct amd_iommu *iommu; 648 pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
522 649
523 for_each_iommu(iommu) 650 if (iommu->reset_in_progress)
524 flush_all_domains_on_iommu(iommu); 651 panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
652
653 amd_iommu_reset_cmd_buffer(iommu);
654 amd_iommu_flush_all_devices();
655 amd_iommu_flush_all_domains();
656
657 iommu->reset_in_progress = false;
525} 658}
526 659
527static void flush_all_devices_for_iommu(struct amd_iommu *iommu) 660/****************************************************************************
661 *
662 * The functions below are used to create the page table mappings for
663 * unity mapped regions.
664 *
665 ****************************************************************************/
666
667/*
668 * This function is used to add another level to an IO page table. Adding
669 * another level increases the size of the address space by 9 bits to a size up
670 * to 64 bits.
671 */
672static bool increase_address_space(struct protection_domain *domain,
673 gfp_t gfp)
528{ 674{
529 int i; 675 u64 *pte;
530 676
531 for (i = 0; i <= amd_iommu_last_bdf; ++i) { 677 if (domain->mode == PAGE_MODE_6_LEVEL)
532 if (iommu != amd_iommu_rlookup_table[i]) 678 /* address space already 64 bit large */
533 continue; 679 return false;
534 680
535 iommu_queue_inv_dev_entry(iommu, i); 681 pte = (void *)get_zeroed_page(gfp);
536 iommu_completion_wait(iommu); 682 if (!pte)
537 } 683 return false;
684
685 *pte = PM_LEVEL_PDE(domain->mode,
686 virt_to_phys(domain->pt_root));
687 domain->pt_root = pte;
688 domain->mode += 1;
689 domain->updated = true;
690
691 return true;
538} 692}
539 693
540static void flush_devices_by_domain(struct protection_domain *domain) 694static u64 *alloc_pte(struct protection_domain *domain,
695 unsigned long address,
696 int end_lvl,
697 u64 **pte_page,
698 gfp_t gfp)
541{ 699{
542 struct amd_iommu *iommu; 700 u64 *pte, *page;
543 int i; 701 int level;
544 702
545 for (i = 0; i <= amd_iommu_last_bdf; ++i) { 703 while (address > PM_LEVEL_SIZE(domain->mode))
546 if ((domain == NULL && amd_iommu_pd_table[i] == NULL) || 704 increase_address_space(domain, gfp);
547 (amd_iommu_pd_table[i] != domain))
548 continue;
549 705
550 iommu = amd_iommu_rlookup_table[i]; 706 level = domain->mode - 1;
551 if (!iommu) 707 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
552 continue;
553 708
554 iommu_queue_inv_dev_entry(iommu, i); 709 while (level > end_lvl) {
555 iommu_completion_wait(iommu); 710 if (!IOMMU_PTE_PRESENT(*pte)) {
711 page = (u64 *)get_zeroed_page(gfp);
712 if (!page)
713 return NULL;
714 *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
715 }
716
717 level -= 1;
718
719 pte = IOMMU_PTE_PAGE(*pte);
720
721 if (pte_page && level == end_lvl)
722 *pte_page = pte;
723
724 pte = &pte[PM_LEVEL_INDEX(level, address)];
556 } 725 }
726
727 return pte;
557} 728}
558 729
559static void reset_iommu_command_buffer(struct amd_iommu *iommu) 730/*
731 * This function checks if there is a PTE for a given dma address. If
732 * there is one, it returns the pointer to it.
733 */
734static u64 *fetch_pte(struct protection_domain *domain,
735 unsigned long address, int map_size)
560{ 736{
561 pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); 737 int level;
738 u64 *pte;
562 739
563 if (iommu->reset_in_progress) 740 level = domain->mode - 1;
564 panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); 741 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
565 742
566 iommu->reset_in_progress = true; 743 while (level > map_size) {
744 if (!IOMMU_PTE_PRESENT(*pte))
745 return NULL;
567 746
568 amd_iommu_reset_cmd_buffer(iommu); 747 level -= 1;
569 flush_all_devices_for_iommu(iommu);
570 flush_all_domains_on_iommu(iommu);
571 748
572 iommu->reset_in_progress = false; 749 pte = IOMMU_PTE_PAGE(*pte);
573} 750 pte = &pte[PM_LEVEL_INDEX(level, address)];
574 751
575void amd_iommu_flush_all_devices(void) 752 if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) {
576{ 753 pte = NULL;
577 flush_devices_by_domain(NULL); 754 break;
578} 755 }
756 }
579 757
580/**************************************************************************** 758 return pte;
581 * 759}
582 * The functions below are used the create the page table mappings for
583 * unity mapped regions.
584 *
585 ****************************************************************************/
586 760
587/* 761/*
588 * Generic mapping functions. It maps a physical address into a DMA 762 * Generic mapping functions. It maps a physical address into a DMA
@@ -654,28 +828,6 @@ static int iommu_for_unity_map(struct amd_iommu *iommu,
654} 828}
655 829
656/* 830/*
657 * Init the unity mappings for a specific IOMMU in the system
658 *
659 * Basically iterates over all unity mapping entries and applies them to
660 * the default domain DMA of that IOMMU if necessary.
661 */
662static int iommu_init_unity_mappings(struct amd_iommu *iommu)
663{
664 struct unity_map_entry *entry;
665 int ret;
666
667 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
668 if (!iommu_for_unity_map(iommu, entry))
669 continue;
670 ret = dma_ops_unity_map(iommu->default_dom, entry);
671 if (ret)
672 return ret;
673 }
674
675 return 0;
676}
677
678/*
679 * This function actually applies the mapping to the page table of the 831 * This function actually applies the mapping to the page table of the
680 * dma_ops domain. 832 * dma_ops domain.
681 */ 833 */
@@ -704,6 +856,28 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
704} 856}
705 857
706/* 858/*
859 * Init the unity mappings for a specific IOMMU in the system
860 *
861 * Basically iterates over all unity mapping entries and applies them to
862 * the default domain DMA of that IOMMU if necessary.
863 */
864static int iommu_init_unity_mappings(struct amd_iommu *iommu)
865{
866 struct unity_map_entry *entry;
867 int ret;
868
869 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
870 if (!iommu_for_unity_map(iommu, entry))
871 continue;
872 ret = dma_ops_unity_map(iommu->default_dom, entry);
873 if (ret)
874 return ret;
875 }
876
877 return 0;
878}
879
880/*
707 * Inits the unity mappings required for a specific device 881 * Inits the unity mappings required for a specific device
708 */ 882 */
709static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, 883static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
@@ -740,34 +914,23 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
740 */ 914 */
741 915
742/* 916/*
743 * This function checks if there is a PTE for a given dma address. If 917 * Used to reserve address ranges in the aperture (e.g. for exclusion
744 * there is one, it returns the pointer to it. 918 * ranges).
745 */ 919 */
746static u64 *fetch_pte(struct protection_domain *domain, 920static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
747 unsigned long address, int map_size) 921 unsigned long start_page,
922 unsigned int pages)
748{ 923{
749 int level; 924 unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
750 u64 *pte;
751
752 level = domain->mode - 1;
753 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
754
755 while (level > map_size) {
756 if (!IOMMU_PTE_PRESENT(*pte))
757 return NULL;
758
759 level -= 1;
760 925
761 pte = IOMMU_PTE_PAGE(*pte); 926 if (start_page + pages > last_page)
762 pte = &pte[PM_LEVEL_INDEX(level, address)]; 927 pages = last_page - start_page;
763 928
764 if ((PM_PTE_LEVEL(*pte) == 0) && level != map_size) { 929 for (i = start_page; i < start_page + pages; ++i) {
765 pte = NULL; 930 int index = i / APERTURE_RANGE_PAGES;
766 break; 931 int page = i % APERTURE_RANGE_PAGES;
767 } 932 __set_bit(page, dom->aperture[index]->bitmap);
768 } 933 }
769
770 return pte;
771} 934}
772 935
773/* 936/*
@@ -775,11 +938,11 @@ static u64 *fetch_pte(struct protection_domain *domain,
775 * aperture in case of dma_ops domain allocation or address allocation 938 * aperture in case of dma_ops domain allocation or address allocation
776 * failure. 939 * failure.
777 */ 940 */
778static int alloc_new_range(struct amd_iommu *iommu, 941static int alloc_new_range(struct dma_ops_domain *dma_dom,
779 struct dma_ops_domain *dma_dom,
780 bool populate, gfp_t gfp) 942 bool populate, gfp_t gfp)
781{ 943{
782 int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; 944 int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
945 struct amd_iommu *iommu;
783 int i; 946 int i;
784 947
785#ifdef CONFIG_IOMMU_STRESS 948#ifdef CONFIG_IOMMU_STRESS
@@ -819,14 +982,17 @@ static int alloc_new_range(struct amd_iommu *iommu,
819 dma_dom->aperture_size += APERTURE_RANGE_SIZE; 982 dma_dom->aperture_size += APERTURE_RANGE_SIZE;
820 983
821 /* Initialize the exclusion range if necessary */ 984
822 if (iommu->exclusion_start && 985 for_each_iommu(iommu) {
823 iommu->exclusion_start >= dma_dom->aperture[index]->offset && 986 if (iommu->exclusion_start &&
824 iommu->exclusion_start < dma_dom->aperture_size) { 987 iommu->exclusion_start >= dma_dom->aperture[index]->offset
825 unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT; 988 && iommu->exclusion_start < dma_dom->aperture_size) {
826 int pages = iommu_num_pages(iommu->exclusion_start, 989 unsigned long startpage;
827 iommu->exclusion_length, 990 int pages = iommu_num_pages(iommu->exclusion_start,
828 PAGE_SIZE); 991 iommu->exclusion_length,
829 dma_ops_reserve_addresses(dma_dom, startpage, pages); 992 PAGE_SIZE);
993 startpage = iommu->exclusion_start >> PAGE_SHIFT;
994 dma_ops_reserve_addresses(dma_dom, startpage, pages);
995 }
830 } 996 }
831 997
832 /* 998 /*
@@ -928,7 +1094,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
928 } 1094 }
929 1095
930 if (unlikely(address == -1)) 1096 if (unlikely(address == -1))
931 address = bad_dma_address; 1097 address = DMA_ERROR_CODE;
932 1098
933 WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size); 1099 WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
934 1100
@@ -973,6 +1139,31 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
973 * 1139 *
974 ****************************************************************************/ 1140 ****************************************************************************/
975 1141
1142/*
1143 * This function adds a protection domain to the global protection domain list
1144 */
1145static void add_domain_to_list(struct protection_domain *domain)
1146{
1147 unsigned long flags;
1148
1149 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1150 list_add(&domain->list, &amd_iommu_pd_list);
1151 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1152}
1153
1154/*
1155 * This function removes a protection domain from the global
1156 * protection domain list
1157 */
1158static void del_domain_from_list(struct protection_domain *domain)
1159{
1160 unsigned long flags;
1161
1162 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1163 list_del(&domain->list);
1164 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1165}
1166
976static u16 domain_id_alloc(void) 1167static u16 domain_id_alloc(void)
977{ 1168{
978 unsigned long flags; 1169 unsigned long flags;
@@ -1000,26 +1191,6 @@ static void domain_id_free(int id)
1000 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 1191 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1001} 1192}
1002 1193
1003/*
1004 * Used to reserve address ranges in the aperture (e.g. for exclusion
1005 * ranges.
1006 */
1007static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
1008 unsigned long start_page,
1009 unsigned int pages)
1010{
1011 unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
1012
1013 if (start_page + pages > last_page)
1014 pages = last_page - start_page;
1015
1016 for (i = start_page; i < start_page + pages; ++i) {
1017 int index = i / APERTURE_RANGE_PAGES;
1018 int page = i % APERTURE_RANGE_PAGES;
1019 __set_bit(page, dom->aperture[index]->bitmap);
1020 }
1021}
1022
1023static void free_pagetable(struct protection_domain *domain) 1194static void free_pagetable(struct protection_domain *domain)
1024{ 1195{
1025 int i, j; 1196 int i, j;
@@ -1061,6 +1232,8 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
1061 if (!dom) 1232 if (!dom)
1062 return; 1233 return;
1063 1234
1235 del_domain_from_list(&dom->domain);
1236
1064 free_pagetable(&dom->domain); 1237 free_pagetable(&dom->domain);
1065 1238
1066 for (i = 0; i < APERTURE_MAX_RANGES; ++i) { 1239 for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
@@ -1078,7 +1251,7 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
1078 * It also initializes the page table and the address allocator data 1251
1079 * structures required for the dma_ops interface 1252 * structures required for the dma_ops interface
1080 */ 1253 */
1081static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu) 1254static struct dma_ops_domain *dma_ops_domain_alloc(void)
1082{ 1255{
1083 struct dma_ops_domain *dma_dom; 1256 struct dma_ops_domain *dma_dom;
1084 1257
@@ -1091,6 +1264,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
1091 dma_dom->domain.id = domain_id_alloc(); 1264 dma_dom->domain.id = domain_id_alloc();
1092 if (dma_dom->domain.id == 0) 1265 if (dma_dom->domain.id == 0)
1093 goto free_dma_dom; 1266 goto free_dma_dom;
1267 INIT_LIST_HEAD(&dma_dom->domain.dev_list);
1094 dma_dom->domain.mode = PAGE_MODE_2_LEVEL; 1268 dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
1095 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); 1269 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
1096 dma_dom->domain.flags = PD_DMA_OPS_MASK; 1270 dma_dom->domain.flags = PD_DMA_OPS_MASK;
@@ -1101,7 +1275,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu)
1101 dma_dom->need_flush = false; 1275 dma_dom->need_flush = false;
1102 dma_dom->target_dev = 0xffff; 1276 dma_dom->target_dev = 0xffff;
1103 1277
1104 if (alloc_new_range(iommu, dma_dom, true, GFP_KERNEL)) 1278 add_domain_to_list(&dma_dom->domain);
1279
1280 if (alloc_new_range(dma_dom, true, GFP_KERNEL))
1105 goto free_dma_dom; 1281 goto free_dma_dom;
1106 1282
1107 /* 1283 /*
@@ -1129,22 +1305,6 @@ static bool dma_ops_domain(struct protection_domain *domain)
1129 return domain->flags & PD_DMA_OPS_MASK; 1305 return domain->flags & PD_DMA_OPS_MASK;
1130} 1306}
1131 1307
1132/*
1133 * Find out the protection domain structure for a given PCI device. This
1134 * will give us the pointer to the page table root for example.
1135 */
1136static struct protection_domain *domain_for_device(u16 devid)
1137{
1138 struct protection_domain *dom;
1139 unsigned long flags;
1140
1141 read_lock_irqsave(&amd_iommu_devtable_lock, flags);
1142 dom = amd_iommu_pd_table[devid];
1143 read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1144
1145 return dom;
1146}
1147
1148static void set_dte_entry(u16 devid, struct protection_domain *domain) 1308static void set_dte_entry(u16 devid, struct protection_domain *domain)
1149{ 1309{
1150 u64 pte_root = virt_to_phys(domain->pt_root); 1310 u64 pte_root = virt_to_phys(domain->pt_root);
@@ -1156,42 +1316,123 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain)
1156 amd_iommu_dev_table[devid].data[2] = domain->id; 1316 amd_iommu_dev_table[devid].data[2] = domain->id;
1157 amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); 1317 amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
1158 amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); 1318 amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
1319}
1320
1321static void clear_dte_entry(u16 devid)
1322{
1323 /* remove entry from the device table seen by the hardware */
1324 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
1325 amd_iommu_dev_table[devid].data[1] = 0;
1326 amd_iommu_dev_table[devid].data[2] = 0;
1159 1327
1160 amd_iommu_pd_table[devid] = domain; 1328 amd_iommu_apply_erratum_63(devid);
1329}
1330
1331static void do_attach(struct device *dev, struct protection_domain *domain)
1332{
1333 struct iommu_dev_data *dev_data;
1334 struct amd_iommu *iommu;
1335 u16 devid;
1336
1337 devid = get_device_id(dev);
1338 iommu = amd_iommu_rlookup_table[devid];
1339 dev_data = get_dev_data(dev);
1340
1341 /* Update data structures */
1342 dev_data->domain = domain;
1343 list_add(&dev_data->list, &domain->dev_list);
1344 set_dte_entry(devid, domain);
1345
1346 /* Do reference counting */
1347 domain->dev_iommu[iommu->index] += 1;
1348 domain->dev_cnt += 1;
1349
1350 /* Flush the DTE entry */
1351 iommu_flush_device(dev);
1352}
1353
1354static void do_detach(struct device *dev)
1355{
1356 struct iommu_dev_data *dev_data;
1357 struct amd_iommu *iommu;
1358 u16 devid;
1359
1360 devid = get_device_id(dev);
1361 iommu = amd_iommu_rlookup_table[devid];
1362 dev_data = get_dev_data(dev);
1363
1364 /* decrease reference counters */
1365 dev_data->domain->dev_iommu[iommu->index] -= 1;
1366 dev_data->domain->dev_cnt -= 1;
1367
1368 /* Update data structures */
1369 dev_data->domain = NULL;
1370 list_del(&dev_data->list);
1371 clear_dte_entry(devid);
1372
1373 /* Flush the DTE entry */
1374 iommu_flush_device(dev);
1161} 1375}
1162 1376
1163/* 1377/*
1164 * If a device is not yet associated with a domain, this function does 1378 * If a device is not yet associated with a domain, this function does
1165 * assigns it visible for the hardware 1379 * assigns it visible for the hardware
1166 */ 1380 */
1167static void __attach_device(struct amd_iommu *iommu, 1381static int __attach_device(struct device *dev,
1168 struct protection_domain *domain, 1382 struct protection_domain *domain)
1169 u16 devid)
1170{ 1383{
1384 struct iommu_dev_data *dev_data, *alias_data;
1385
1386 dev_data = get_dev_data(dev);
1387 alias_data = get_dev_data(dev_data->alias);
1388
1389 if (!alias_data)
1390 return -EINVAL;
1391
1171 /* lock domain */ 1392 /* lock domain */
1172 spin_lock(&domain->lock); 1393 spin_lock(&domain->lock);
1173 1394
1174 /* update DTE entry */ 1395 /* Some sanity checks */
1175 set_dte_entry(devid, domain); 1396 if (alias_data->domain != NULL &&
1397 alias_data->domain != domain)
1398 return -EBUSY;
1176 1399
1177 domain->dev_cnt += 1; 1400 if (dev_data->domain != NULL &&
1401 dev_data->domain != domain)
1402 return -EBUSY;
1403
1404 /* Do real assignment */
1405 if (dev_data->alias != dev) {
1406 alias_data = get_dev_data(dev_data->alias);
1407 if (alias_data->domain == NULL)
1408 do_attach(dev_data->alias, domain);
1409
1410 atomic_inc(&alias_data->bind);
1411 }
1412
1413 if (dev_data->domain == NULL)
1414 do_attach(dev, domain);
1415
1416 atomic_inc(&dev_data->bind);
1178 1417
1179 /* ready */ 1418 /* ready */
1180 spin_unlock(&domain->lock); 1419 spin_unlock(&domain->lock);
1420
1421 return 0;
1181} 1422}
1182 1423
1183/* 1424/*
1184 * If a device is not yet associated with a domain, this function 1425 * If a device is not yet associated with a domain, this function
1185 * assigns it to one and makes it visible to the hardware 1426 * assigns it to one and makes it visible to the hardware
1186 */ 1427 */
1187static void attach_device(struct amd_iommu *iommu, 1428static int attach_device(struct device *dev,
1188 struct protection_domain *domain, 1429 struct protection_domain *domain)
1189 u16 devid)
1190{ 1430{
1191 unsigned long flags; 1431 unsigned long flags;
1432 int ret;
1192 1433
1193 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 1434 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1194 __attach_device(iommu, domain, devid); 1435 ret = __attach_device(dev, domain);
1195 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 1436 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1196 1437
1197 /* 1438 /*
@@ -1199,98 +1440,125 @@ static void attach_device(struct amd_iommu *iommu,
1199 * left the caches in the IOMMU dirty. So we have to flush 1440 * left the caches in the IOMMU dirty. So we have to flush
1200 * here to evict all dirty stuff. 1441 * here to evict all dirty stuff.
1201 */ 1442 */
1202 iommu_queue_inv_dev_entry(iommu, devid); 1443 iommu_flush_tlb_pde(domain);
1203 iommu_flush_tlb_pde(iommu, domain->id); 1444
1445 return ret;
1204} 1446}
1205 1447
1206/* 1448/*
1207 * Removes a device from a protection domain (unlocked) 1449 * Removes a device from a protection domain (unlocked)
1208 */ 1450 */
1209static void __detach_device(struct protection_domain *domain, u16 devid) 1451static void __detach_device(struct device *dev)
1210{ 1452{
1453 struct iommu_dev_data *dev_data = get_dev_data(dev);
1454 struct iommu_dev_data *alias_data;
1455 unsigned long flags;
1211 1456
1212 /* lock domain */ 1457 BUG_ON(!dev_data->domain);
1213 spin_lock(&domain->lock);
1214
1215 /* remove domain from the lookup table */
1216 amd_iommu_pd_table[devid] = NULL;
1217 1458
1218 /* remove entry from the device table seen by the hardware */ 1459 spin_lock_irqsave(&dev_data->domain->lock, flags);
1219 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
1220 amd_iommu_dev_table[devid].data[1] = 0;
1221 amd_iommu_dev_table[devid].data[2] = 0;
1222 1460
1223 amd_iommu_apply_erratum_63(devid); 1461 if (dev_data->alias != dev) {
1462 alias_data = get_dev_data(dev_data->alias);
1463 if (atomic_dec_and_test(&alias_data->bind))
1464 do_detach(dev_data->alias);
1465 }
1224 1466
1225 /* decrease reference counter */ 1467 if (atomic_dec_and_test(&dev_data->bind))
1226 domain->dev_cnt -= 1; 1468 do_detach(dev);
1227 1469
1228 /* ready */ 1470 spin_unlock_irqrestore(&dev_data->domain->lock, flags);
1229 spin_unlock(&domain->lock);
1230 1471
1231 /* 1472 /*
1232 * If we run in passthrough mode the device must be assigned to the 1473 * If we run in passthrough mode the device must be assigned to the
1233 * passthrough domain if it is detached from any other domain 1474 * passthrough domain if it is detached from any other domain
1234 */ 1475 */
1235 if (iommu_pass_through) { 1476 if (iommu_pass_through && dev_data->domain == NULL)
1236 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; 1477 __attach_device(dev, pt_domain);
1237 __attach_device(iommu, pt_domain, devid);
1238 }
1239} 1478}
1240 1479
1241/* 1480/*
1242 * Removes a device from a protection domain (with devtable_lock held) 1481 * Removes a device from a protection domain (with devtable_lock held)
1243 */ 1482 */
1244static void detach_device(struct protection_domain *domain, u16 devid) 1483static void detach_device(struct device *dev)
1245{ 1484{
1246 unsigned long flags; 1485 unsigned long flags;
1247 1486
1248 /* lock device table */ 1487 /* lock device table */
1249 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 1488 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1250 __detach_device(domain, devid); 1489 __detach_device(dev);
1251 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 1490 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1252} 1491}
1253 1492
1493/*
1494 * Find out the protection domain structure for a given PCI device. This
1495 * will give us the pointer to the page table root for example.
1496 */
1497static struct protection_domain *domain_for_device(struct device *dev)
1498{
1499 struct protection_domain *dom;
1500 struct iommu_dev_data *dev_data, *alias_data;
1501 unsigned long flags;
1502 u16 devid, alias;
1503
1504 devid = get_device_id(dev);
1505 alias = amd_iommu_alias_table[devid];
1506 dev_data = get_dev_data(dev);
1507 alias_data = get_dev_data(dev_data->alias);
1508 if (!alias_data)
1509 return NULL;
1510
1511 read_lock_irqsave(&amd_iommu_devtable_lock, flags);
1512 dom = dev_data->domain;
1513 if (dom == NULL &&
1514 alias_data->domain != NULL) {
1515 __attach_device(dev, alias_data->domain);
1516 dom = alias_data->domain;
1517 }
1518
1519 read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1520
1521 return dom;
1522}
1523
1254static int device_change_notifier(struct notifier_block *nb, 1524static int device_change_notifier(struct notifier_block *nb,
1255 unsigned long action, void *data) 1525 unsigned long action, void *data)
1256{ 1526{
1257 struct device *dev = data; 1527 struct device *dev = data;
1258 struct pci_dev *pdev = to_pci_dev(dev); 1528 u16 devid;
1259 u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
1260 struct protection_domain *domain; 1529 struct protection_domain *domain;
1261 struct dma_ops_domain *dma_domain; 1530 struct dma_ops_domain *dma_domain;
1262 struct amd_iommu *iommu; 1531 struct amd_iommu *iommu;
1263 unsigned long flags; 1532 unsigned long flags;
1264 1533
1265 if (devid > amd_iommu_last_bdf) 1534 if (!check_device(dev))
1266 goto out; 1535 return 0;
1267
1268 devid = amd_iommu_alias_table[devid];
1269
1270 iommu = amd_iommu_rlookup_table[devid];
1271 if (iommu == NULL)
1272 goto out;
1273
1274 domain = domain_for_device(devid);
1275 1536
1276 if (domain && !dma_ops_domain(domain)) 1537 devid = get_device_id(dev);
1277 WARN_ONCE(1, "AMD IOMMU WARNING: device %s already bound " 1538 iommu = amd_iommu_rlookup_table[devid];
1278 "to a non-dma-ops domain\n", dev_name(dev));
1279 1539
1280 switch (action) { 1540 switch (action) {
1281 case BUS_NOTIFY_UNBOUND_DRIVER: 1541 case BUS_NOTIFY_UNBOUND_DRIVER:
1542
1543 domain = domain_for_device(dev);
1544
1282 if (!domain) 1545 if (!domain)
1283 goto out; 1546 goto out;
1284 if (iommu_pass_through) 1547 if (iommu_pass_through)
1285 break; 1548 break;
1286 detach_device(domain, devid); 1549 detach_device(dev);
1287 break; 1550 break;
1288 case BUS_NOTIFY_ADD_DEVICE: 1551 case BUS_NOTIFY_ADD_DEVICE:
1552
1553 iommu_init_device(dev);
1554
1555 domain = domain_for_device(dev);
1556
1289 /* allocate a protection domain if a device is added */ 1557 /* allocate a protection domain if a device is added */
1290 dma_domain = find_protection_domain(devid); 1558 dma_domain = find_protection_domain(devid);
1291 if (dma_domain) 1559 if (dma_domain)
1292 goto out; 1560 goto out;
1293 dma_domain = dma_ops_domain_alloc(iommu); 1561 dma_domain = dma_ops_domain_alloc();
1294 if (!dma_domain) 1562 if (!dma_domain)
1295 goto out; 1563 goto out;
1296 dma_domain->target_dev = devid; 1564 dma_domain->target_dev = devid;
@@ -1300,11 +1568,15 @@ static int device_change_notifier(struct notifier_block *nb,
1300 spin_unlock_irqrestore(&iommu_pd_list_lock, flags); 1568 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
1301 1569
1302 break; 1570 break;
1571 case BUS_NOTIFY_DEL_DEVICE:
1572
1573 iommu_uninit_device(dev);
1574
1303 default: 1575 default:
1304 goto out; 1576 goto out;
1305 } 1577 }
1306 1578
1307 iommu_queue_inv_dev_entry(iommu, devid); 1579 iommu_flush_device(dev);
1308 iommu_completion_wait(iommu); 1580 iommu_completion_wait(iommu);
1309 1581
1310out: 1582out:
@@ -1322,106 +1594,46 @@ static struct notifier_block device_nb = {
1322 *****************************************************************************/ 1594 *****************************************************************************/
1323 1595
1324/* 1596/*
1325 * This function checks if the driver got a valid device from the caller to
1326 * avoid dereferencing invalid pointers.
1327 */
1328static bool check_device(struct device *dev)
1329{
1330 if (!dev || !dev->dma_mask)
1331 return false;
1332
1333 return true;
1334}
1335
1336/*
1337 * In this function the list of preallocated protection domains is traversed to
1338 * find the domain for a specific device
1339 */
1340static struct dma_ops_domain *find_protection_domain(u16 devid)
1341{
1342 struct dma_ops_domain *entry, *ret = NULL;
1343 unsigned long flags;
1344
1345 if (list_empty(&iommu_pd_list))
1346 return NULL;
1347
1348 spin_lock_irqsave(&iommu_pd_list_lock, flags);
1349
1350 list_for_each_entry(entry, &iommu_pd_list, list) {
1351 if (entry->target_dev == devid) {
1352 ret = entry;
1353 break;
1354 }
1355 }
1356
1357 spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
1358
1359 return ret;
1360}
1361
1362/*
1363 * In the dma_ops path we only have the struct device. This function 1597 * In the dma_ops path we only have the struct device. This function
1364 * finds the corresponding IOMMU, the protection domain and the 1598 * finds the corresponding IOMMU, the protection domain and the
1365 * requestor id for a given device. 1599 * requestor id for a given device.
1366 * If the device is not yet associated with a domain this is also done 1600 * If the device is not yet associated with a domain this is also done
1367 * in this function. 1601 * in this function.
1368 */ 1602 */
1369static int get_device_resources(struct device *dev, 1603static struct protection_domain *get_domain(struct device *dev)
1370 struct amd_iommu **iommu,
1371 struct protection_domain **domain,
1372 u16 *bdf)
1373{ 1604{
1605 struct protection_domain *domain;
1374 struct dma_ops_domain *dma_dom; 1606 struct dma_ops_domain *dma_dom;
1375 struct pci_dev *pcidev; 1607 u16 devid = get_device_id(dev);
1376 u16 _bdf;
1377
1378 *iommu = NULL;
1379 *domain = NULL;
1380 *bdf = 0xffff;
1381
1382 if (dev->bus != &pci_bus_type)
1383 return 0;
1384 1608
1385 pcidev = to_pci_dev(dev); 1609 if (!check_device(dev))
1386 _bdf = calc_devid(pcidev->bus->number, pcidev->devfn); 1610 return ERR_PTR(-EINVAL);
1387 1611
1388 /* device not translated by any IOMMU in the system? */ 1612 domain = domain_for_device(dev);
1389 if (_bdf > amd_iommu_last_bdf) 1613 if (domain != NULL && !dma_ops_domain(domain))
1390 return 0; 1614 return ERR_PTR(-EBUSY);
1391 1615
1392 *bdf = amd_iommu_alias_table[_bdf]; 1616 if (domain != NULL)
1617 return domain;
1393 1618
1394 *iommu = amd_iommu_rlookup_table[*bdf]; 1619 /* Device not bound yet - bind it */
1395 if (*iommu == NULL) 1620 dma_dom = find_protection_domain(devid);
1396 return 0; 1621 if (!dma_dom)
1397 *domain = domain_for_device(*bdf); 1622 dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
1398 if (*domain == NULL) { 1623 attach_device(dev, &dma_dom->domain);
1399 dma_dom = find_protection_domain(*bdf); 1624 DUMP_printk("Using protection domain %d for device %s\n",
1400 if (!dma_dom) 1625 dma_dom->domain.id, dev_name(dev));
1401 dma_dom = (*iommu)->default_dom;
1402 *domain = &dma_dom->domain;
1403 attach_device(*iommu, *domain, *bdf);
1404 DUMP_printk("Using protection domain %d for device %s\n",
1405 (*domain)->id, dev_name(dev));
1406 }
1407
1408 if (domain_for_device(_bdf) == NULL)
1409 attach_device(*iommu, *domain, _bdf);
1410 1626
1411 return 1; 1627 return &dma_dom->domain;
1412} 1628}
1413 1629
1414static void update_device_table(struct protection_domain *domain) 1630static void update_device_table(struct protection_domain *domain)
1415{ 1631{
1416 unsigned long flags; 1632 struct iommu_dev_data *dev_data;
1417 int i;
1418 1633
1419 for (i = 0; i <= amd_iommu_last_bdf; ++i) { 1634 list_for_each_entry(dev_data, &domain->dev_list, list) {
1420 if (amd_iommu_pd_table[i] != domain) 1635 u16 devid = get_device_id(dev_data->dev);
1421 continue; 1636 set_dte_entry(devid, domain);
1422 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1423 set_dte_entry(i, domain);
1424 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1425 } 1637 }
1426} 1638}
1427 1639
@@ -1431,76 +1643,13 @@ static void update_domain(struct protection_domain *domain)
1431 return; 1643 return;
1432 1644
1433 update_device_table(domain); 1645 update_device_table(domain);
1434 flush_devices_by_domain(domain); 1646 iommu_flush_domain_devices(domain);
1435 iommu_flush_domain(domain->id); 1647 iommu_flush_tlb_pde(domain);
1436 1648
1437 domain->updated = false; 1649 domain->updated = false;
1438} 1650}
1439 1651
1440/* 1652/*
1441 * This function is used to add another level to an IO page table. Adding
1442 * another level increases the size of the address space by 9 bits to a size up
1443 * to 64 bits.
1444 */
1445static bool increase_address_space(struct protection_domain *domain,
1446 gfp_t gfp)
1447{
1448 u64 *pte;
1449
1450 if (domain->mode == PAGE_MODE_6_LEVEL)
1451 /* address space already 64 bit large */
1452 return false;
1453
1454 pte = (void *)get_zeroed_page(gfp);
1455 if (!pte)
1456 return false;
1457
1458 *pte = PM_LEVEL_PDE(domain->mode,
1459 virt_to_phys(domain->pt_root));
1460 domain->pt_root = pte;
1461 domain->mode += 1;
1462 domain->updated = true;
1463
1464 return true;
1465}
1466
1467static u64 *alloc_pte(struct protection_domain *domain,
1468 unsigned long address,
1469 int end_lvl,
1470 u64 **pte_page,
1471 gfp_t gfp)
1472{
1473 u64 *pte, *page;
1474 int level;
1475
1476 while (address > PM_LEVEL_SIZE(domain->mode))
1477 increase_address_space(domain, gfp);
1478
1479 level = domain->mode - 1;
1480 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1481
1482 while (level > end_lvl) {
1483 if (!IOMMU_PTE_PRESENT(*pte)) {
1484 page = (u64 *)get_zeroed_page(gfp);
1485 if (!page)
1486 return NULL;
1487 *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
1488 }
1489
1490 level -= 1;
1491
1492 pte = IOMMU_PTE_PAGE(*pte);
1493
1494 if (pte_page && level == end_lvl)
1495 *pte_page = pte;
1496
1497 pte = &pte[PM_LEVEL_INDEX(level, address)];
1498 }
1499
1500 return pte;
1501}
1502
1503/*
1504 * This function fetches the PTE for a given address in the aperture 1653 * This function fetches the PTE for a given address in the aperture
1505 */ 1654 */
1506static u64* dma_ops_get_pte(struct dma_ops_domain *dom, 1655static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
@@ -1530,8 +1679,7 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
1530 * This is the generic map function. It maps one 4kb page at paddr to 1679 * This is the generic map function. It maps one 4kb page at paddr to
1531 * the given address in the DMA address space for the domain. 1680 * the given address in the DMA address space for the domain.
1532 */ 1681 */
1533static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu, 1682static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
1534 struct dma_ops_domain *dom,
1535 unsigned long address, 1683 unsigned long address,
1536 phys_addr_t paddr, 1684 phys_addr_t paddr,
1537 int direction) 1685 int direction)
@@ -1544,7 +1692,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
1544 1692
1545 pte = dma_ops_get_pte(dom, address); 1693 pte = dma_ops_get_pte(dom, address);
1546 if (!pte) 1694 if (!pte)
1547 return bad_dma_address; 1695 return DMA_ERROR_CODE;
1548 1696
1549 __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC; 1697 __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
1550 1698
@@ -1565,8 +1713,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
1565/* 1713/*
1566 * The generic unmapping function for one page in the DMA address space. 1714
1567 */ 1715 */
1568static void dma_ops_domain_unmap(struct amd_iommu *iommu, 1716static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
1569 struct dma_ops_domain *dom,
1570 unsigned long address) 1717 unsigned long address)
1571{ 1718{
1572 struct aperture_range *aperture; 1719 struct aperture_range *aperture;
@@ -1597,7 +1744,6 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
1597 * Must be called with the domain lock held. 1744 * Must be called with the domain lock held.
1598 */ 1745 */
1599static dma_addr_t __map_single(struct device *dev, 1746static dma_addr_t __map_single(struct device *dev,
1600 struct amd_iommu *iommu,
1601 struct dma_ops_domain *dma_dom, 1747 struct dma_ops_domain *dma_dom,
1602 phys_addr_t paddr, 1748 phys_addr_t paddr,
1603 size_t size, 1749 size_t size,
@@ -1625,7 +1771,7 @@ static dma_addr_t __map_single(struct device *dev,
1625retry: 1771retry:
1626 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, 1772 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
1627 dma_mask); 1773 dma_mask);
1628 if (unlikely(address == bad_dma_address)) { 1774 if (unlikely(address == DMA_ERROR_CODE)) {
1629 /* 1775 /*
1630 * setting next_address here will let the address 1776 * setting next_address here will let the address
1631 * allocator only scan the new allocated range in the 1777 * allocator only scan the new allocated range in the
@@ -1633,7 +1779,7 @@ retry:
1633 */ 1779 */
1634 dma_dom->next_address = dma_dom->aperture_size; 1780 dma_dom->next_address = dma_dom->aperture_size;
1635 1781
1636 if (alloc_new_range(iommu, dma_dom, false, GFP_ATOMIC)) 1782 if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
1637 goto out; 1783 goto out;
1638 1784
1639 /* 1785 /*
@@ -1645,8 +1791,8 @@ retry:
1645 1791
1646 start = address; 1792 start = address;
1647 for (i = 0; i < pages; ++i) { 1793 for (i = 0; i < pages; ++i) {
1648 ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir); 1794 ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
1649 if (ret == bad_dma_address) 1795 if (ret == DMA_ERROR_CODE)
1650 goto out_unmap; 1796 goto out_unmap;
1651 1797
1652 paddr += PAGE_SIZE; 1798 paddr += PAGE_SIZE;
@@ -1657,10 +1803,10 @@ retry:
1657 ADD_STATS_COUNTER(alloced_io_mem, size); 1803 ADD_STATS_COUNTER(alloced_io_mem, size);
1658 1804
1659 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { 1805 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
1660 iommu_flush_tlb(iommu, dma_dom->domain.id); 1806 iommu_flush_tlb(&dma_dom->domain);
1661 dma_dom->need_flush = false; 1807 dma_dom->need_flush = false;
1662 } else if (unlikely(iommu_has_npcache(iommu))) 1808 } else if (unlikely(amd_iommu_np_cache))
1663 iommu_flush_pages(iommu, dma_dom->domain.id, address, size); 1809 iommu_flush_pages(&dma_dom->domain, address, size);
1664 1810
1665out: 1811out:
1666 return address; 1812 return address;
@@ -1669,20 +1815,19 @@ out_unmap:
1669 1815
1670 for (--i; i >= 0; --i) { 1816 for (--i; i >= 0; --i) {
1671 start -= PAGE_SIZE; 1817 start -= PAGE_SIZE;
1672 dma_ops_domain_unmap(iommu, dma_dom, start); 1818 dma_ops_domain_unmap(dma_dom, start);
1673 } 1819 }
1674 1820
1675 dma_ops_free_addresses(dma_dom, address, pages); 1821 dma_ops_free_addresses(dma_dom, address, pages);
1676 1822
1677 return bad_dma_address; 1823 return DMA_ERROR_CODE;
1678} 1824}
1679 1825
1680/* 1826/*
1681 * Does the reverse of the __map_single function. Must be called with 1827 * Does the reverse of the __map_single function. Must be called with
1682 * the domain lock held too 1828 * the domain lock held too
1683 */ 1829 */
1684static void __unmap_single(struct amd_iommu *iommu, 1830static void __unmap_single(struct dma_ops_domain *dma_dom,
1685 struct dma_ops_domain *dma_dom,
1686 dma_addr_t dma_addr, 1831 dma_addr_t dma_addr,
1687 size_t size, 1832 size_t size,
1688 int dir) 1833 int dir)
@@ -1690,7 +1835,7 @@ static void __unmap_single(struct amd_iommu *iommu,
1690 dma_addr_t i, start; 1835 dma_addr_t i, start;
1691 unsigned int pages; 1836 unsigned int pages;
1692 1837
1693 if ((dma_addr == bad_dma_address) || 1838 if ((dma_addr == DMA_ERROR_CODE) ||
1694 (dma_addr + size > dma_dom->aperture_size)) 1839 (dma_addr + size > dma_dom->aperture_size))
1695 return; 1840 return;
1696 1841
@@ -1699,7 +1844,7 @@ static void __unmap_single(struct amd_iommu *iommu,
1699 start = dma_addr; 1844 start = dma_addr;
1700 1845
1701 for (i = 0; i < pages; ++i) { 1846 for (i = 0; i < pages; ++i) {
1702 dma_ops_domain_unmap(iommu, dma_dom, start); 1847 dma_ops_domain_unmap(dma_dom, start);
1703 start += PAGE_SIZE; 1848 start += PAGE_SIZE;
1704 } 1849 }
1705 1850
@@ -1708,7 +1853,7 @@ static void __unmap_single(struct amd_iommu *iommu,
1708 dma_ops_free_addresses(dma_dom, dma_addr, pages); 1853 dma_ops_free_addresses(dma_dom, dma_addr, pages);
1709 1854
1710 if (amd_iommu_unmap_flush || dma_dom->need_flush) { 1855 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
1711 iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size); 1856 iommu_flush_pages(&dma_dom->domain, dma_addr, size);
1712 dma_dom->need_flush = false; 1857 dma_dom->need_flush = false;
1713 } 1858 }
1714} 1859}
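Editor's note on the bad_dma_address -> DMA_ERROR_CODE switch running through these hunks: the change only renames the sentinel that the dma_ops backend returns internally; drivers are not meant to compare against either value directly. A hedged sketch of the normal consumer-side pattern through the generic DMA API (the function and its error handling are illustrative, not taken from the patch):

    #include <linux/dma-mapping.h>

    static int send_one_page(struct device *dev, struct page *page)
    {
            dma_addr_t addr;

            addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, addr))       /* catches DMA_ERROR_CODE */
                    return -ENOMEM;

            /* ... hand 'addr' to the hardware here ... */

            dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
            return 0;
    }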
@@ -1722,36 +1867,29 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
1722 struct dma_attrs *attrs) 1867 struct dma_attrs *attrs)
1723{ 1868{
1724 unsigned long flags; 1869 unsigned long flags;
1725 struct amd_iommu *iommu;
1726 struct protection_domain *domain; 1870 struct protection_domain *domain;
1727 u16 devid;
1728 dma_addr_t addr; 1871 dma_addr_t addr;
1729 u64 dma_mask; 1872 u64 dma_mask;
1730 phys_addr_t paddr = page_to_phys(page) + offset; 1873 phys_addr_t paddr = page_to_phys(page) + offset;
1731 1874
1732 INC_STATS_COUNTER(cnt_map_single); 1875 INC_STATS_COUNTER(cnt_map_single);
1733 1876
1734 if (!check_device(dev)) 1877 domain = get_domain(dev);
1735 return bad_dma_address; 1878 if (PTR_ERR(domain) == -EINVAL)
1736
1737 dma_mask = *dev->dma_mask;
1738
1739 get_device_resources(dev, &iommu, &domain, &devid);
1740
1741 if (iommu == NULL || domain == NULL)
1742 /* device not handled by any AMD IOMMU */
1743 return (dma_addr_t)paddr; 1879 return (dma_addr_t)paddr;
1880 else if (IS_ERR(domain))
1881 return DMA_ERROR_CODE;
1744 1882
1745 if (!dma_ops_domain(domain)) 1883 dma_mask = *dev->dma_mask;
1746 return bad_dma_address;
1747 1884
1748 spin_lock_irqsave(&domain->lock, flags); 1885 spin_lock_irqsave(&domain->lock, flags);
1749 addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false, 1886
1887 addr = __map_single(dev, domain->priv, paddr, size, dir, false,
1750 dma_mask); 1888 dma_mask);
1751 if (addr == bad_dma_address) 1889 if (addr == DMA_ERROR_CODE)
1752 goto out; 1890 goto out;
1753 1891
1754 iommu_completion_wait(iommu); 1892 iommu_flush_complete(domain);
1755 1893
1756out: 1894out:
1757 spin_unlock_irqrestore(&domain->lock, flags); 1895 spin_unlock_irqrestore(&domain->lock, flags);
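The reworked map_page() above leans on the kernel's ERR_PTR convention so that get_domain() can report two different outcomes through a single pointer: -EINVAL means "not handled by any AMD IOMMU, fall back to an untranslated mapping", while any other error is treated as a hard failure. A minimal sketch of that convention; apart from the <linux/err.h> helpers, every name below is invented:

    #include <linux/err.h>

    static struct protection_domain *lookup_domain(struct device *dev)
    {
            if (!device_is_ours(dev))               /* hypothetical check */
                    return ERR_PTR(-EINVAL);        /* caller falls back to direct mapping */

            if (!device_initialised(dev))           /* hypothetical check */
                    return ERR_PTR(-EBUSY);         /* assumed hard-failure case */

            return device_domain(dev);              /* hypothetical accessor */
    }

    /* caller side, mirroring map_page() above */
    domain = lookup_domain(dev);
    if (PTR_ERR(domain) == -EINVAL)
            return (dma_addr_t)paddr;               /* device not translated */
    else if (IS_ERR(domain))
            return DMA_ERROR_CODE;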
@@ -1766,25 +1904,19 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
1766 enum dma_data_direction dir, struct dma_attrs *attrs) 1904 enum dma_data_direction dir, struct dma_attrs *attrs)
1767{ 1905{
1768 unsigned long flags; 1906 unsigned long flags;
1769 struct amd_iommu *iommu;
1770 struct protection_domain *domain; 1907 struct protection_domain *domain;
1771 u16 devid;
1772 1908
1773 INC_STATS_COUNTER(cnt_unmap_single); 1909 INC_STATS_COUNTER(cnt_unmap_single);
1774 1910
1775 if (!check_device(dev) || 1911 domain = get_domain(dev);
1776 !get_device_resources(dev, &iommu, &domain, &devid)) 1912 if (IS_ERR(domain))
1777 /* device not handled by any AMD IOMMU */
1778 return;
1779
1780 if (!dma_ops_domain(domain))
1781 return; 1913 return;
1782 1914
1783 spin_lock_irqsave(&domain->lock, flags); 1915 spin_lock_irqsave(&domain->lock, flags);
1784 1916
1785 __unmap_single(iommu, domain->priv, dma_addr, size, dir); 1917 __unmap_single(domain->priv, dma_addr, size, dir);
1786 1918
1787 iommu_completion_wait(iommu); 1919 iommu_flush_complete(domain);
1788 1920
1789 spin_unlock_irqrestore(&domain->lock, flags); 1921 spin_unlock_irqrestore(&domain->lock, flags);
1790} 1922}
@@ -1816,9 +1948,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
1816 struct dma_attrs *attrs) 1948 struct dma_attrs *attrs)
1817{ 1949{
1818 unsigned long flags; 1950 unsigned long flags;
1819 struct amd_iommu *iommu;
1820 struct protection_domain *domain; 1951 struct protection_domain *domain;
1821 u16 devid;
1822 int i; 1952 int i;
1823 struct scatterlist *s; 1953 struct scatterlist *s;
1824 phys_addr_t paddr; 1954 phys_addr_t paddr;
@@ -1827,25 +1957,20 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
1827 1957
1828 INC_STATS_COUNTER(cnt_map_sg); 1958 INC_STATS_COUNTER(cnt_map_sg);
1829 1959
1830 if (!check_device(dev)) 1960 domain = get_domain(dev);
1961 if (PTR_ERR(domain) == -EINVAL)
1962 return map_sg_no_iommu(dev, sglist, nelems, dir);
1963 else if (IS_ERR(domain))
1831 return 0; 1964 return 0;
1832 1965
1833 dma_mask = *dev->dma_mask; 1966 dma_mask = *dev->dma_mask;
1834 1967
1835 get_device_resources(dev, &iommu, &domain, &devid);
1836
1837 if (!iommu || !domain)
1838 return map_sg_no_iommu(dev, sglist, nelems, dir);
1839
1840 if (!dma_ops_domain(domain))
1841 return 0;
1842
1843 spin_lock_irqsave(&domain->lock, flags); 1968 spin_lock_irqsave(&domain->lock, flags);
1844 1969
1845 for_each_sg(sglist, s, nelems, i) { 1970 for_each_sg(sglist, s, nelems, i) {
1846 paddr = sg_phys(s); 1971 paddr = sg_phys(s);
1847 1972
1848 s->dma_address = __map_single(dev, iommu, domain->priv, 1973 s->dma_address = __map_single(dev, domain->priv,
1849 paddr, s->length, dir, false, 1974 paddr, s->length, dir, false,
1850 dma_mask); 1975 dma_mask);
1851 1976
@@ -1856,7 +1981,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
1856 goto unmap; 1981 goto unmap;
1857 } 1982 }
1858 1983
1859 iommu_completion_wait(iommu); 1984 iommu_flush_complete(domain);
1860 1985
1861out: 1986out:
1862 spin_unlock_irqrestore(&domain->lock, flags); 1987 spin_unlock_irqrestore(&domain->lock, flags);
@@ -1865,7 +1990,7 @@ out:
1865unmap: 1990unmap:
1866 for_each_sg(sglist, s, mapped_elems, i) { 1991 for_each_sg(sglist, s, mapped_elems, i) {
1867 if (s->dma_address) 1992 if (s->dma_address)
1868 __unmap_single(iommu, domain->priv, s->dma_address, 1993 __unmap_single(domain->priv, s->dma_address,
1869 s->dma_length, dir); 1994 s->dma_length, dir);
1870 s->dma_address = s->dma_length = 0; 1995 s->dma_address = s->dma_length = 0;
1871 } 1996 }
@@ -1884,30 +2009,25 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
1884 struct dma_attrs *attrs) 2009 struct dma_attrs *attrs)
1885{ 2010{
1886 unsigned long flags; 2011 unsigned long flags;
1887 struct amd_iommu *iommu;
1888 struct protection_domain *domain; 2012 struct protection_domain *domain;
1889 struct scatterlist *s; 2013 struct scatterlist *s;
1890 u16 devid;
1891 int i; 2014 int i;
1892 2015
1893 INC_STATS_COUNTER(cnt_unmap_sg); 2016 INC_STATS_COUNTER(cnt_unmap_sg);
1894 2017
1895 if (!check_device(dev) || 2018 domain = get_domain(dev);
1896 !get_device_resources(dev, &iommu, &domain, &devid)) 2019 if (IS_ERR(domain))
1897 return;
1898
1899 if (!dma_ops_domain(domain))
1900 return; 2020 return;
1901 2021
1902 spin_lock_irqsave(&domain->lock, flags); 2022 spin_lock_irqsave(&domain->lock, flags);
1903 2023
1904 for_each_sg(sglist, s, nelems, i) { 2024 for_each_sg(sglist, s, nelems, i) {
1905 __unmap_single(iommu, domain->priv, s->dma_address, 2025 __unmap_single(domain->priv, s->dma_address,
1906 s->dma_length, dir); 2026 s->dma_length, dir);
1907 s->dma_address = s->dma_length = 0; 2027 s->dma_address = s->dma_length = 0;
1908 } 2028 }
1909 2029
1910 iommu_completion_wait(iommu); 2030 iommu_flush_complete(domain);
1911 2031
1912 spin_unlock_irqrestore(&domain->lock, flags); 2032 spin_unlock_irqrestore(&domain->lock, flags);
1913} 2033}
@@ -1920,49 +2040,44 @@ static void *alloc_coherent(struct device *dev, size_t size,
1920{ 2040{
1921 unsigned long flags; 2041 unsigned long flags;
1922 void *virt_addr; 2042 void *virt_addr;
1923 struct amd_iommu *iommu;
1924 struct protection_domain *domain; 2043 struct protection_domain *domain;
1925 u16 devid;
1926 phys_addr_t paddr; 2044 phys_addr_t paddr;
1927 u64 dma_mask = dev->coherent_dma_mask; 2045 u64 dma_mask = dev->coherent_dma_mask;
1928 2046
1929 INC_STATS_COUNTER(cnt_alloc_coherent); 2047 INC_STATS_COUNTER(cnt_alloc_coherent);
1930 2048
1931 if (!check_device(dev)) 2049 domain = get_domain(dev);
2050 if (PTR_ERR(domain) == -EINVAL) {
2051 virt_addr = (void *)__get_free_pages(flag, get_order(size));
2052 *dma_addr = __pa(virt_addr);
2053 return virt_addr;
2054 } else if (IS_ERR(domain))
1932 return NULL; 2055 return NULL;
1933 2056
1934 if (!get_device_resources(dev, &iommu, &domain, &devid)) 2057 dma_mask = dev->coherent_dma_mask;
1935 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 2058 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
2059 flag |= __GFP_ZERO;
1936 2060
1937 flag |= __GFP_ZERO;
1938 virt_addr = (void *)__get_free_pages(flag, get_order(size)); 2061 virt_addr = (void *)__get_free_pages(flag, get_order(size));
1939 if (!virt_addr) 2062 if (!virt_addr)
1940 return NULL; 2063 return NULL;
1941 2064
1942 paddr = virt_to_phys(virt_addr); 2065 paddr = virt_to_phys(virt_addr);
1943 2066
1944 if (!iommu || !domain) {
1945 *dma_addr = (dma_addr_t)paddr;
1946 return virt_addr;
1947 }
1948
1949 if (!dma_ops_domain(domain))
1950 goto out_free;
1951
1952 if (!dma_mask) 2067 if (!dma_mask)
1953 dma_mask = *dev->dma_mask; 2068 dma_mask = *dev->dma_mask;
1954 2069
1955 spin_lock_irqsave(&domain->lock, flags); 2070 spin_lock_irqsave(&domain->lock, flags);
1956 2071
1957 *dma_addr = __map_single(dev, iommu, domain->priv, paddr, 2072 *dma_addr = __map_single(dev, domain->priv, paddr,
1958 size, DMA_BIDIRECTIONAL, true, dma_mask); 2073 size, DMA_BIDIRECTIONAL, true, dma_mask);
1959 2074
1960 if (*dma_addr == bad_dma_address) { 2075 if (*dma_addr == DMA_ERROR_CODE) {
1961 spin_unlock_irqrestore(&domain->lock, flags); 2076 spin_unlock_irqrestore(&domain->lock, flags);
1962 goto out_free; 2077 goto out_free;
1963 } 2078 }
1964 2079
1965 iommu_completion_wait(iommu); 2080 iommu_flush_complete(domain);
1966 2081
1967 spin_unlock_irqrestore(&domain->lock, flags); 2082 spin_unlock_irqrestore(&domain->lock, flags);
1968 2083
@@ -1982,28 +2097,19 @@ static void free_coherent(struct device *dev, size_t size,
1982 void *virt_addr, dma_addr_t dma_addr) 2097 void *virt_addr, dma_addr_t dma_addr)
1983{ 2098{
1984 unsigned long flags; 2099 unsigned long flags;
1985 struct amd_iommu *iommu;
1986 struct protection_domain *domain; 2100 struct protection_domain *domain;
1987 u16 devid;
1988 2101
1989 INC_STATS_COUNTER(cnt_free_coherent); 2102 INC_STATS_COUNTER(cnt_free_coherent);
1990 2103
1991 if (!check_device(dev)) 2104 domain = get_domain(dev);
1992 return; 2105 if (IS_ERR(domain))
1993
1994 get_device_resources(dev, &iommu, &domain, &devid);
1995
1996 if (!iommu || !domain)
1997 goto free_mem;
1998
1999 if (!dma_ops_domain(domain))
2000 goto free_mem; 2106 goto free_mem;
2001 2107
2002 spin_lock_irqsave(&domain->lock, flags); 2108 spin_lock_irqsave(&domain->lock, flags);
2003 2109
2004 __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); 2110 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
2005 2111
2006 iommu_completion_wait(iommu); 2112 iommu_flush_complete(domain);
2007 2113
2008 spin_unlock_irqrestore(&domain->lock, flags); 2114 spin_unlock_irqrestore(&domain->lock, flags);
2009 2115
@@ -2017,22 +2123,7 @@ free_mem:
2017 */ 2123 */
2018static int amd_iommu_dma_supported(struct device *dev, u64 mask) 2124static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2019{ 2125{
2020 u16 bdf; 2126 return check_device(dev);
2021 struct pci_dev *pcidev;
2022
2023 /* No device or no PCI device */
2024 if (!dev || dev->bus != &pci_bus_type)
2025 return 0;
2026
2027 pcidev = to_pci_dev(dev);
2028
2029 bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
2030
2031 /* Out of our scope? */
2032 if (bdf > amd_iommu_last_bdf)
2033 return 0;
2034
2035 return 1;
2036} 2127}
2037 2128
2038/* 2129/*
@@ -2046,25 +2137,30 @@ static void prealloc_protection_domains(void)
2046{ 2137{
2047 struct pci_dev *dev = NULL; 2138 struct pci_dev *dev = NULL;
2048 struct dma_ops_domain *dma_dom; 2139 struct dma_ops_domain *dma_dom;
2049 struct amd_iommu *iommu;
2050 u16 devid; 2140 u16 devid;
2051 2141
2052 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2142 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
2053 devid = calc_devid(dev->bus->number, dev->devfn); 2143
2054 if (devid > amd_iommu_last_bdf) 2144 /* Do we handle this device? */
2055 continue; 2145 if (!check_device(&dev->dev))
2056 devid = amd_iommu_alias_table[devid];
2057 if (domain_for_device(devid))
2058 continue; 2146 continue;
2059 iommu = amd_iommu_rlookup_table[devid]; 2147
2060 if (!iommu) 2148 iommu_init_device(&dev->dev);
2149
2150 /* Is there already any domain for it? */
2151 if (domain_for_device(&dev->dev))
2061 continue; 2152 continue;
2062 dma_dom = dma_ops_domain_alloc(iommu); 2153
2154 devid = get_device_id(&dev->dev);
2155
2156 dma_dom = dma_ops_domain_alloc();
2063 if (!dma_dom) 2157 if (!dma_dom)
2064 continue; 2158 continue;
2065 init_unity_mappings_for_device(dma_dom, devid); 2159 init_unity_mappings_for_device(dma_dom, devid);
2066 dma_dom->target_dev = devid; 2160 dma_dom->target_dev = devid;
2067 2161
2162 attach_device(&dev->dev, &dma_dom->domain);
2163
2068 list_add_tail(&dma_dom->list, &iommu_pd_list); 2164 list_add_tail(&dma_dom->list, &iommu_pd_list);
2069 } 2165 }
2070} 2166}
@@ -2093,7 +2189,7 @@ int __init amd_iommu_init_dma_ops(void)
2093 * protection domain will be assigned to the default one. 2189 * protection domain will be assigned to the default one.
2094 */ 2190 */
2095 for_each_iommu(iommu) { 2191 for_each_iommu(iommu) {
2096 iommu->default_dom = dma_ops_domain_alloc(iommu); 2192 iommu->default_dom = dma_ops_domain_alloc();
2097 if (iommu->default_dom == NULL) 2193 if (iommu->default_dom == NULL)
2098 return -ENOMEM; 2194 return -ENOMEM;
2099 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK; 2195 iommu->default_dom->domain.flags |= PD_DEFAULT_MASK;
@@ -2103,15 +2199,12 @@ int __init amd_iommu_init_dma_ops(void)
2103 } 2199 }
2104 2200
2105 /* 2201 /*
2106 * If device isolation is enabled, pre-allocate the protection 2202 * Pre-allocate the protection domains for each device.
2107 * domains for each device.
2108 */ 2203 */
2109 if (amd_iommu_isolate) 2204 prealloc_protection_domains();
2110 prealloc_protection_domains();
2111 2205
2112 iommu_detected = 1; 2206 iommu_detected = 1;
2113 force_iommu = 1; 2207 swiotlb = 0;
2114 bad_dma_address = 0;
2115#ifdef CONFIG_GART_IOMMU 2208#ifdef CONFIG_GART_IOMMU
2116 gart_iommu_aperture_disabled = 1; 2209 gart_iommu_aperture_disabled = 1;
2117 gart_iommu_aperture = 0; 2210 gart_iommu_aperture = 0;
@@ -2150,14 +2243,17 @@ free_domains:
2150 2243
2151static void cleanup_domain(struct protection_domain *domain) 2244static void cleanup_domain(struct protection_domain *domain)
2152{ 2245{
2246 struct iommu_dev_data *dev_data, *next;
2153 unsigned long flags; 2247 unsigned long flags;
2154 u16 devid;
2155 2248
2156 write_lock_irqsave(&amd_iommu_devtable_lock, flags); 2249 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2157 2250
2158 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) 2251 list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
2159 if (amd_iommu_pd_table[devid] == domain) 2252 struct device *dev = dev_data->dev;
2160 __detach_device(domain, devid); 2253
2254 do_detach(dev);
2255 atomic_set(&dev_data->bind, 0);
2256 }
2161 2257
2162 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 2258 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2163} 2259}
@@ -2167,6 +2263,8 @@ static void protection_domain_free(struct protection_domain *domain)
2167 if (!domain) 2263 if (!domain)
2168 return; 2264 return;
2169 2265
2266 del_domain_from_list(domain);
2267
2170 if (domain->id) 2268 if (domain->id)
2171 domain_id_free(domain->id); 2269 domain_id_free(domain->id);
2172 2270
@@ -2185,6 +2283,9 @@ static struct protection_domain *protection_domain_alloc(void)
2185 domain->id = domain_id_alloc(); 2283 domain->id = domain_id_alloc();
2186 if (!domain->id) 2284 if (!domain->id)
2187 goto out_err; 2285 goto out_err;
2286 INIT_LIST_HEAD(&domain->dev_list);
2287
2288 add_domain_to_list(domain);
2188 2289
2189 return domain; 2290 return domain;
2190 2291
@@ -2241,26 +2342,23 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
2241static void amd_iommu_detach_device(struct iommu_domain *dom, 2342static void amd_iommu_detach_device(struct iommu_domain *dom,
2242 struct device *dev) 2343 struct device *dev)
2243{ 2344{
2244 struct protection_domain *domain = dom->priv; 2345 struct iommu_dev_data *dev_data = dev->archdata.iommu;
2245 struct amd_iommu *iommu; 2346 struct amd_iommu *iommu;
2246 struct pci_dev *pdev;
2247 u16 devid; 2347 u16 devid;
2248 2348
2249 if (dev->bus != &pci_bus_type) 2349 if (!check_device(dev))
2250 return; 2350 return;
2251 2351
2252 pdev = to_pci_dev(dev); 2352 devid = get_device_id(dev);
2253
2254 devid = calc_devid(pdev->bus->number, pdev->devfn);
2255 2353
2256 if (devid > 0) 2354 if (dev_data->domain != NULL)
2257 detach_device(domain, devid); 2355 detach_device(dev);
2258 2356
2259 iommu = amd_iommu_rlookup_table[devid]; 2357 iommu = amd_iommu_rlookup_table[devid];
2260 if (!iommu) 2358 if (!iommu)
2261 return; 2359 return;
2262 2360
2263 iommu_queue_inv_dev_entry(iommu, devid); 2361 iommu_flush_device(dev);
2264 iommu_completion_wait(iommu); 2362 iommu_completion_wait(iommu);
2265} 2363}
2266 2364
@@ -2268,35 +2366,30 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
2268 struct device *dev) 2366 struct device *dev)
2269{ 2367{
2270 struct protection_domain *domain = dom->priv; 2368 struct protection_domain *domain = dom->priv;
2271 struct protection_domain *old_domain; 2369 struct iommu_dev_data *dev_data;
2272 struct amd_iommu *iommu; 2370 struct amd_iommu *iommu;
2273 struct pci_dev *pdev; 2371 int ret;
2274 u16 devid; 2372 u16 devid;
2275 2373
2276 if (dev->bus != &pci_bus_type) 2374 if (!check_device(dev))
2277 return -EINVAL; 2375 return -EINVAL;
2278 2376
2279 pdev = to_pci_dev(dev); 2377 dev_data = dev->archdata.iommu;
2280 2378
2281 devid = calc_devid(pdev->bus->number, pdev->devfn); 2379 devid = get_device_id(dev);
2282
2283 if (devid >= amd_iommu_last_bdf ||
2284 devid != amd_iommu_alias_table[devid])
2285 return -EINVAL;
2286 2380
2287 iommu = amd_iommu_rlookup_table[devid]; 2381 iommu = amd_iommu_rlookup_table[devid];
2288 if (!iommu) 2382 if (!iommu)
2289 return -EINVAL; 2383 return -EINVAL;
2290 2384
2291 old_domain = domain_for_device(devid); 2385 if (dev_data->domain)
2292 if (old_domain) 2386 detach_device(dev);
2293 detach_device(old_domain, devid);
2294 2387
2295 attach_device(iommu, domain, devid); 2388 ret = attach_device(dev, domain);
2296 2389
2297 iommu_completion_wait(iommu); 2390 iommu_completion_wait(iommu);
2298 2391
2299 return 0; 2392 return ret;
2300} 2393}
2301 2394
2302static int amd_iommu_map_range(struct iommu_domain *dom, 2395static int amd_iommu_map_range(struct iommu_domain *dom,
@@ -2342,7 +2435,7 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
2342 iova += PAGE_SIZE; 2435 iova += PAGE_SIZE;
2343 } 2436 }
2344 2437
2345 iommu_flush_domain(domain->id); 2438 iommu_flush_tlb_pde(domain);
2346} 2439}
2347 2440
2348static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom, 2441static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2393,8 +2486,9 @@ static struct iommu_ops amd_iommu_ops = {
2393 2486
2394int __init amd_iommu_init_passthrough(void) 2487int __init amd_iommu_init_passthrough(void)
2395{ 2488{
2489 struct amd_iommu *iommu;
2396 struct pci_dev *dev = NULL; 2490 struct pci_dev *dev = NULL;
2397 u16 devid, devid2; 2491 u16 devid;
2398 2492
2399 /* allocate passthrough domain */ 2493
2400 pt_domain = protection_domain_alloc(); 2494 pt_domain = protection_domain_alloc();
@@ -2404,20 +2498,17 @@ int __init amd_iommu_init_passthrough(void)
2404 pt_domain->mode |= PAGE_MODE_NONE; 2498 pt_domain->mode |= PAGE_MODE_NONE;
2405 2499
2406 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2500 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
2407 struct amd_iommu *iommu;
2408 2501
2409 devid = calc_devid(dev->bus->number, dev->devfn); 2502 if (!check_device(&dev->dev))
2410 if (devid > amd_iommu_last_bdf)
2411 continue; 2503 continue;
2412 2504
2413 devid2 = amd_iommu_alias_table[devid]; 2505 devid = get_device_id(&dev->dev);
2414 2506
2415 iommu = amd_iommu_rlookup_table[devid2]; 2507 iommu = amd_iommu_rlookup_table[devid];
2416 if (!iommu) 2508 if (!iommu)
2417 continue; 2509 continue;
2418 2510
2419 __attach_device(iommu, pt_domain, devid); 2511 attach_device(&dev->dev, pt_domain);
2420 __attach_device(iommu, pt_domain, devid2);
2421 } 2512 }
2422 2513
2423 pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); 2514 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index c20001e4f556..7ffc39965233 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. 2 * Copyright (C) 2007-2009 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com> 3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 * Leo Duran <leo.duran@amd.com> 4 * Leo Duran <leo.duran@amd.com>
5 * 5 *
@@ -25,10 +25,12 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/msi.h> 26#include <linux/msi.h>
27#include <asm/pci-direct.h> 27#include <asm/pci-direct.h>
28#include <asm/amd_iommu_proto.h>
28#include <asm/amd_iommu_types.h> 29#include <asm/amd_iommu_types.h>
29#include <asm/amd_iommu.h> 30#include <asm/amd_iommu.h>
30#include <asm/iommu.h> 31#include <asm/iommu.h>
31#include <asm/gart.h> 32#include <asm/gart.h>
33#include <asm/x86_init.h>
32 34
33/* 35/*
34 * definitions for the ACPI scanning code 36 * definitions for the ACPI scanning code
@@ -123,18 +125,24 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
123 to handle */ 125 to handle */
124LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings 126LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
125 we find in ACPI */ 127 we find in ACPI */
126#ifdef CONFIG_IOMMU_STRESS
127bool amd_iommu_isolate = false;
128#else
129bool amd_iommu_isolate = true; /* if true, device isolation is
130 enabled */
131#endif
132
133bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ 128bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
134 129
135LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 130LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
136 system */ 131 system */
137 132
133/* Array to assign indices to IOMMUs*/
134struct amd_iommu *amd_iommus[MAX_IOMMUS];
135int amd_iommus_present;
136
137/* IOMMUs have a non-present cache? */
138bool amd_iommu_np_cache __read_mostly;
139
140/*
141 * List of protection domains - used during resume
142 */
143LIST_HEAD(amd_iommu_pd_list);
144spinlock_t amd_iommu_pd_lock;
145
138/* 146/*
139 * Pointer to the device table which is shared by all AMD IOMMUs 147 * Pointer to the device table which is shared by all AMD IOMMUs
140 * it is indexed by the PCI device id or the HT unit id and contains 148 * it is indexed by the PCI device id or the HT unit id and contains
@@ -157,12 +165,6 @@ u16 *amd_iommu_alias_table;
157struct amd_iommu **amd_iommu_rlookup_table; 165struct amd_iommu **amd_iommu_rlookup_table;
158 166
159/* 167/*
160 * The pd table (protection domain table) is used to find the protection domain
161 * data structure a device belongs to. Indexed with the PCI device id too.
162 */
163struct protection_domain **amd_iommu_pd_table;
164
165/*
166 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap 168
167 * to know which ones are already in use. 169 * to know which ones are already in use.
168 */ 170 */
@@ -838,7 +840,18 @@ static void __init free_iommu_all(void)
838static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) 840static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
839{ 841{
840 spin_lock_init(&iommu->lock); 842 spin_lock_init(&iommu->lock);
843
844 /* Add IOMMU to internal data structures */
841 list_add_tail(&iommu->list, &amd_iommu_list); 845 list_add_tail(&iommu->list, &amd_iommu_list);
846 iommu->index = amd_iommus_present++;
847
848 if (unlikely(iommu->index >= MAX_IOMMUS)) {
849 WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
850 return -ENOSYS;
851 }
852
853 /* Index is fine - add IOMMU to the array */
854 amd_iommus[iommu->index] = iommu;
842 855
843 /* 856 /*
844 * Copy data from ACPI table entry to the iommu struct 857 * Copy data from ACPI table entry to the iommu struct
@@ -868,6 +881,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
868 init_iommu_from_acpi(iommu, h); 881 init_iommu_from_acpi(iommu, h);
869 init_iommu_devices(iommu); 882 init_iommu_devices(iommu);
870 883
884 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
885 amd_iommu_np_cache = true;
886
871 return pci_enable_device(iommu->dev); 887 return pci_enable_device(iommu->dev);
872} 888}
873 889
@@ -925,7 +941,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
925 * 941 *
926 ****************************************************************************/ 942 ****************************************************************************/
927 943
928static int __init iommu_setup_msi(struct amd_iommu *iommu) 944static int iommu_setup_msi(struct amd_iommu *iommu)
929{ 945{
930 int r; 946 int r;
931 947
@@ -1176,19 +1192,10 @@ static struct sys_device device_amd_iommu = {
1176 * functions. Finally it prints some information about AMD IOMMUs and 1192 * functions. Finally it prints some information about AMD IOMMUs and
1177 * the driver state and enables the hardware. 1193 * the driver state and enables the hardware.
1178 */ 1194 */
1179int __init amd_iommu_init(void) 1195static int __init amd_iommu_init(void)
1180{ 1196{
1181 int i, ret = 0; 1197 int i, ret = 0;
1182 1198
1183
1184 if (no_iommu) {
1185 printk(KERN_INFO "AMD-Vi disabled by kernel command line\n");
1186 return 0;
1187 }
1188
1189 if (!amd_iommu_detected)
1190 return -ENODEV;
1191
1192 /* 1199 /*
1193 * First parse ACPI tables to find the largest Bus/Dev/Func 1200 * First parse ACPI tables to find the largest Bus/Dev/Func
1194 * we need to handle. Upon this information the shared data 1201 * we need to handle. Upon this information the shared data
@@ -1225,15 +1232,6 @@ int __init amd_iommu_init(void)
1225 if (amd_iommu_rlookup_table == NULL) 1232 if (amd_iommu_rlookup_table == NULL)
1226 goto free; 1233 goto free;
1227 1234
1228 /*
1229 * Protection Domain table - maps devices to protection domains
1230 * This table has the same size as the rlookup_table
1231 */
1232 amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1233 get_order(rlookup_table_size));
1234 if (amd_iommu_pd_table == NULL)
1235 goto free;
1236
1237 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( 1235 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
1238 GFP_KERNEL | __GFP_ZERO, 1236 GFP_KERNEL | __GFP_ZERO,
1239 get_order(MAX_DOMAIN_ID/8)); 1237 get_order(MAX_DOMAIN_ID/8));
@@ -1255,6 +1253,8 @@ int __init amd_iommu_init(void)
1255 */ 1253 */
1256 amd_iommu_pd_alloc_bitmap[0] = 1; 1254 amd_iommu_pd_alloc_bitmap[0] = 1;
1257 1255
1256 spin_lock_init(&amd_iommu_pd_lock);
1257
1258 /* 1258 /*
1259 * now the data structures are allocated and basically initialized 1259 * now the data structures are allocated and basically initialized
1260 * start the real acpi table scan 1260 * start the real acpi table scan
@@ -1286,17 +1286,12 @@ int __init amd_iommu_init(void)
1286 if (iommu_pass_through) 1286 if (iommu_pass_through)
1287 goto out; 1287 goto out;
1288 1288
1289 printk(KERN_INFO "AMD-Vi: device isolation ");
1290 if (amd_iommu_isolate)
1291 printk("enabled\n");
1292 else
1293 printk("disabled\n");
1294
1295 if (amd_iommu_unmap_flush) 1289 if (amd_iommu_unmap_flush)
1296 printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); 1290 printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
1297 else 1291 else
1298 printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); 1292 printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
1299 1293
1294 x86_platform.iommu_shutdown = disable_iommus;
1300out: 1295out:
1301 return ret; 1296 return ret;
1302 1297
@@ -1304,9 +1299,6 @@ free:
1304 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 1299 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
1305 get_order(MAX_DOMAIN_ID/8)); 1300 get_order(MAX_DOMAIN_ID/8));
1306 1301
1307 free_pages((unsigned long)amd_iommu_pd_table,
1308 get_order(rlookup_table_size));
1309
1310 free_pages((unsigned long)amd_iommu_rlookup_table, 1302 free_pages((unsigned long)amd_iommu_rlookup_table,
1311 get_order(rlookup_table_size)); 1303 get_order(rlookup_table_size));
1312 1304
@@ -1323,11 +1315,6 @@ free:
1323 goto out; 1315 goto out;
1324} 1316}
1325 1317
1326void amd_iommu_shutdown(void)
1327{
1328 disable_iommus();
1329}
1330
1331/**************************************************************************** 1318/****************************************************************************
1332 * 1319 *
1333 * Early detect code. This code runs at IOMMU detection time in the DMA 1320 * Early detect code. This code runs at IOMMU detection time in the DMA
@@ -1342,16 +1329,13 @@ static int __init early_amd_iommu_detect(struct acpi_table_header *table)
1342 1329
1343void __init amd_iommu_detect(void) 1330void __init amd_iommu_detect(void)
1344{ 1331{
1345 if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture)) 1332 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
1346 return; 1333 return;
1347 1334
1348 if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { 1335 if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
1349 iommu_detected = 1; 1336 iommu_detected = 1;
1350 amd_iommu_detected = 1; 1337 amd_iommu_detected = 1;
1351#ifdef CONFIG_GART_IOMMU 1338 x86_init.iommu.iommu_init = amd_iommu_init;
1352 gart_iommu_aperture_disabled = 1;
1353 gart_iommu_aperture = 0;
1354#endif
1355 } 1339 }
1356} 1340}
1357 1341
@@ -1372,10 +1356,6 @@ static int __init parse_amd_iommu_dump(char *str)
1372static int __init parse_amd_iommu_options(char *str) 1356static int __init parse_amd_iommu_options(char *str)
1373{ 1357{
1374 for (; *str; ++str) { 1358 for (; *str; ++str) {
1375 if (strncmp(str, "isolate", 7) == 0)
1376 amd_iommu_isolate = true;
1377 if (strncmp(str, "share", 5) == 0)
1378 amd_iommu_isolate = false;
1379 if (strncmp(str, "fullflush", 9) == 0) 1359 if (strncmp(str, "fullflush", 9) == 0)
1380 amd_iommu_unmap_flush = true; 1360 amd_iommu_unmap_flush = true;
1381 } 1361 }
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 128111d8ffe0..e0dfb6856aa2 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -28,6 +28,7 @@
28#include <asm/pci-direct.h> 28#include <asm/pci-direct.h>
29#include <asm/dma.h> 29#include <asm/dma.h>
30#include <asm/k8.h> 30#include <asm/k8.h>
31#include <asm/x86_init.h>
31 32
32int gart_iommu_aperture; 33int gart_iommu_aperture;
33int gart_iommu_aperture_disabled __initdata; 34int gart_iommu_aperture_disabled __initdata;
@@ -400,6 +401,7 @@ void __init gart_iommu_hole_init(void)
400 401
401 iommu_detected = 1; 402 iommu_detected = 1;
402 gart_iommu_aperture = 1; 403 gart_iommu_aperture = 1;
404 x86_init.iommu.iommu_init = gart_iommu_init;
403 405
404 aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7; 406 aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
405 aper_size = (32 * 1024 * 1024) << aper_order; 407 aper_size = (32 * 1024 * 1024) << aper_order;
@@ -456,7 +458,7 @@ out:
456 458
457 if (aper_alloc) { 459 if (aper_alloc) {
458 /* Got the aperture from the AGP bridge */ 460 /* Got the aperture from the AGP bridge */
459 } else if (swiotlb && !valid_agp) { 461 } else if (!valid_agp) {
460 /* Do nothing */ 462 /* Do nothing */
461 } else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) || 463 } else if ((!no_iommu && max_pfn > MAX_DMA32_PFN) ||
462 force_iommu || 464 force_iommu ||
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 7d5c3b0ea8da..8b581d3905cb 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -526,15 +526,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
526 526
527static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) 527static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
528{ 528{
529 /* http://www.intel.com/Assets/PDF/specupdate/314554.pdf 529 /* Intel Xeon Processor 7100 Series Specification Update
530 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
530 * AL30: A Machine Check Exception (MCE) Occurring during an 531 * AL30: A Machine Check Exception (MCE) Occurring during an
531 * Enhanced Intel SpeedStep Technology Ratio Change May Cause 532 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
532 * Both Processor Cores to Lock Up when HT is enabled*/ 533 * Both Processor Cores to Lock Up. */
533 if (c->x86_vendor == X86_VENDOR_INTEL) { 534 if (c->x86_vendor == X86_VENDOR_INTEL) {
534 if ((c->x86 == 15) && 535 if ((c->x86 == 15) &&
535 (c->x86_model == 6) && 536 (c->x86_model == 6) &&
536 (c->x86_mask == 8) && smt_capable()) 537 (c->x86_mask == 8)) {
538 printk(KERN_INFO "acpi-cpufreq: Intel(R) "
539 "Xeon(R) 7100 Errata AL30, processors may "
540 "lock up on frequency changes: disabling "
541 "acpi-cpufreq.\n");
537 return -ENODEV; 542 return -ENODEV;
543 }
538 } 544 }
539 return 0; 545 return 0;
540} 546}
@@ -549,13 +555,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
549 unsigned int result = 0; 555 unsigned int result = 0;
550 struct cpuinfo_x86 *c = &cpu_data(policy->cpu); 556 struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
551 struct acpi_processor_performance *perf; 557 struct acpi_processor_performance *perf;
558#ifdef CONFIG_SMP
559 static int blacklisted;
560#endif
552 561
553 dprintk("acpi_cpufreq_cpu_init\n"); 562 dprintk("acpi_cpufreq_cpu_init\n");
554 563
555#ifdef CONFIG_SMP 564#ifdef CONFIG_SMP
556 result = acpi_cpufreq_blacklist(c); 565 if (blacklisted)
557 if (result) 566 return blacklisted;
558 return result; 567 blacklisted = acpi_cpufreq_blacklist(c);
568 if (blacklisted)
569 return blacklisted;
559#endif 570#endif
560 571
561 data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL); 572 data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index ce2ed3e4aad9..cabd2fa3fc93 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -813,7 +813,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
813 memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr)); 813 memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
814 break; 814 break;
815 case 1 ... 15: 815 case 1 ... 15:
816 longhaul_version = TYPE_LONGHAUL_V1; 816 longhaul_version = TYPE_LONGHAUL_V2;
817 if (c->x86_mask < 8) { 817 if (c->x86_mask < 8) {
818 cpu_model = CPU_SAMUEL2; 818 cpu_model = CPU_SAMUEL2;
819 cpuname = "C3 'Samuel 2' [C5B]"; 819 cpuname = "C3 'Samuel 2' [C5B]";
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 6394aa5c7985..3f12dabeab52 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1022,7 +1022,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
1022 * set it to 1 to avoid problems in the future. 1022 * set it to 1 to avoid problems in the future.
1023 * For all others it's a BIOS bug. 1023 * For all others it's a BIOS bug.
1024 */ 1024 */
1025 if (!boot_cpu_data.x86 == 0x11) 1025 if (boot_cpu_data.x86 != 0x11)
1026 printk(KERN_ERR FW_WARN PFX "Invalid zero transition " 1026 printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
1027 "latency\n"); 1027 "latency\n");
1028 max_latency = 1; 1028 max_latency = 1;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 6911e91fb4f6..3ae5a7a3a500 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -232,28 +232,23 @@ static unsigned int speedstep_detect_chipset(void)
232 return 0; 232 return 0;
233} 233}
234 234
235struct get_freq_data { 235static void get_freq_data(void *_speed)
236 unsigned int speed;
237 unsigned int processor;
238};
239
240static void get_freq_data(void *_data)
241{ 236{
242 struct get_freq_data *data = _data; 237 unsigned int *speed = _speed;
243 238
244 data->speed = speedstep_get_frequency(data->processor); 239 *speed = speedstep_get_frequency(speedstep_processor);
245} 240}
246 241
247static unsigned int speedstep_get(unsigned int cpu) 242static unsigned int speedstep_get(unsigned int cpu)
248{ 243{
249 struct get_freq_data data = { .processor = cpu }; 244 unsigned int speed;
250 245
251 /* You're supposed to ensure CPU is online. */ 246 /* You're supposed to ensure CPU is online. */
252 if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0) 247 if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
253 BUG(); 248 BUG();
254 249
255 dprintk("detected %u kHz as current frequency\n", data.speed); 250 dprintk("detected %u kHz as current frequency\n", speed);
256 return data.speed; 251 return speed;
257} 252}
258 253
259/** 254/**
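A side note on the simplified speedstep_get() above: smp_call_function_single() hands the info pointer to the function running on the target CPU, so the result can simply be written back through it, and wait must be 1 so the remote store has completed before the caller reads the value. A generic sketch of the pattern, with invented helper names:

    #include <linux/smp.h>

    static void read_remote_value(void *info)
    {
            unsigned int *val = info;

            *val = read_hw_register();              /* hypothetical per-CPU read */
    }

    static unsigned int read_value_on(int cpu)
    {
            unsigned int val;

            /* wait == 1: the remote store to 'val' finishes before we return it */
            if (smp_call_function_single(cpu, read_remote_value, &val, 1))
                    return 0;

            return val;
    }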
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 5e409dc298a4..a4849c10a77e 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,8 +27,7 @@
27#include <asm/cpu.h> 27#include <asm/cpu.h>
28#include <asm/reboot.h> 28#include <asm/reboot.h>
29#include <asm/virtext.h> 29#include <asm/virtext.h>
30#include <asm/iommu.h> 30#include <asm/x86_init.h>
31
32 31
33#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) 32#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
34 33
@@ -106,7 +105,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
106#endif 105#endif
107 106
108#ifdef CONFIG_X86_64 107#ifdef CONFIG_X86_64
109 pci_iommu_shutdown(); 108 x86_platform.iommu_shutdown();
110#endif 109#endif
111 110
112 crash_save_cpu(regs, safe_smp_processor_id()); 111 crash_save_cpu(regs, safe_smp_processor_id());
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 971a3bec47a8..c563e4c8ff39 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -46,6 +46,7 @@
46#include <asm/dma.h> 46#include <asm/dma.h>
47#include <asm/rio.h> 47#include <asm/rio.h>
48#include <asm/bios_ebda.h> 48#include <asm/bios_ebda.h>
49#include <asm/x86_init.h>
49 50
50#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT 51#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
51int use_calgary __read_mostly = 1; 52int use_calgary __read_mostly = 1;
@@ -244,7 +245,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
244 if (panic_on_overflow) 245 if (panic_on_overflow)
245 panic("Calgary: fix the allocator.\n"); 246 panic("Calgary: fix the allocator.\n");
246 else 247 else
247 return bad_dma_address; 248 return DMA_ERROR_CODE;
248 } 249 }
249 } 250 }
250 251
@@ -260,12 +261,15 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
260 void *vaddr, unsigned int npages, int direction) 261 void *vaddr, unsigned int npages, int direction)
261{ 262{
262 unsigned long entry; 263 unsigned long entry;
263 dma_addr_t ret = bad_dma_address; 264 dma_addr_t ret;
264 265
265 entry = iommu_range_alloc(dev, tbl, npages); 266 entry = iommu_range_alloc(dev, tbl, npages);
266 267
267 if (unlikely(entry == bad_dma_address)) 268 if (unlikely(entry == DMA_ERROR_CODE)) {
268 goto error; 269 printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
270 "iommu %p\n", npages, tbl);
271 return DMA_ERROR_CODE;
272 }
269 273
270 /* set the return dma address */ 274 /* set the return dma address */
271 ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK); 275 ret = (entry << PAGE_SHIFT) | ((unsigned long)vaddr & ~PAGE_MASK);
@@ -273,13 +277,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
273 /* put the TCEs in the HW table */ 277 /* put the TCEs in the HW table */
274 tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK, 278 tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
275 direction); 279 direction);
276
277 return ret; 280 return ret;
278
279error:
280 printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
281 "iommu %p\n", npages, tbl);
282 return bad_dma_address;
283} 281}
284 282
285static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, 283static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -290,8 +288,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
290 unsigned long flags; 288 unsigned long flags;
291 289
292 /* were we called with bad_dma_address? */ 290 /* were we called with bad_dma_address? */
293 badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE); 291 badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
294 if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) { 292 if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
295 WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA " 293 WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
296 "address 0x%Lx\n", dma_addr); 294 "address 0x%Lx\n", dma_addr);
297 return; 295 return;
@@ -318,13 +316,15 @@ static inline struct iommu_table *find_iommu_table(struct device *dev)
318 316
319 pdev = to_pci_dev(dev); 317 pdev = to_pci_dev(dev);
320 318
319 /* search up the device tree for an iommu */
321 pbus = pdev->bus; 320 pbus = pdev->bus;
322 321 do {
323 /* is the device behind a bridge? Look for the root bus */ 322 tbl = pci_iommu(pbus);
324 while (pbus->parent) 323 if (tbl && tbl->it_busno == pbus->number)
324 break;
325 tbl = NULL;
325 pbus = pbus->parent; 326 pbus = pbus->parent;
326 327 } while (pbus);
327 tbl = pci_iommu(pbus);
328 328
329 BUG_ON(tbl && (tbl->it_busno != pbus->number)); 329 BUG_ON(tbl && (tbl->it_busno != pbus->number));
330 330
@@ -373,7 +373,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
373 npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE); 373 npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
374 374
375 entry = iommu_range_alloc(dev, tbl, npages); 375 entry = iommu_range_alloc(dev, tbl, npages);
376 if (entry == bad_dma_address) { 376 if (entry == DMA_ERROR_CODE) {
377 /* makes sure unmap knows to stop */ 377 /* makes sure unmap knows to stop */
378 s->dma_length = 0; 378 s->dma_length = 0;
379 goto error; 379 goto error;
@@ -391,7 +391,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
391error: 391error:
392 calgary_unmap_sg(dev, sg, nelems, dir, NULL); 392 calgary_unmap_sg(dev, sg, nelems, dir, NULL);
393 for_each_sg(sg, s, nelems, i) { 393 for_each_sg(sg, s, nelems, i) {
394 sg->dma_address = bad_dma_address; 394 sg->dma_address = DMA_ERROR_CODE;
395 sg->dma_length = 0; 395 sg->dma_length = 0;
396 } 396 }
397 return 0; 397 return 0;
@@ -446,7 +446,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
446 446
447 /* set up tces to cover the allocated range */ 447 /* set up tces to cover the allocated range */
448 mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); 448 mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
449 if (mapping == bad_dma_address) 449 if (mapping == DMA_ERROR_CODE)
450 goto free; 450 goto free;
451 *dma_handle = mapping; 451 *dma_handle = mapping;
452 return ret; 452 return ret;
@@ -727,7 +727,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
727 struct iommu_table *tbl = pci_iommu(dev->bus); 727 struct iommu_table *tbl = pci_iommu(dev->bus);
728 728
729 /* reserve EMERGENCY_PAGES from bad_dma_address and up */ 729 /* reserve EMERGENCY_PAGES from bad_dma_address and up */
730 iommu_range_reserve(tbl, bad_dma_address, EMERGENCY_PAGES); 730 iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
731 731
732 /* avoid the BIOS/VGA first 640KB-1MB region */ 732 /* avoid the BIOS/VGA first 640KB-1MB region */
733 /* for CalIOC2 - avoid the entire first MB */ 733 /* for CalIOC2 - avoid the entire first MB */
@@ -1344,6 +1344,23 @@ static void __init get_tce_space_from_tar(void)
1344 return; 1344 return;
1345} 1345}
1346 1346
1347static int __init calgary_iommu_init(void)
1348{
1349 int ret;
1350
1351 /* ok, we're trying to use Calgary - let's roll */
1352 printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
1353
1354 ret = calgary_init();
1355 if (ret) {
1356 printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
1357 "falling back to no_iommu\n", ret);
1358 return ret;
1359 }
1360
1361 return 0;
1362}
1363
1347void __init detect_calgary(void) 1364void __init detect_calgary(void)
1348{ 1365{
1349 int bus; 1366 int bus;
@@ -1357,7 +1374,7 @@ void __init detect_calgary(void)
1357 * if the user specified iommu=off or iommu=soft or we found 1374 * if the user specified iommu=off or iommu=soft or we found
1358 * another HW IOMMU already, bail out. 1375 * another HW IOMMU already, bail out.
1359 */ 1376 */
1360 if (swiotlb || no_iommu || iommu_detected) 1377 if (no_iommu || iommu_detected)
1361 return; 1378 return;
1362 1379
1363 if (!use_calgary) 1380 if (!use_calgary)
@@ -1442,9 +1459,7 @@ void __init detect_calgary(void)
1442 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n", 1459 printk(KERN_INFO "PCI-DMA: Calgary TCE table spec is %d\n",
1443 specified_table_size); 1460 specified_table_size);
1444 1461
1445 /* swiotlb for devices that aren't behind the Calgary. */ 1462 x86_init.iommu.iommu_init = calgary_iommu_init;
1446 if (max_pfn > MAX_DMA32_PFN)
1447 swiotlb = 1;
1448 } 1463 }
1449 return; 1464 return;
1450 1465
@@ -1457,35 +1472,6 @@ cleanup:
1457 } 1472 }
1458} 1473}
1459 1474
1460int __init calgary_iommu_init(void)
1461{
1462 int ret;
1463
1464 if (no_iommu || (swiotlb && !calgary_detected))
1465 return -ENODEV;
1466
1467 if (!calgary_detected)
1468 return -ENODEV;
1469
1470 /* ok, we're trying to use Calgary - let's roll */
1471 printk(KERN_INFO "PCI-DMA: Using Calgary IOMMU\n");
1472
1473 ret = calgary_init();
1474 if (ret) {
1475 printk(KERN_ERR "PCI-DMA: Calgary init failed %d, "
1476 "falling back to no_iommu\n", ret);
1477 return ret;
1478 }
1479
1480 force_iommu = 1;
1481 bad_dma_address = 0x0;
1482 /* dma_ops is set to swiotlb or nommu */
1483 if (!dma_ops)
1484 dma_ops = &nommu_dma_ops;
1485
1486 return 0;
1487}
1488
1489static int __init calgary_parse_options(char *p) 1475static int __init calgary_parse_options(char *p)
1490{ 1476{
1491 unsigned int bridge; 1477 unsigned int bridge;
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index a6e804d16c35..afcc58b69c7c 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,10 +11,11 @@
11#include <asm/gart.h> 11#include <asm/gart.h>
12#include <asm/calgary.h> 12#include <asm/calgary.h>
13#include <asm/amd_iommu.h> 13#include <asm/amd_iommu.h>
14#include <asm/x86_init.h>
14 15
15static int forbid_dac __read_mostly; 16static int forbid_dac __read_mostly;
16 17
17struct dma_map_ops *dma_ops; 18struct dma_map_ops *dma_ops = &nommu_dma_ops;
18EXPORT_SYMBOL(dma_ops); 19EXPORT_SYMBOL(dma_ops);
19 20
20static int iommu_sac_force __read_mostly; 21static int iommu_sac_force __read_mostly;
@@ -42,9 +43,6 @@ int iommu_detected __read_mostly = 0;
42 */ 43 */
43int iommu_pass_through __read_mostly; 44int iommu_pass_through __read_mostly;
44 45
45dma_addr_t bad_dma_address __read_mostly = 0;
46EXPORT_SYMBOL(bad_dma_address);
47
48/* Dummy device used for NULL arguments (normally ISA). */ 46/* Dummy device used for NULL arguments (normally ISA). */
49struct device x86_dma_fallback_dev = { 47struct device x86_dma_fallback_dev = {
50 .init_name = "fallback device", 48 .init_name = "fallback device",
@@ -126,20 +124,17 @@ void __init pci_iommu_alloc(void)
126 /* free the range so iommu could get some range less than 4G */ 124 /* free the range so iommu could get some range less than 4G */
127 dma32_free_bootmem(); 125 dma32_free_bootmem();
128#endif 126#endif
127 if (pci_swiotlb_init())
128 return;
129 129
130 /*
131 * The order of these functions is important for
132 * fall-back/fail-over reasons
133 */
134 gart_iommu_hole_init(); 130 gart_iommu_hole_init();
135 131
136 detect_calgary(); 132 detect_calgary();
137 133
138 detect_intel_iommu(); 134 detect_intel_iommu();
139 135
136 /* needs to be called after gart_iommu_hole_init */
140 amd_iommu_detect(); 137 amd_iommu_detect();
141
142 pci_swiotlb_init();
143} 138}
144 139
145void *dma_generic_alloc_coherent(struct device *dev, size_t size, 140void *dma_generic_alloc_coherent(struct device *dev, size_t size,
@@ -214,7 +209,7 @@ static __init int iommu_setup(char *p)
214 if (!strncmp(p, "allowdac", 8)) 209 if (!strncmp(p, "allowdac", 8))
215 forbid_dac = 0; 210 forbid_dac = 0;
216 if (!strncmp(p, "nodac", 5)) 211 if (!strncmp(p, "nodac", 5))
217 forbid_dac = -1; 212 forbid_dac = 1;
218 if (!strncmp(p, "usedac", 6)) { 213 if (!strncmp(p, "usedac", 6)) {
219 forbid_dac = -1; 214 forbid_dac = -1;
220 return 1; 215 return 1;
@@ -289,25 +284,17 @@ static int __init pci_iommu_init(void)
289#ifdef CONFIG_PCI 284#ifdef CONFIG_PCI
290 dma_debug_add_bus(&pci_bus_type); 285 dma_debug_add_bus(&pci_bus_type);
291#endif 286#endif
287 x86_init.iommu.iommu_init();
292 288
293 calgary_iommu_init(); 289 if (swiotlb) {
294 290 printk(KERN_INFO "PCI-DMA: "
295 intel_iommu_init(); 291 "Using software bounce buffering for IO (SWIOTLB)\n");
292 swiotlb_print_info();
293 } else
294 swiotlb_free();
296 295
297 amd_iommu_init();
298
299 gart_iommu_init();
300
301 no_iommu_init();
302 return 0; 296 return 0;
303} 297}
304
305void pci_iommu_shutdown(void)
306{
307 gart_iommu_shutdown();
308
309 amd_iommu_shutdown();
310}
311/* Must execute after PCI subsystem */ 298/* Must execute after PCI subsystem */
312rootfs_initcall(pci_iommu_init); 299rootfs_initcall(pci_iommu_init);
313 300
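Stepping back, the pci-dma.c, aperture_64.c, Calgary and AMD-Vi hunks all follow the same pattern: instead of pci_iommu_init() calling every driver's init function directly, each detection routine installs its own initializer in x86_init.iommu.iommu_init and the generic code invokes whichever hook won. A sketch of what a hypothetical IOMMU driver would do under this scheme (only the x86_init hook and the no_iommu/iommu_detected globals are taken from the patch; the rest is made up):

    #include <asm/iommu.h>
    #include <asm/x86_init.h>

    static int __init my_iommu_init(void)
    {
            /* program the translation hardware and install dma_ops here */
            return 0;
    }

    void __init my_iommu_detect(void)
    {
            /* respect iommu=off and earlier detections, as the real drivers do */
            if (no_iommu || iommu_detected)
                    return;

            if (!my_iommu_present())                /* hypothetical probe */
                    return;

            iommu_detected = 1;
            x86_init.iommu.iommu_init = my_iommu_init;
    }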
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index a7f1b64f86e0..e6a0d402f171 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -39,6 +39,7 @@
39#include <asm/swiotlb.h> 39#include <asm/swiotlb.h>
40#include <asm/dma.h> 40#include <asm/dma.h>
41#include <asm/k8.h> 41#include <asm/k8.h>
42#include <asm/x86_init.h>
42 43
43static unsigned long iommu_bus_base; /* GART remapping area (physical) */ 44static unsigned long iommu_bus_base; /* GART remapping area (physical) */
44static unsigned long iommu_size; /* size of remapping area bytes */ 45static unsigned long iommu_size; /* size of remapping area bytes */
@@ -46,6 +47,8 @@ static unsigned long iommu_pages; /* .. and in pages */
46 47
47static u32 *iommu_gatt_base; /* Remapping table */ 48static u32 *iommu_gatt_base; /* Remapping table */
48 49
50static dma_addr_t bad_dma_addr;
51
49/* 52/*
50 * If this is disabled the IOMMU will use an optimized flushing strategy 53 * If this is disabled the IOMMU will use an optimized flushing strategy
51 * of only flushing when an mapping is reused. With it true the GART is 54 * of only flushing when an mapping is reused. With it true the GART is
@@ -92,7 +95,7 @@ static unsigned long alloc_iommu(struct device *dev, int size,
92 95
93 base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev), 96 base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
94 PAGE_SIZE) >> PAGE_SHIFT; 97 PAGE_SIZE) >> PAGE_SHIFT;
95 boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1, 98 boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
96 PAGE_SIZE) >> PAGE_SHIFT; 99 PAGE_SIZE) >> PAGE_SHIFT;
97 100
98 spin_lock_irqsave(&iommu_bitmap_lock, flags); 101 spin_lock_irqsave(&iommu_bitmap_lock, flags);
@@ -216,7 +219,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
216 if (panic_on_overflow) 219 if (panic_on_overflow)
217 panic("dma_map_area overflow %lu bytes\n", size); 220 panic("dma_map_area overflow %lu bytes\n", size);
218 iommu_full(dev, size, dir); 221 iommu_full(dev, size, dir);
219 return bad_dma_address; 222 return bad_dma_addr;
220 } 223 }
221 224
222 for (i = 0; i < npages; i++) { 225 for (i = 0; i < npages; i++) {
@@ -294,7 +297,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
294 int i; 297 int i;
295 298
296#ifdef CONFIG_IOMMU_DEBUG 299#ifdef CONFIG_IOMMU_DEBUG
297 printk(KERN_DEBUG "dma_map_sg overflow\n"); 300 pr_debug("dma_map_sg overflow\n");
298#endif 301#endif
299 302
300 for_each_sg(sg, s, nents, i) { 303 for_each_sg(sg, s, nents, i) {
@@ -302,7 +305,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
302 305
303 if (nonforced_iommu(dev, addr, s->length)) { 306 if (nonforced_iommu(dev, addr, s->length)) {
304 addr = dma_map_area(dev, addr, s->length, dir, 0); 307 addr = dma_map_area(dev, addr, s->length, dir, 0);
305 if (addr == bad_dma_address) { 308 if (addr == bad_dma_addr) {
306 if (i > 0) 309 if (i > 0)
307 gart_unmap_sg(dev, sg, i, dir, NULL); 310 gart_unmap_sg(dev, sg, i, dir, NULL);
308 nents = 0; 311 nents = 0;
@@ -389,12 +392,14 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
389 if (!dev) 392 if (!dev)
390 dev = &x86_dma_fallback_dev; 393 dev = &x86_dma_fallback_dev;
391 394
392 out = 0; 395 out = 0;
393 start = 0; 396 start = 0;
394 start_sg = sgmap = sg; 397 start_sg = sg;
395 seg_size = 0; 398 sgmap = sg;
396 max_seg_size = dma_get_max_seg_size(dev); 399 seg_size = 0;
397 ps = NULL; /* shut up gcc */ 400 max_seg_size = dma_get_max_seg_size(dev);
401 ps = NULL; /* shut up gcc */
402
398 for_each_sg(sg, s, nents, i) { 403 for_each_sg(sg, s, nents, i) {
399 dma_addr_t addr = sg_phys(s); 404 dma_addr_t addr = sg_phys(s);
400 405
@@ -417,11 +422,12 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
417 sgmap, pages, need) < 0) 422 sgmap, pages, need) < 0)
418 goto error; 423 goto error;
419 out++; 424 out++;
420 seg_size = 0; 425
421 sgmap = sg_next(sgmap); 426 seg_size = 0;
422 pages = 0; 427 sgmap = sg_next(sgmap);
423 start = i; 428 pages = 0;
424 start_sg = s; 429 start = i;
430 start_sg = s;
425 } 431 }
426 } 432 }
427 433
@@ -455,7 +461,7 @@ error:
455 461
456 iommu_full(dev, pages << PAGE_SHIFT, dir); 462 iommu_full(dev, pages << PAGE_SHIFT, dir);
457 for_each_sg(sg, s, nents, i) 463 for_each_sg(sg, s, nents, i)
458 s->dma_address = bad_dma_address; 464 s->dma_address = bad_dma_addr;
459 return 0; 465 return 0;
460} 466}
461 467
@@ -479,7 +485,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
479 DMA_BIDIRECTIONAL, align_mask); 485 DMA_BIDIRECTIONAL, align_mask);
480 486
481 flush_gart(); 487 flush_gart();
482 if (paddr != bad_dma_address) { 488 if (paddr != bad_dma_addr) {
483 *dma_addr = paddr; 489 *dma_addr = paddr;
484 return page_address(page); 490 return page_address(page);
485 } 491 }
@@ -499,6 +505,11 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
499 free_pages((unsigned long)vaddr, get_order(size)); 505 free_pages((unsigned long)vaddr, get_order(size));
500} 506}
501 507
508static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
509{
510 return (dma_addr == bad_dma_addr);
511}
512
502static int no_agp; 513static int no_agp;
503 514
504static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size) 515static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -515,7 +526,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
515 iommu_size -= round_up(a, PMD_PAGE_SIZE) - a; 526 iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
516 527
517 if (iommu_size < 64*1024*1024) { 528 if (iommu_size < 64*1024*1024) {
518 printk(KERN_WARNING 529 pr_warning(
519 "PCI-DMA: Warning: Small IOMMU %luMB." 530 "PCI-DMA: Warning: Small IOMMU %luMB."
520 " Consider increasing the AGP aperture in BIOS\n", 531 " Consider increasing the AGP aperture in BIOS\n",
521 iommu_size >> 20); 532 iommu_size >> 20);
@@ -570,28 +581,32 @@ void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
570 aperture_alloc = aper_alloc; 581 aperture_alloc = aper_alloc;
571} 582}
572 583
573static int gart_resume(struct sys_device *dev) 584static void gart_fixup_northbridges(struct sys_device *dev)
574{ 585{
575 printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n"); 586 int i;
576 587
577 if (fix_up_north_bridges) { 588 if (!fix_up_north_bridges)
578 int i; 589 return;
579 590
580 printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n"); 591 pr_info("PCI-DMA: Restoring GART aperture settings\n");
581 592
582 for (i = 0; i < num_k8_northbridges; i++) { 593 for (i = 0; i < num_k8_northbridges; i++) {
583 struct pci_dev *dev = k8_northbridges[i]; 594 struct pci_dev *dev = k8_northbridges[i];
584 595
585 /* 596 /*
586 * Don't enable translations just yet. That is the next 597 * Don't enable translations just yet. That is the next
587 * step. Restore the pre-suspend aperture settings. 598 * step. Restore the pre-suspend aperture settings.
588 */ 599 */
589 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, 600 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, aperture_order << 1);
590 aperture_order << 1); 601 pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
591 pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
592 aperture_alloc >> 25);
593 }
594 } 602 }
603}
604
605static int gart_resume(struct sys_device *dev)
606{
607 pr_info("PCI-DMA: Resuming GART IOMMU\n");
608
609 gart_fixup_northbridges(dev);
595 610
596 enable_gart_translations(); 611 enable_gart_translations();
597 612
@@ -604,15 +619,14 @@ static int gart_suspend(struct sys_device *dev, pm_message_t state)
604} 619}
605 620
606static struct sysdev_class gart_sysdev_class = { 621static struct sysdev_class gart_sysdev_class = {
607 .name = "gart", 622 .name = "gart",
608 .suspend = gart_suspend, 623 .suspend = gart_suspend,
609 .resume = gart_resume, 624 .resume = gart_resume,
610 625
611}; 626};
612 627
613static struct sys_device device_gart = { 628static struct sys_device device_gart = {
614 .id = 0, 629 .cls = &gart_sysdev_class,
615 .cls = &gart_sysdev_class,
616}; 630};
617 631
618/* 632/*
@@ -627,7 +641,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
627 void *gatt; 641 void *gatt;
628 int i, error; 642 int i, error;
629 643
630 printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); 644 pr_info("PCI-DMA: Disabling AGP.\n");
645
631 aper_size = aper_base = info->aper_size = 0; 646 aper_size = aper_base = info->aper_size = 0;
632 dev = NULL; 647 dev = NULL;
633 for (i = 0; i < num_k8_northbridges; i++) { 648 for (i = 0; i < num_k8_northbridges; i++) {
@@ -645,6 +660,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
645 } 660 }
646 if (!aper_base) 661 if (!aper_base)
647 goto nommu; 662 goto nommu;
663
648 info->aper_base = aper_base; 664 info->aper_base = aper_base;
649 info->aper_size = aper_size >> 20; 665 info->aper_size = aper_size >> 20;
650 666
@@ -667,14 +683,14 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
667 683
668 flush_gart(); 684 flush_gart();
669 685
670 printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n", 686 pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
671 aper_base, aper_size>>10); 687 aper_base, aper_size>>10);
672 688
673 return 0; 689 return 0;
674 690
675 nommu: 691 nommu:
676 /* Should not happen anymore */ 692 /* Should not happen anymore */
677 printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n" 693 pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
678 "falling back to iommu=soft.\n"); 694 "falling back to iommu=soft.\n");
679 return -1; 695 return -1;
680} 696}
@@ -686,14 +702,15 @@ static struct dma_map_ops gart_dma_ops = {
686 .unmap_page = gart_unmap_page, 702 .unmap_page = gart_unmap_page,
687 .alloc_coherent = gart_alloc_coherent, 703 .alloc_coherent = gart_alloc_coherent,
688 .free_coherent = gart_free_coherent, 704 .free_coherent = gart_free_coherent,
705 .mapping_error = gart_mapping_error,
689}; 706};
690 707
691void gart_iommu_shutdown(void) 708static void gart_iommu_shutdown(void)
692{ 709{
693 struct pci_dev *dev; 710 struct pci_dev *dev;
694 int i; 711 int i;
695 712
696 if (no_agp && (dma_ops != &gart_dma_ops)) 713 if (no_agp)
697 return; 714 return;
698 715
699 for (i = 0; i < num_k8_northbridges; i++) { 716 for (i = 0; i < num_k8_northbridges; i++) {
@@ -708,7 +725,7 @@ void gart_iommu_shutdown(void)
708 } 725 }
709} 726}
710 727
711void __init gart_iommu_init(void) 728int __init gart_iommu_init(void)
712{ 729{
713 struct agp_kern_info info; 730 struct agp_kern_info info;
714 unsigned long iommu_start; 731 unsigned long iommu_start;
@@ -718,7 +735,7 @@ void __init gart_iommu_init(void)
718 long i; 735 long i;
719 736
720 if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) 737 if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
721 return; 738 return 0;
722 739
723#ifndef CONFIG_AGP_AMD64 740#ifndef CONFIG_AGP_AMD64
724 no_agp = 1; 741 no_agp = 1;
@@ -730,35 +747,28 @@ void __init gart_iommu_init(void)
730 (agp_copy_info(agp_bridge, &info) < 0); 747 (agp_copy_info(agp_bridge, &info) < 0);
731#endif 748#endif
732 749
733 if (swiotlb)
734 return;
735
736 /* Did we detect a different HW IOMMU? */
737 if (iommu_detected && !gart_iommu_aperture)
738 return;
739
740 if (no_iommu || 750 if (no_iommu ||
741 (!force_iommu && max_pfn <= MAX_DMA32_PFN) || 751 (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
742 !gart_iommu_aperture || 752 !gart_iommu_aperture ||
743 (no_agp && init_k8_gatt(&info) < 0)) { 753 (no_agp && init_k8_gatt(&info) < 0)) {
744 if (max_pfn > MAX_DMA32_PFN) { 754 if (max_pfn > MAX_DMA32_PFN) {
745 printk(KERN_WARNING "More than 4GB of memory " 755 pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
746 "but GART IOMMU not available.\n"); 756 pr_warning("falling back to iommu=soft.\n");
747 printk(KERN_WARNING "falling back to iommu=soft.\n");
748 } 757 }
749 return; 758 return 0;
750 } 759 }
751 760
752 /* need to map that range */ 761 /* need to map that range */
753 aper_size = info.aper_size << 20; 762 aper_size = info.aper_size << 20;
754 aper_base = info.aper_base; 763 aper_base = info.aper_base;
755 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT); 764 end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
765
756 if (end_pfn > max_low_pfn_mapped) { 766 if (end_pfn > max_low_pfn_mapped) {
757 start_pfn = (aper_base>>PAGE_SHIFT); 767 start_pfn = (aper_base>>PAGE_SHIFT);
758 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT); 768 init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
759 } 769 }
760 770
761 printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); 771 pr_info("PCI-DMA: using GART IOMMU.\n");
762 iommu_size = check_iommu_size(info.aper_base, aper_size); 772 iommu_size = check_iommu_size(info.aper_base, aper_size);
763 iommu_pages = iommu_size >> PAGE_SHIFT; 773 iommu_pages = iommu_size >> PAGE_SHIFT;
764 774
@@ -773,8 +783,7 @@ void __init gart_iommu_init(void)
773 783
774 ret = dma_debug_resize_entries(iommu_pages); 784 ret = dma_debug_resize_entries(iommu_pages);
775 if (ret) 785 if (ret)
776 printk(KERN_DEBUG 786 pr_debug("PCI-DMA: Cannot trace all the entries\n");
777 "PCI-DMA: Cannot trace all the entries\n");
778 } 787 }
779#endif 788#endif
780 789
@@ -784,15 +793,14 @@ void __init gart_iommu_init(void)
784 */ 793 */
785 iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES); 794 iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
786 795
787 agp_memory_reserved = iommu_size; 796 pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
788 printk(KERN_INFO
789 "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
790 iommu_size >> 20); 797 iommu_size >> 20);
791 798
792 iommu_start = aper_size - iommu_size; 799 agp_memory_reserved = iommu_size;
793 iommu_bus_base = info.aper_base + iommu_start; 800 iommu_start = aper_size - iommu_size;
794 bad_dma_address = iommu_bus_base; 801 iommu_bus_base = info.aper_base + iommu_start;
795 iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT); 802 bad_dma_addr = iommu_bus_base;
803 iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
796 804
797 /* 805 /*
798 * Unmap the IOMMU part of the GART. The alias of the page is 806 * Unmap the IOMMU part of the GART. The alias of the page is
@@ -814,7 +822,7 @@ void __init gart_iommu_init(void)
814 * the pages as Not-Present: 822 * the pages as Not-Present:
815 */ 823 */
816 wbinvd(); 824 wbinvd();
817 825
818 /* 826 /*
819 * Now all caches are flushed and we can safely enable 827 * Now all caches are flushed and we can safely enable
820 * GART hardware. Doing it early leaves the possibility 828 * GART hardware. Doing it early leaves the possibility
@@ -838,6 +846,10 @@ void __init gart_iommu_init(void)
838 846
839 flush_gart(); 847 flush_gart();
840 dma_ops = &gart_dma_ops; 848 dma_ops = &gart_dma_ops;
849 x86_platform.iommu_shutdown = gart_iommu_shutdown;
850 swiotlb = 0;
851
852 return 0;
841} 853}
842 854
843void __init gart_parse_options(char *p) 855void __init gart_parse_options(char *p)
@@ -856,7 +868,7 @@ void __init gart_parse_options(char *p)
856#endif 868#endif
857 if (isdigit(*p) && get_option(&p, &arg)) 869 if (isdigit(*p) && get_option(&p, &arg))
858 iommu_size = arg; 870 iommu_size = arg;
859 if (!strncmp(p, "fullflush", 8)) 871 if (!strncmp(p, "fullflush", 9))
860 iommu_fullflush = 1; 872 iommu_fullflush = 1;
861 if (!strncmp(p, "nofullflush", 11)) 873 if (!strncmp(p, "nofullflush", 11))
862 iommu_fullflush = 0; 874 iommu_fullflush = 0;
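The gart hunks above rename the global bad_dma_address to a file-local bad_dma_addr and expose it through a .mapping_error hook. A small user-space sketch of that sentinel pattern follows; the dma_addr_t typedef and the sentinel value are stand-ins, not the kernel definitions.

#include <stdio.h>

typedef unsigned long long dma_addr_t;

static dma_addr_t bad_dma_addr = 0xfee00000ULL;         /* assumed sentinel */

static dma_addr_t map_area(int fail)
{
        return fail ? bad_dma_addr : 0x1000ULL;         /* fake bus address */
}

static int mapping_error(dma_addr_t dma_addr)
{
        return dma_addr == bad_dma_addr;
}

int main(void)
{
        dma_addr_t a = map_area(0), b = map_area(1);

        printf("a ok=%d, b ok=%d\n", !mapping_error(a), !mapping_error(b));
        return 0;
}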
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index a3933d4330cd..22be12b60a8f 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -33,7 +33,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
33 dma_addr_t bus = page_to_phys(page) + offset; 33 dma_addr_t bus = page_to_phys(page) + offset;
34 WARN_ON(size == 0); 34 WARN_ON(size == 0);
35 if (!check_addr("map_single", dev, bus, size)) 35 if (!check_addr("map_single", dev, bus, size))
36 return bad_dma_address; 36 return DMA_ERROR_CODE;
37 flush_write_buffers(); 37 flush_write_buffers();
38 return bus; 38 return bus;
39} 39}
@@ -103,12 +103,3 @@ struct dma_map_ops nommu_dma_ops = {
103 .sync_sg_for_device = nommu_sync_sg_for_device, 103 .sync_sg_for_device = nommu_sync_sg_for_device,
104 .is_phys = 1, 104 .is_phys = 1,
105}; 105};
106
107void __init no_iommu_init(void)
108{
109 if (dma_ops)
110 return;
111
112 force_iommu = 0; /* no HW IOMMU */
113 dma_ops = &nommu_dma_ops;
114}
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index aaa6b7839f1e..e3c0a66b9e77 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -42,18 +42,28 @@ static struct dma_map_ops swiotlb_dma_ops = {
42 .dma_supported = NULL, 42 .dma_supported = NULL,
43}; 43};
44 44
45void __init pci_swiotlb_init(void) 45/*
46 * pci_swiotlb_init - initialize swiotlb if necessary
47 *
48 * This returns non-zero if we are forced to use swiotlb (by the boot
49 * option).
50 */
51int __init pci_swiotlb_init(void)
46{ 52{
53 int use_swiotlb = swiotlb | swiotlb_force;
54
47 /* don't initialize swiotlb if iommu=off (no_iommu=1) */ 55 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
48#ifdef CONFIG_X86_64 56#ifdef CONFIG_X86_64
49 if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)) 57 if (!no_iommu && max_pfn > MAX_DMA32_PFN)
50 swiotlb = 1; 58 swiotlb = 1;
51#endif 59#endif
52 if (swiotlb_force) 60 if (swiotlb_force)
53 swiotlb = 1; 61 swiotlb = 1;
62
54 if (swiotlb) { 63 if (swiotlb) {
55 printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n"); 64 swiotlb_init(0);
56 swiotlb_init();
57 dma_ops = &swiotlb_dma_ops; 65 dma_ops = &swiotlb_dma_ops;
58 } 66 }
67
68 return use_swiotlb;
59} 69}
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f93078746e00..2b97fc5b124e 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -23,7 +23,7 @@
23# include <linux/ctype.h> 23# include <linux/ctype.h>
24# include <linux/mc146818rtc.h> 24# include <linux/mc146818rtc.h>
25#else 25#else
26# include <asm/iommu.h> 26# include <asm/x86_init.h>
27#endif 27#endif
28 28
29/* 29/*
@@ -622,7 +622,7 @@ void native_machine_shutdown(void)
622#endif 622#endif
623 623
624#ifdef CONFIG_X86_64 624#ifdef CONFIG_X86_64
625 pci_iommu_shutdown(); 625 x86_platform.iommu_shutdown();
626#endif 626#endif
627} 627}
628 628
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 4449a4a2c2ed..d11c5ff7c65e 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -14,10 +14,13 @@
14#include <asm/time.h> 14#include <asm/time.h>
15#include <asm/irq.h> 15#include <asm/irq.h>
16#include <asm/tsc.h> 16#include <asm/tsc.h>
17#include <asm/iommu.h>
17 18
18void __cpuinit x86_init_noop(void) { } 19void __cpuinit x86_init_noop(void) { }
19void __init x86_init_uint_noop(unsigned int unused) { } 20void __init x86_init_uint_noop(unsigned int unused) { }
20void __init x86_init_pgd_noop(pgd_t *unused) { } 21void __init x86_init_pgd_noop(pgd_t *unused) { }
22int __init iommu_init_noop(void) { return 0; }
23void iommu_shutdown_noop(void) { }
21 24
22/* 25/*
23 * The platform setup functions are preset with the default functions 26 * The platform setup functions are preset with the default functions
@@ -62,6 +65,10 @@ struct x86_init_ops x86_init __initdata = {
62 .tsc_pre_init = x86_init_noop, 65 .tsc_pre_init = x86_init_noop,
63 .timer_init = hpet_time_init, 66 .timer_init = hpet_time_init,
64 }, 67 },
68
69 .iommu = {
70 .iommu_init = iommu_init_noop,
71 },
65}; 72};
66 73
67struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { 74struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
@@ -72,4 +79,5 @@ struct x86_platform_ops x86_platform = {
72 .calibrate_tsc = native_calibrate_tsc, 79 .calibrate_tsc = native_calibrate_tsc,
73 .get_wallclock = mach_get_cmos_time, 80 .get_wallclock = mach_get_cmos_time,
74 .set_wallclock = mach_set_rtc_mmss, 81 .set_wallclock = mach_set_rtc_mmss,
82 .iommu_shutdown = iommu_shutdown_noop,
75}; 83};
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index e5aeb2b79e6f..e28e276ac611 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -23,3 +23,8 @@ config ASYNC_RAID6_RECOV
23 select ASYNC_CORE 23 select ASYNC_CORE
24 select ASYNC_PQ 24 select ASYNC_PQ
25 25
26config ASYNC_TX_DISABLE_PQ_VAL_DMA
27 bool
28
29config ASYNC_TX_DISABLE_XOR_VAL_DMA
30 bool
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 6b5cc4fba59f..ec87f53d5059 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -240,6 +240,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
240} 240}
241EXPORT_SYMBOL_GPL(async_gen_syndrome); 241EXPORT_SYMBOL_GPL(async_gen_syndrome);
242 242
243static inline struct dma_chan *
244pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
245{
246 #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
247 return NULL;
248 #endif
249 return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
250 disks, len);
251}
252
243/** 253/**
244 * async_syndrome_val - asynchronously validate a raid6 syndrome 254 * async_syndrome_val - asynchronously validate a raid6 syndrome
245 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 255 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -260,9 +270,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
260 size_t len, enum sum_check_flags *pqres, struct page *spare, 270 size_t len, enum sum_check_flags *pqres, struct page *spare,
261 struct async_submit_ctl *submit) 271 struct async_submit_ctl *submit)
262{ 272{
263 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL, 273 struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
264 NULL, 0, blocks, disks,
265 len);
266 struct dma_device *device = chan ? chan->device : NULL; 274 struct dma_device *device = chan ? chan->device : NULL;
267 struct dma_async_tx_descriptor *tx; 275 struct dma_async_tx_descriptor *tx;
268 unsigned char coefs[disks-2]; 276 unsigned char coefs[disks-2];
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 79182dcb91b7..079ae8ca590b 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -234,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
234 memcmp(a, a + 4, len - 4) == 0); 234 memcmp(a, a + 4, len - 4) == 0);
235} 235}
236 236
237static inline struct dma_chan *
238xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
239 struct page **src_list, int src_cnt, size_t len)
240{
241 #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
242 return NULL;
243 #endif
244 return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
245 src_cnt, len);
246}
247
237/** 248/**
238 * async_xor_val - attempt a xor parity check with a dma engine. 249 * async_xor_val - attempt a xor parity check with a dma engine.
239 * @dest: destination page used if the xor is performed synchronously 250 * @dest: destination page used if the xor is performed synchronously
@@ -255,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
255 int src_cnt, size_t len, enum sum_check_flags *result, 266 int src_cnt, size_t len, enum sum_check_flags *result,
256 struct async_submit_ctl *submit) 267 struct async_submit_ctl *submit)
257{ 268{
258 struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL, 269 struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
259 &dest, 1, src_list,
260 src_cnt, len);
261 struct dma_device *device = chan ? chan->device : NULL; 270 struct dma_device *device = chan ? chan->device : NULL;
262 struct dma_async_tx_descriptor *tx = NULL; 271 struct dma_async_tx_descriptor *tx = NULL;
263 dma_addr_t *dma_src = NULL; 272 dma_addr_t *dma_src = NULL;
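Both async_tx hunks introduce a tiny helper that returns NULL when a build-time option disables the DMA validate path, which makes the caller fall through to the synchronous code. The following user-space sketch shows the same gate; compile it with -DDISABLE_VAL_DMA to exercise the fallback. The names are illustrative, not the kernel's.

#include <stdio.h>

struct chan { const char *name; };

static struct chan hw_chan = { "dma0chan3" };   /* pretend DMA channel */

static struct chan *val_chan(void)
{
#ifdef DISABLE_VAL_DMA
        return NULL;                    /* build-time switch: force sync path */
#endif
        return &hw_chan;
}

int main(void)
{
        struct chan *chan = val_chan();

        printf("validate via %s\n", chan ? chan->name : "synchronous fallback");
        return 0;
}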
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 5fc3292483ef..c6547130624c 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -40,7 +40,7 @@ struct crypto_rfc4106_ctx {
40struct crypto_gcm_ghash_ctx { 40struct crypto_gcm_ghash_ctx {
41 unsigned int cryptlen; 41 unsigned int cryptlen;
42 struct scatterlist *src; 42 struct scatterlist *src;
43 crypto_completion_t complete; 43 void (*complete)(struct aead_request *req, int err);
44}; 44};
45 45
46struct crypto_gcm_req_priv_ctx { 46struct crypto_gcm_req_priv_ctx {
@@ -267,23 +267,26 @@ static int gcm_hash_final(struct aead_request *req,
267 return crypto_ahash_final(ahreq); 267 return crypto_ahash_final(ahreq);
268} 268}
269 269
270static void gcm_hash_final_done(struct crypto_async_request *areq, 270static void __gcm_hash_final_done(struct aead_request *req, int err)
271 int err)
272{ 271{
273 struct aead_request *req = areq->data;
274 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 272 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
275 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; 273 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
276 274
277 if (!err) 275 if (!err)
278 crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16); 276 crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
279 277
280 gctx->complete(areq, err); 278 gctx->complete(req, err);
281} 279}
282 280
283static void gcm_hash_len_done(struct crypto_async_request *areq, 281static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
284 int err)
285{ 282{
286 struct aead_request *req = areq->data; 283 struct aead_request *req = areq->data;
284
285 __gcm_hash_final_done(req, err);
286}
287
288static void __gcm_hash_len_done(struct aead_request *req, int err)
289{
287 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 290 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
288 291
289 if (!err) { 292 if (!err) {
@@ -292,13 +295,18 @@ static void gcm_hash_len_done(struct crypto_async_request *areq,
292 return; 295 return;
293 } 296 }
294 297
295 gcm_hash_final_done(areq, err); 298 __gcm_hash_final_done(req, err);
296} 299}
297 300
298static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq, 301static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
299 int err)
300{ 302{
301 struct aead_request *req = areq->data; 303 struct aead_request *req = areq->data;
304
305 __gcm_hash_len_done(req, err);
306}
307
308static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
309{
302 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 310 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
303 311
304 if (!err) { 312 if (!err) {
@@ -307,13 +315,19 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
307 return; 315 return;
308 } 316 }
309 317
310 gcm_hash_len_done(areq, err); 318 __gcm_hash_len_done(req, err);
311} 319}
312 320
313static void gcm_hash_crypt_done(struct crypto_async_request *areq, 321static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
314 int err) 322 int err)
315{ 323{
316 struct aead_request *req = areq->data; 324 struct aead_request *req = areq->data;
325
326 __gcm_hash_crypt_remain_done(req, err);
327}
328
329static void __gcm_hash_crypt_done(struct aead_request *req, int err)
330{
317 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 331 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
318 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; 332 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
319 unsigned int remain; 333 unsigned int remain;
@@ -327,13 +341,18 @@ static void gcm_hash_crypt_done(struct crypto_async_request *areq,
327 return; 341 return;
328 } 342 }
329 343
330 gcm_hash_crypt_remain_done(areq, err); 344 __gcm_hash_crypt_remain_done(req, err);
331} 345}
332 346
333static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq, 347static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
334 int err)
335{ 348{
336 struct aead_request *req = areq->data; 349 struct aead_request *req = areq->data;
350
351 __gcm_hash_crypt_done(req, err);
352}
353
354static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
355{
337 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 356 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
338 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; 357 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
339 crypto_completion_t complete; 358 crypto_completion_t complete;
@@ -350,15 +369,21 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
350 } 369 }
351 370
352 if (remain) 371 if (remain)
353 gcm_hash_crypt_done(areq, err); 372 __gcm_hash_crypt_done(req, err);
354 else 373 else
355 gcm_hash_crypt_remain_done(areq, err); 374 __gcm_hash_crypt_remain_done(req, err);
356} 375}
357 376
358static void gcm_hash_assoc_done(struct crypto_async_request *areq, 377static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
359 int err) 378 int err)
360{ 379{
361 struct aead_request *req = areq->data; 380 struct aead_request *req = areq->data;
381
382 __gcm_hash_assoc_remain_done(req, err);
383}
384
385static void __gcm_hash_assoc_done(struct aead_request *req, int err)
386{
362 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 387 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
363 unsigned int remain; 388 unsigned int remain;
364 389
@@ -371,13 +396,18 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq,
371 return; 396 return;
372 } 397 }
373 398
374 gcm_hash_assoc_remain_done(areq, err); 399 __gcm_hash_assoc_remain_done(req, err);
375} 400}
376 401
377static void gcm_hash_init_done(struct crypto_async_request *areq, 402static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
378 int err)
379{ 403{
380 struct aead_request *req = areq->data; 404 struct aead_request *req = areq->data;
405
406 __gcm_hash_assoc_done(req, err);
407}
408
409static void __gcm_hash_init_done(struct aead_request *req, int err)
410{
381 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 411 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
382 crypto_completion_t complete; 412 crypto_completion_t complete;
383 unsigned int remain = 0; 413 unsigned int remain = 0;
@@ -393,9 +423,16 @@ static void gcm_hash_init_done(struct crypto_async_request *areq,
393 } 423 }
394 424
395 if (remain) 425 if (remain)
396 gcm_hash_assoc_done(areq, err); 426 __gcm_hash_assoc_done(req, err);
397 else 427 else
398 gcm_hash_assoc_remain_done(areq, err); 428 __gcm_hash_assoc_remain_done(req, err);
429}
430
431static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
432{
433 struct aead_request *req = areq->data;
434
435 __gcm_hash_init_done(req, err);
399} 436}
400 437
401static int gcm_hash(struct aead_request *req, 438static int gcm_hash(struct aead_request *req,
@@ -457,10 +494,8 @@ static void gcm_enc_copy_hash(struct aead_request *req,
457 crypto_aead_authsize(aead), 1); 494 crypto_aead_authsize(aead), 1);
458} 495}
459 496
460static void gcm_enc_hash_done(struct crypto_async_request *areq, 497static void gcm_enc_hash_done(struct aead_request *req, int err)
461 int err)
462{ 498{
463 struct aead_request *req = areq->data;
464 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 499 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
465 500
466 if (!err) 501 if (!err)
@@ -469,8 +504,7 @@ static void gcm_enc_hash_done(struct crypto_async_request *areq,
469 aead_request_complete(req, err); 504 aead_request_complete(req, err);
470} 505}
471 506
472static void gcm_encrypt_done(struct crypto_async_request *areq, 507static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
473 int err)
474{ 508{
475 struct aead_request *req = areq->data; 509 struct aead_request *req = areq->data;
476 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 510 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
@@ -479,9 +513,13 @@ static void gcm_encrypt_done(struct crypto_async_request *areq,
479 err = gcm_hash(req, pctx); 513 err = gcm_hash(req, pctx);
480 if (err == -EINPROGRESS || err == -EBUSY) 514 if (err == -EINPROGRESS || err == -EBUSY)
481 return; 515 return;
516 else if (!err) {
517 crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
518 gcm_enc_copy_hash(req, pctx);
519 }
482 } 520 }
483 521
484 gcm_enc_hash_done(areq, err); 522 aead_request_complete(req, err);
485} 523}
486 524
487static int crypto_gcm_encrypt(struct aead_request *req) 525static int crypto_gcm_encrypt(struct aead_request *req)
@@ -538,9 +576,8 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
538 aead_request_complete(req, err); 576 aead_request_complete(req, err);
539} 577}
540 578
541static void gcm_dec_hash_done(struct crypto_async_request *areq, int err) 579static void gcm_dec_hash_done(struct aead_request *req, int err)
542{ 580{
543 struct aead_request *req = areq->data;
544 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); 581 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
545 struct ablkcipher_request *abreq = &pctx->u.abreq; 582 struct ablkcipher_request *abreq = &pctx->u.abreq;
546 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; 583 struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
@@ -552,9 +589,11 @@ static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
552 err = crypto_ablkcipher_decrypt(abreq); 589 err = crypto_ablkcipher_decrypt(abreq);
553 if (err == -EINPROGRESS || err == -EBUSY) 590 if (err == -EINPROGRESS || err == -EBUSY)
554 return; 591 return;
592 else if (!err)
593 err = crypto_gcm_verify(req, pctx);
555 } 594 }
556 595
557 gcm_decrypt_done(areq, err); 596 aead_request_complete(req, err);
558} 597}
559 598
560static int crypto_gcm_decrypt(struct aead_request *req) 599static int crypto_gcm_decrypt(struct aead_request *req)
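The gcm.c changes split every completion handler into a wrapper that unpacks the request from the generic crypto_async_request and a __*_done() helper that takes the aead_request directly, so the synchronous paths can call the helper without faking an async callback. A stripped-down user-space sketch of that wrapper pattern follows; the types and names are stand-ins.

#include <stdio.h>

struct async_req { void *data; };               /* generic completion cookie */
struct aead_req  { const char *name; };

static void __hash_final_done(struct aead_req *req, int err)
{
        printf("%s finished, err=%d\n", req->name, err);
}

/* async completion: unwrap the request, then share the common helper */
static void hash_final_done(struct async_req *areq, int err)
{
        __hash_final_done(areq->data, err);
}

int main(void)
{
        struct aead_req req = { "req0" };
        struct async_req areq = { &req };

        __hash_final_done(&req, 0);     /* synchronous caller, no wrapping */
        hash_final_done(&areq, 0);      /* asynchronous callback path */
        return 0;
}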
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index cd80d1dd1950..57bdaf6ffab1 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -203,8 +203,9 @@ static const union acpi_predefined_info predefined_names[] =
203 {{"_BCT", 1, ACPI_RTYPE_INTEGER}}, 203 {{"_BCT", 1, ACPI_RTYPE_INTEGER}},
204 {{"_BDN", 0, ACPI_RTYPE_INTEGER}}, 204 {{"_BDN", 0, ACPI_RTYPE_INTEGER}},
205 {{"_BFS", 1, 0}}, 205 {{"_BFS", 1, 0}},
206 {{"_BIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (9 Int),(4 Str) */ 206 {{"_BIF", 0, ACPI_RTYPE_PACKAGE} }, /* Fixed-length (9 Int),(4 Str/Buf) */
207 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4,0}}, 207 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9,
208 ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}, 4, 0} },
208 209
209 {{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */ 210 {{"_BIX", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int),(4 Str) */
210 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4, 211 {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index e56b2a7b53db..23e5a0519af5 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -224,6 +224,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
224 * _OSI(Linux) helps sound 224 * _OSI(Linux) helps sound
225 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"), 225 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
226 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"), 226 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
227 * T400, T500
227 * _OSI(Linux) has Linux specific hooks 228 * _OSI(Linux) has Linux specific hooks
228 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"), 229 * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
229 * _OSI(Linux) is a NOP: 230 * _OSI(Linux) is a NOP:
@@ -254,6 +255,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
254 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"), 255 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
255 }, 256 },
256 }, 257 },
258 {
259 .callback = dmi_enable_osi_linux,
260 .ident = "Lenovo ThinkPad T400",
261 .matches = {
262 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
263 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T400"),
264 },
265 },
266 {
267 .callback = dmi_enable_osi_linux,
268 .ident = "Lenovo ThinkPad T500",
269 .matches = {
270 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
271 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
272 },
273 },
257 {} 274 {}
258}; 275};
259 276
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 4cc1b8116e76..5f2c379ab7bf 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -430,6 +430,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
430 }, 430 },
431 { 431 {
432 .callback = init_set_sci_en_on_resume, 432 .callback = init_set_sci_en_on_resume,
433 .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
434 .matches = {
435 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
436 DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
437 },
438 },
439 {
440 .callback = init_set_sci_en_on_resume,
433 .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC", 441 .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
434 .matches = { 442 .matches = {
435 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 443 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index dc99e26f8e5b..1b392c9e8531 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -177,9 +177,6 @@ static struct ata_port_operations pcmcia_8bit_port_ops = {
177 .drain_fifo = pcmcia_8bit_drain_fifo, 177 .drain_fifo = pcmcia_8bit_drain_fifo,
178}; 178};
179 179
180#define CS_CHECK(fn, ret) \
181do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
182
183 180
184struct pcmcia_config_check { 181struct pcmcia_config_check {
185 unsigned long ctl_base; 182 unsigned long ctl_base;
@@ -252,7 +249,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
252 struct ata_port *ap; 249 struct ata_port *ap;
253 struct ata_pcmcia_info *info; 250 struct ata_pcmcia_info *info;
254 struct pcmcia_config_check *stk = NULL; 251 struct pcmcia_config_check *stk = NULL;
255 int last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM, p; 252 int is_kme = 0, ret = -ENOMEM, p;
256 unsigned long io_base, ctl_base; 253 unsigned long io_base, ctl_base;
257 void __iomem *io_addr, *ctl_addr; 254 void __iomem *io_addr, *ctl_addr;
258 int n_ports = 1; 255 int n_ports = 1;
@@ -271,7 +268,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
271 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 268 pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
272 pdev->io.IOAddrLines = 3; 269 pdev->io.IOAddrLines = 3;
273 pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 270 pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
274 pdev->irq.IRQInfo1 = IRQ_LEVEL_ID;
275 pdev->conf.Attributes = CONF_ENABLE_IRQ; 271 pdev->conf.Attributes = CONF_ENABLE_IRQ;
276 pdev->conf.IntType = INT_MEMORY_AND_IO; 272 pdev->conf.IntType = INT_MEMORY_AND_IO;
277 273
@@ -296,8 +292,13 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
296 } 292 }
297 io_base = pdev->io.BasePort1; 293 io_base = pdev->io.BasePort1;
298 ctl_base = stk->ctl_base; 294 ctl_base = stk->ctl_base;
299 CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq)); 295 ret = pcmcia_request_irq(pdev, &pdev->irq);
300 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf)); 296 if (ret)
297 goto failed;
298
299 ret = pcmcia_request_configuration(pdev, &pdev->conf);
300 if (ret)
301 goto failed;
301 302
302 /* iomap */ 303 /* iomap */
303 ret = -ENOMEM; 304 ret = -ENOMEM;
@@ -351,8 +352,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
351 kfree(stk); 352 kfree(stk);
352 return 0; 353 return 0;
353 354
354cs_failed:
355 cs_error(pdev, last_fn, last_ret);
356failed: 355failed:
357 kfree(stk); 356 kfree(stk);
358 info->ndev = 0; 357 info->ndev = 0;
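The pata_pcmcia.c hunk drops the CS_CHECK() macro and its cs_failed label in favour of checking each pcmcia request's return value explicitly. The sketch below shows the resulting error-handling shape, with fake request functions standing in for the pcmcia calls.

#include <stdio.h>

static int request_irq_demo(void)    { return 0; }
static int request_config_demo(void) { return -1; }    /* simulate a failure */

static int init_one(void)
{
        int ret;

        ret = request_irq_demo();
        if (ret)
                goto failed;

        ret = request_config_demo();
        if (ret)
                goto failed;

        return 0;

failed:
        puts("configuration failed, cleaning up");
        return ret;
}

int main(void)
{
        return init_one() ? 1 : 0;
}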
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index d344db42a002..172b57e6543f 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -707,34 +707,17 @@ static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
707 return ata_dev_classify(&tf); 707 return ata_dev_classify(&tf);
708} 708}
709 709
710static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline) 710static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
711{
712 /* FIXME: Never skip softreset, sata_fsl_softreset() is
713 * combination of soft and hard resets. sata_fsl_softreset()
714 * needs to be splitted into soft and hard resets.
715 */
716 return 0;
717}
718
719static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
720 unsigned long deadline) 711 unsigned long deadline)
721{ 712{
722 struct ata_port *ap = link->ap; 713 struct ata_port *ap = link->ap;
723 struct sata_fsl_port_priv *pp = ap->private_data;
724 struct sata_fsl_host_priv *host_priv = ap->host->private_data; 714 struct sata_fsl_host_priv *host_priv = ap->host->private_data;
725 void __iomem *hcr_base = host_priv->hcr_base; 715 void __iomem *hcr_base = host_priv->hcr_base;
726 int pmp = sata_srst_pmp(link);
727 u32 temp; 716 u32 temp;
728 struct ata_taskfile tf;
729 u8 *cfis;
730 u32 Serror;
731 int i = 0; 717 int i = 0;
732 unsigned long start_jiffies; 718 unsigned long start_jiffies;
733 719
734 DPRINTK("in xx_softreset\n"); 720 DPRINTK("in xx_hardreset\n");
735
736 if (pmp != SATA_PMP_CTRL_PORT)
737 goto issue_srst;
738 721
739try_offline_again: 722try_offline_again:
740 /* 723 /*
@@ -749,7 +732,7 @@ try_offline_again:
749 732
750 if (temp & ONLINE) { 733 if (temp & ONLINE) {
751 ata_port_printk(ap, KERN_ERR, 734 ata_port_printk(ap, KERN_ERR,
752 "Softreset failed, not off-lined %d\n", i); 735 "Hardreset failed, not off-lined %d\n", i);
753 736
754 /* 737 /*
755 * Try to offline controller atleast twice 738 * Try to offline controller atleast twice
@@ -761,7 +744,7 @@ try_offline_again:
761 goto try_offline_again; 744 goto try_offline_again;
762 } 745 }
763 746
764 DPRINTK("softreset, controller off-lined\n"); 747 DPRINTK("hardreset, controller off-lined\n");
765 VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); 748 VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
766 VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); 749 VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
767 750
@@ -786,11 +769,11 @@ try_offline_again:
786 769
787 if (!(temp & ONLINE)) { 770 if (!(temp & ONLINE)) {
788 ata_port_printk(ap, KERN_ERR, 771 ata_port_printk(ap, KERN_ERR,
789 "Softreset failed, not on-lined\n"); 772 "Hardreset failed, not on-lined\n");
790 goto err; 773 goto err;
791 } 774 }
792 775
793 DPRINTK("softreset, controller off-lined & on-lined\n"); 776 DPRINTK("hardreset, controller off-lined & on-lined\n");
794 VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS)); 777 VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
795 VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL)); 778 VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
796 779
@@ -806,7 +789,7 @@ try_offline_again:
806 "No Device OR PHYRDY change,Hstatus = 0x%x\n", 789 "No Device OR PHYRDY change,Hstatus = 0x%x\n",
807 ioread32(hcr_base + HSTATUS)); 790 ioread32(hcr_base + HSTATUS));
808 *class = ATA_DEV_NONE; 791 *class = ATA_DEV_NONE;
809 goto out; 792 return 0;
810 } 793 }
811 794
812 /* 795 /*
@@ -819,11 +802,44 @@ try_offline_again:
819 if ((temp & 0xFF) != 0x18) { 802 if ((temp & 0xFF) != 0x18) {
820 ata_port_printk(ap, KERN_WARNING, "No Signature Update\n"); 803 ata_port_printk(ap, KERN_WARNING, "No Signature Update\n");
821 *class = ATA_DEV_NONE; 804 *class = ATA_DEV_NONE;
822 goto out; 805 goto do_followup_srst;
823 } else { 806 } else {
824 ata_port_printk(ap, KERN_INFO, 807 ata_port_printk(ap, KERN_INFO,
825 "Signature Update detected @ %d msecs\n", 808 "Signature Update detected @ %d msecs\n",
826 jiffies_to_msecs(jiffies - start_jiffies)); 809 jiffies_to_msecs(jiffies - start_jiffies));
810 *class = sata_fsl_dev_classify(ap);
811 return 0;
812 }
813
814do_followup_srst:
815 /*
816 * request libATA to perform follow-up softreset
817 */
818 return -EAGAIN;
819
820err:
821 return -EIO;
822}
823
824static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
825 unsigned long deadline)
826{
827 struct ata_port *ap = link->ap;
828 struct sata_fsl_port_priv *pp = ap->private_data;
829 struct sata_fsl_host_priv *host_priv = ap->host->private_data;
830 void __iomem *hcr_base = host_priv->hcr_base;
831 int pmp = sata_srst_pmp(link);
832 u32 temp;
833 struct ata_taskfile tf;
834 u8 *cfis;
835 u32 Serror;
836
837 DPRINTK("in xx_softreset\n");
838
839 if (ata_link_offline(link)) {
840 DPRINTK("PHY reports no device\n");
841 *class = ATA_DEV_NONE;
842 return 0;
827 } 843 }
828 844
829 /* 845 /*
@@ -834,7 +850,6 @@ try_offline_again:
834 * reached here, we can send a command to the target device 850 * reached here, we can send a command to the target device
835 */ 851 */
836 852
837issue_srst:
838 DPRINTK("Sending SRST/device reset\n"); 853 DPRINTK("Sending SRST/device reset\n");
839 854
840 ata_tf_init(link->device, &tf); 855 ata_tf_init(link->device, &tf);
@@ -860,6 +875,8 @@ issue_srst:
860 ioread32(CA + hcr_base), ioread32(CC + hcr_base)); 875 ioread32(CA + hcr_base), ioread32(CC + hcr_base));
861 876
862 iowrite32(0xFFFF, CC + hcr_base); 877 iowrite32(0xFFFF, CC + hcr_base);
878 if (pmp != SATA_PMP_CTRL_PORT)
879 iowrite32(pmp, CQPMP + hcr_base);
863 iowrite32(1, CQ + hcr_base); 880 iowrite32(1, CQ + hcr_base);
864 881
865 temp = ata_wait_register(CQ + hcr_base, 0x1, 0x1, 1, 5000); 882 temp = ata_wait_register(CQ + hcr_base, 0x1, 0x1, 1, 5000);
@@ -926,7 +943,6 @@ issue_srst:
926 VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE)); 943 VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
927 } 944 }
928 945
929out:
930 return 0; 946 return 0;
931 947
932err: 948err:
@@ -988,18 +1004,6 @@ static void sata_fsl_error_intr(struct ata_port *ap)
988 ehi->err_mask |= AC_ERR_ATA_BUS; 1004 ehi->err_mask |= AC_ERR_ATA_BUS;
989 ehi->action |= ATA_EH_SOFTRESET; 1005 ehi->action |= ATA_EH_SOFTRESET;
990 1006
991 /*
992 * Ignore serror in case of fatal errors as we always want
993 * to do a soft-reset of the FSL SATA controller. Analyzing
994 * serror may cause libata to schedule a hard-reset action,
995 * and hard-reset currently does not do controller
996 * offline/online, causing command timeouts and leads to an
997 * un-recoverable state, hence make libATA ignore
998 * autopsy in case of fatal errors.
999 */
1000
1001 ehi->flags |= ATA_EHI_NO_AUTOPSY;
1002
1003 freeze = 1; 1007 freeze = 1;
1004 } 1008 }
1005 1009
@@ -1267,8 +1271,8 @@ static struct ata_port_operations sata_fsl_ops = {
1267 1271
1268 .freeze = sata_fsl_freeze, 1272 .freeze = sata_fsl_freeze,
1269 .thaw = sata_fsl_thaw, 1273 .thaw = sata_fsl_thaw,
1270 .prereset = sata_fsl_prereset,
1271 .softreset = sata_fsl_softreset, 1274 .softreset = sata_fsl_softreset,
1275 .hardreset = sata_fsl_hardreset,
1272 .pmp_softreset = sata_fsl_softreset, 1276 .pmp_softreset = sata_fsl_softreset,
1273 .error_handler = sata_fsl_error_handler, 1277 .error_handler = sata_fsl_error_handler,
1274 .post_internal_cmd = sata_fsl_post_internal_cmd, 1278 .post_internal_cmd = sata_fsl_post_internal_cmd,
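The sata_fsl.c rework splits the old combined reset into a hardreset that off-lines and on-lines the controller and a plain softreset, with the hardreset returning -EAGAIN when it wants a follow-up SRST. The user-space sketch below mimics a dispatcher honouring that convention; it is an invented stand-in, not libata's error handler.

#include <errno.h>
#include <stdio.h>

static int hardreset_demo(int signature_seen)
{
        if (!signature_seen)
                return -EAGAIN;         /* ask the caller for a softreset */
        return 0;
}

static int softreset_demo(void)
{
        puts("issuing SRST / device reset");
        return 0;
}

static int recover(int signature_seen)
{
        int ret = hardreset_demo(signature_seen);

        if (ret == -EAGAIN)
                ret = softreset_demo();
        return ret;
}

int main(void)
{
        printf("with signature update: %d\n", recover(1));
        printf("without signature update: %d\n", recover(0));
        return 0;
}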
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index a770498a74ec..846d89e3d122 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -328,11 +328,11 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
328 * necessary. 328 * necessary.
329 */ 329 */
330 parent = dev->parent; 330 parent = dev->parent;
331 spin_unlock_irq(&dev->power.lock); 331 spin_unlock(&dev->power.lock);
332 332
333 pm_runtime_get_noresume(parent); 333 pm_runtime_get_noresume(parent);
334 334
335 spin_lock_irq(&parent->power.lock); 335 spin_lock(&parent->power.lock);
336 /* 336 /*
337 * We can resume if the parent's run-time PM is disabled or it 337 * We can resume if the parent's run-time PM is disabled or it
338 * is set to ignore children. 338 * is set to ignore children.
@@ -343,9 +343,9 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
343 if (parent->power.runtime_status != RPM_ACTIVE) 343 if (parent->power.runtime_status != RPM_ACTIVE)
344 retval = -EBUSY; 344 retval = -EBUSY;
345 } 345 }
346 spin_unlock_irq(&parent->power.lock); 346 spin_unlock(&parent->power.lock);
347 347
348 spin_lock_irq(&dev->power.lock); 348 spin_lock(&dev->power.lock);
349 if (retval) 349 if (retval)
350 goto out; 350 goto out;
351 goto repeat; 351 goto repeat;
@@ -777,7 +777,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
777 } 777 }
778 778
779 if (parent) { 779 if (parent) {
780 spin_lock_irq(&parent->power.lock); 780 spin_lock(&parent->power.lock);
781 781
782 /* 782 /*
783 * It is invalid to put an active child under a parent that is 783 * It is invalid to put an active child under a parent that is
@@ -793,7 +793,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
793 atomic_inc(&parent->power.child_count); 793 atomic_inc(&parent->power.child_count);
794 } 794 }
795 795
796 spin_unlock_irq(&parent->power.lock); 796 spin_unlock(&parent->power.lock);
797 797
798 if (error) 798 if (error)
799 goto out; 799 goto out;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 965ece2c7e4d..13bb69d2abb3 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -735,6 +735,21 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
735 part_stat_unlock(); 735 part_stat_unlock();
736} 736}
737 737
738/*
739 * Ensure we don't create aliases in VI caches
740 */
741static inline void
742killalias(struct bio *bio)
743{
744 struct bio_vec *bv;
745 int i;
746
747 if (bio_data_dir(bio) == READ)
748 __bio_for_each_segment(bv, bio, i, 0) {
749 flush_dcache_page(bv->bv_page);
750 }
751}
752
738void 753void
739aoecmd_ata_rsp(struct sk_buff *skb) 754aoecmd_ata_rsp(struct sk_buff *skb)
740{ 755{
@@ -853,8 +868,12 @@ aoecmd_ata_rsp(struct sk_buff *skb)
853 868
854 if (buf && --buf->nframesout == 0 && buf->resid == 0) { 869 if (buf && --buf->nframesout == 0 && buf->resid == 0) {
855 diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector); 870 diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector);
856 n = (buf->flags & BUFFL_FAIL) ? -EIO : 0; 871 if (buf->flags & BUFFL_FAIL)
857 bio_endio(buf->bio, n); 872 bio_endio(buf->bio, -EIO);
873 else {
874 killalias(buf->bio);
875 bio_endio(buf->bio, 0);
876 }
858 mempool_free(buf, d->bufpool); 877 mempool_free(buf, d->bufpool);
859 } 878 }
860 879
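killalias() added above flushes the data cache for every segment of a completed READ so virtually indexed caches do not keep stale aliases; writes are left alone. The minimal user-space analogue below only illustrates that direction-conditional walk, with a printf standing in for flush_dcache_page().

#include <stdio.h>

enum data_dir { DIR_READ, DIR_WRITE };

struct seg { int page; };

static void flush_page_demo(struct seg *s)
{
        printf("flush dcache for page %d\n", s->page);
}

static void complete_io(enum data_dir dir, struct seg *segs, int n)
{
        int i;

        if (dir != DIR_READ)
                return;                 /* only reads can leave stale aliases */
        for (i = 0; i < n; i++)
                flush_page_demo(&segs[i]);
}

int main(void)
{
        struct seg segs[2] = { { 1 }, { 2 } };

        complete_io(DIR_READ, segs, 2);
        complete_io(DIR_WRITE, segs, 2);
        return 0;
}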
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 6399e5090df4..92b126394fa1 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -482,7 +482,7 @@ static ssize_t host_store_rescan(struct device *dev,
482 482
483 return count; 483 return count;
484} 484}
485DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); 485static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
486 486
487static ssize_t dev_show_unique_id(struct device *dev, 487static ssize_t dev_show_unique_id(struct device *dev,
488 struct device_attribute *attr, 488 struct device_attribute *attr,
@@ -512,7 +512,7 @@ static ssize_t dev_show_unique_id(struct device *dev,
512 sn[8], sn[9], sn[10], sn[11], 512 sn[8], sn[9], sn[10], sn[11],
513 sn[12], sn[13], sn[14], sn[15]); 513 sn[12], sn[13], sn[14], sn[15]);
514} 514}
515DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL); 515static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
516 516
517static ssize_t dev_show_vendor(struct device *dev, 517static ssize_t dev_show_vendor(struct device *dev,
518 struct device_attribute *attr, 518 struct device_attribute *attr,
@@ -536,7 +536,7 @@ static ssize_t dev_show_vendor(struct device *dev,
536 else 536 else
537 return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor); 537 return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor);
538} 538}
539DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL); 539static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
540 540
541static ssize_t dev_show_model(struct device *dev, 541static ssize_t dev_show_model(struct device *dev,
542 struct device_attribute *attr, 542 struct device_attribute *attr,
@@ -560,7 +560,7 @@ static ssize_t dev_show_model(struct device *dev,
560 else 560 else
561 return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model); 561 return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model);
562} 562}
563DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL); 563static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
564 564
565static ssize_t dev_show_rev(struct device *dev, 565static ssize_t dev_show_rev(struct device *dev,
566 struct device_attribute *attr, 566 struct device_attribute *attr,
@@ -584,7 +584,7 @@ static ssize_t dev_show_rev(struct device *dev,
584 else 584 else
585 return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev); 585 return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev);
586} 586}
587DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); 587static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
588 588
589static ssize_t cciss_show_lunid(struct device *dev, 589static ssize_t cciss_show_lunid(struct device *dev,
590 struct device_attribute *attr, char *buf) 590 struct device_attribute *attr, char *buf)
@@ -609,7 +609,7 @@ static ssize_t cciss_show_lunid(struct device *dev,
609 lunid[0], lunid[1], lunid[2], lunid[3], 609 lunid[0], lunid[1], lunid[2], lunid[3],
610 lunid[4], lunid[5], lunid[6], lunid[7]); 610 lunid[4], lunid[5], lunid[6], lunid[7]);
611} 611}
612DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL); 612static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
613 613
614static ssize_t cciss_show_raid_level(struct device *dev, 614static ssize_t cciss_show_raid_level(struct device *dev,
615 struct device_attribute *attr, char *buf) 615 struct device_attribute *attr, char *buf)
@@ -632,7 +632,7 @@ static ssize_t cciss_show_raid_level(struct device *dev,
632 return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n", 632 return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
633 raid_label[raid]); 633 raid_label[raid]);
634} 634}
635DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL); 635static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
636 636
637static ssize_t cciss_show_usage_count(struct device *dev, 637static ssize_t cciss_show_usage_count(struct device *dev,
638 struct device_attribute *attr, char *buf) 638 struct device_attribute *attr, char *buf)
@@ -651,7 +651,7 @@ static ssize_t cciss_show_usage_count(struct device *dev,
651 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 651 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
652 return snprintf(buf, 20, "%d\n", count); 652 return snprintf(buf, 20, "%d\n", count);
653} 653}
654DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); 654static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
655 655
656static struct attribute *cciss_host_attrs[] = { 656static struct attribute *cciss_host_attrs[] = {
657 &dev_attr_rescan.attr, 657 &dev_attr_rescan.attr,
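The cciss hunks can mark the DEVICE_ATTR() definitions static because the macro merely defines a dev_attr_<name> object and the only references are the attribute arrays in the same file. The simplified sketch below mimics that layout; the macro here is a stand-in, not the kernel's DEVICE_ATTR().

#include <stdio.h>

struct device_attribute { const char *name; };

/* stand-in for the kernel macro: it only defines dev_attr_<name> */
#define DEVICE_ATTR(_name) \
        struct device_attribute dev_attr_##_name = { #_name }

static DEVICE_ATTR(rescan);
static DEVICE_ATTR(unique_id);

static struct device_attribute *host_attrs[] = {
        &dev_attr_rescan,
        &dev_attr_unique_id,
};

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(host_attrs) / sizeof(host_attrs[0]); i++)
                printf("sysfs attr: %s\n", host_attrs[i]->name);
        return 0;
}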
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index b0e569ba730d..2acdc605cb4b 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -867,11 +867,9 @@ static int bluecard_probe(struct pcmcia_device *link)
867 867
868 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 868 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
869 link->io.NumPorts1 = 8; 869 link->io.NumPorts1 = 8;
870 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 870 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
871 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
872 871
873 link->irq.Handler = bluecard_interrupt; 872 link->irq.Handler = bluecard_interrupt;
874 link->irq.Instance = info;
875 873
876 link->conf.Attributes = CONF_ENABLE_IRQ; 874 link->conf.Attributes = CONF_ENABLE_IRQ;
877 link->conf.IntType = INT_MEMORY_AND_IO; 875 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -905,22 +903,16 @@ static int bluecard_config(struct pcmcia_device *link)
905 break; 903 break;
906 } 904 }
907 905
908 if (i != 0) { 906 if (i != 0)
909 cs_error(link, RequestIO, i);
910 goto failed; 907 goto failed;
911 }
912 908
913 i = pcmcia_request_irq(link, &link->irq); 909 i = pcmcia_request_irq(link, &link->irq);
914 if (i != 0) { 910 if (i != 0)
915 cs_error(link, RequestIRQ, i);
916 link->irq.AssignedIRQ = 0; 911 link->irq.AssignedIRQ = 0;
917 }
918 912
919 i = pcmcia_request_configuration(link, &link->conf); 913 i = pcmcia_request_configuration(link, &link->conf);
920 if (i != 0) { 914 if (i != 0)
921 cs_error(link, RequestConfiguration, i);
922 goto failed; 915 goto failed;
923 }
924 916
925 if (bluecard_open(info) != 0) 917 if (bluecard_open(info) != 0)
926 goto failed; 918 goto failed;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index d58e22b9f06a..d814a2755ccb 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -659,11 +659,9 @@ static int bt3c_probe(struct pcmcia_device *link)
659 659
660 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 660 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
661 link->io.NumPorts1 = 8; 661 link->io.NumPorts1 = 8;
662 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 662 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
663 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
664 663
665 link->irq.Handler = bt3c_interrupt; 664 link->irq.Handler = bt3c_interrupt;
666 link->irq.Instance = info;
667 665
668 link->conf.Attributes = CONF_ENABLE_IRQ; 666 link->conf.Attributes = CONF_ENABLE_IRQ;
669 link->conf.IntType = INT_MEMORY_AND_IO; 667 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -740,21 +738,16 @@ static int bt3c_config(struct pcmcia_device *link)
740 goto found_port; 738 goto found_port;
741 739
742 BT_ERR("No usable port range found"); 740 BT_ERR("No usable port range found");
743 cs_error(link, RequestIO, -ENODEV);
744 goto failed; 741 goto failed;
745 742
746found_port: 743found_port:
747 i = pcmcia_request_irq(link, &link->irq); 744 i = pcmcia_request_irq(link, &link->irq);
748 if (i != 0) { 745 if (i != 0)
749 cs_error(link, RequestIRQ, i);
750 link->irq.AssignedIRQ = 0; 746 link->irq.AssignedIRQ = 0;
751 }
752 747
753 i = pcmcia_request_configuration(link, &link->conf); 748 i = pcmcia_request_configuration(link, &link->conf);
754 if (i != 0) { 749 if (i != 0)
755 cs_error(link, RequestConfiguration, i);
756 goto failed; 750 goto failed;
757 }
758 751
759 if (bt3c_open(info) != 0) 752 if (bt3c_open(info) != 0)
760 goto failed; 753 goto failed;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index efd689a062eb..d339464dc15e 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -588,11 +588,9 @@ static int btuart_probe(struct pcmcia_device *link)
588 588
589 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 589 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
590 link->io.NumPorts1 = 8; 590 link->io.NumPorts1 = 8;
591 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 591 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
592 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
593 592
594 link->irq.Handler = btuart_interrupt; 593 link->irq.Handler = btuart_interrupt;
595 link->irq.Instance = info;
596 594
597 link->conf.Attributes = CONF_ENABLE_IRQ; 595 link->conf.Attributes = CONF_ENABLE_IRQ;
598 link->conf.IntType = INT_MEMORY_AND_IO; 596 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -669,21 +667,16 @@ static int btuart_config(struct pcmcia_device *link)
669 goto found_port; 667 goto found_port;
670 668
671 BT_ERR("No usable port range found"); 669 BT_ERR("No usable port range found");
672 cs_error(link, RequestIO, -ENODEV);
673 goto failed; 670 goto failed;
674 671
675found_port: 672found_port:
676 i = pcmcia_request_irq(link, &link->irq); 673 i = pcmcia_request_irq(link, &link->irq);
677 if (i != 0) { 674 if (i != 0)
678 cs_error(link, RequestIRQ, i);
679 link->irq.AssignedIRQ = 0; 675 link->irq.AssignedIRQ = 0;
680 }
681 676
682 i = pcmcia_request_configuration(link, &link->conf); 677 i = pcmcia_request_configuration(link, &link->conf);
683 if (i != 0) { 678 if (i != 0)
684 cs_error(link, RequestConfiguration, i);
685 goto failed; 679 goto failed;
686 }
687 680
688 if (btuart_open(info) != 0) 681 if (btuart_open(info) != 0)
689 goto failed; 682 goto failed;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2fb38027f3bb..44bc8bbabf54 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -600,11 +600,13 @@ static int btusb_close(struct hci_dev *hdev)
600 btusb_stop_traffic(data); 600 btusb_stop_traffic(data);
601 err = usb_autopm_get_interface(data->intf); 601 err = usb_autopm_get_interface(data->intf);
602 if (err < 0) 602 if (err < 0)
603 return 0; 603 goto failed;
604 604
605 data->intf->needs_remote_wakeup = 0; 605 data->intf->needs_remote_wakeup = 0;
606 usb_autopm_put_interface(data->intf); 606 usb_autopm_put_interface(data->intf);
607 607
608failed:
609 usb_scuttle_anchored_urbs(&data->deferred);
608 return 0; 610 return 0;
609} 611}
610 612
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index b881a9cd8741..4f02a6f3c980 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -573,11 +573,9 @@ static int dtl1_probe(struct pcmcia_device *link)
573 573
574 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 574 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
575 link->io.NumPorts1 = 8; 575 link->io.NumPorts1 = 8;
576 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 576 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
577 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
578 577
579 link->irq.Handler = dtl1_interrupt; 578 link->irq.Handler = dtl1_interrupt;
580 link->irq.Instance = info;
581 579
582 link->conf.Attributes = CONF_ENABLE_IRQ; 580 link->conf.Attributes = CONF_ENABLE_IRQ;
583 link->conf.IntType = INT_MEMORY_AND_IO; 581 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -622,16 +620,12 @@ static int dtl1_config(struct pcmcia_device *link)
622 goto failed; 620 goto failed;
623 621
624 i = pcmcia_request_irq(link, &link->irq); 622 i = pcmcia_request_irq(link, &link->irq);
625 if (i != 0) { 623 if (i != 0)
626 cs_error(link, RequestIRQ, i);
627 link->irq.AssignedIRQ = 0; 624 link->irq.AssignedIRQ = 0;
628 }
629 625
630 i = pcmcia_request_configuration(link, &link->conf); 626 i = pcmcia_request_configuration(link, &link->conf);
631 if (i != 0) { 627 if (i != 0)
632 cs_error(link, RequestConfiguration, i);
633 goto failed; 628 goto failed;
634 }
635 629
636 if (dtl1_open(info) != 0) 630 if (dtl1_open(info) != 0)
637 goto failed; 631 goto failed;
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index ccb1fa89de29..2fb3a480f6b0 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -56,9 +56,8 @@ config AGP_AMD
56 X on AMD Irongate, 761, and 762 chipsets. 56 X on AMD Irongate, 761, and 762 chipsets.
57 57
58config AGP_AMD64 58config AGP_AMD64
59 tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU 59 tristate "AMD Opteron/Athlon64 on-CPU GART support"
60 depends on AGP && X86 60 depends on AGP && X86
61 default y if GART_IOMMU
62 help 61 help
63 This option gives you AGP support for the GLX component of 62 This option gives you AGP support for the GLX component of
64 X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. 63 X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 4068467ce7b9..3cb56a049e24 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -62,6 +62,7 @@
62#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042 62#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG 0x0042
63#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044 63#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB 0x0044
64#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062 64#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB 0x0062
65#define PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB 0x006a
65#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046 66#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG 0x0046
66 67
67/* cover 915 and 945 variants */ 68/* cover 915 and 945 variants */
@@ -96,7 +97,8 @@
96 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ 97 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
97 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ 98 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
98 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \ 99 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
99 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB) 100 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB || \
101 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB)
100 102
101extern int agp_memory_reserved; 103extern int agp_memory_reserved;
102 104
@@ -1161,12 +1163,6 @@ static int intel_i915_configure(void)
1161 1163
1162 intel_i9xx_setup_flush(); 1164 intel_i9xx_setup_flush();
1163 1165
1164#ifdef USE_PCI_DMA_API
1165 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
1166 dev_err(&intel_private.pcidev->dev,
1167 "set gfx device dma mask 36bit failed!\n");
1168#endif
1169
1170 return 0; 1166 return 0;
1171} 1167}
1172 1168
@@ -1364,6 +1360,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
1364 case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: 1360 case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
1365 case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: 1361 case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
1366 case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB: 1362 case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
1363 case PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB:
1367 *gtt_offset = *gtt_size = MB(2); 1364 *gtt_offset = *gtt_size = MB(2);
1368 break; 1365 break;
1369 default: 1366 default:
@@ -2365,6 +2362,8 @@ static const struct intel_driver_description {
2365 "IGDNG/M", NULL, &intel_i965_driver }, 2362 "IGDNG/M", NULL, &intel_i965_driver },
2366 { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, 2363 { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
2367 "IGDNG/MA", NULL, &intel_i965_driver }, 2364 "IGDNG/MA", NULL, &intel_i965_driver },
2365 { PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
2366 "IGDNG/MC2", NULL, &intel_i965_driver },
2368 { 0, 0, 0, NULL, NULL, NULL } 2367 { 0, 0, 0, NULL, NULL, NULL }
2369}; 2368};
2370 2369
@@ -2456,6 +2455,11 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
2456 &bridge->mode); 2455 &bridge->mode);
2457 } 2456 }
2458 2457
2458 if (bridge->driver->mask_memory == intel_i965_mask_memory)
2459 if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
2460 dev_err(&intel_private.pcidev->dev,
2461 "set gfx device dma mask 36bit failed!\n");
2462
2459 pci_set_drvdata(pdev, bridge); 2463 pci_set_drvdata(pdev, bridge);
2460 return agp_add_bridge(bridge); 2464 return agp_add_bridge(bridge);
2461} 2465}
@@ -2561,6 +2565,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
2561 ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), 2565 ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
2562 ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), 2566 ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
2563 ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB), 2567 ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
2568 ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB),
2564 { } 2569 { }
2565}; 2570};
2566 2571
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index 737be953cc58..950837cf9e9c 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -1249,7 +1249,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
1249 1249
1250 if (keycode >= NR_KEYS) 1250 if (keycode >= NR_KEYS)
1251 if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8) 1251 if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
1252 keysym = K(KT_BRL, keycode - KEY_BRL_DOT1 + 1); 1252 keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
1253 else 1253 else
1254 return; 1254 return;
1255 else 1255 else
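
The one-liner above is subtle: keymap entries used elsewhere in keyboard.c carry an extra 0xf000 marker (applied by the U() macro), so that kbd_keycode() can tell real keysyms from Unicode code points by comparing KTYP() against 0xf0. A braille keysym built with a bare K() lacks that marker and would fall into the Unicode branch. A small illustration, assuming the usual definitions from <linux/keyboard.h> (where KT_BRL is 14):

	#define K(t, v)	(((t) << 8) | (v))
	#define KTYP(x)	((x) >> 8)
	#define KT_BRL	14

	unsigned short raw = K(KT_BRL, 1);		/* KTYP == 0x0e, < 0xf0: would be treated as Unicode */
	unsigned short sym = K(KT_BRL, 1) ^ 0xf000;	/* KTYP == 0xfe, >= 0xf0: KT_BRL handler (0xfe - 0xf0 == 14) */
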
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index c250a31efa53..2db4c0a29b05 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -23,8 +23,6 @@
23 * All rights reserved. Licensed under dual BSD/GPL license. 23 * All rights reserved. Licensed under dual BSD/GPL license.
24 */ 24 */
25 25
26/* #define PCMCIA_DEBUG 6 */
27
28#include <linux/kernel.h> 26#include <linux/kernel.h>
29#include <linux/module.h> 27#include <linux/module.h>
30#include <linux/slab.h> 28#include <linux/slab.h>
@@ -47,18 +45,17 @@
47 45
48/* #define ATR_CSUM */ 46/* #define ATR_CSUM */
49 47
50#ifdef PCMCIA_DEBUG 48#define reader_to_dev(x) (&x->p_dev->dev)
51#define reader_to_dev(x) (&handle_to_dev(x->p_dev)) 49
52static int pc_debug = PCMCIA_DEBUG; 50/* n (debug level) is ignored */
53module_param(pc_debug, int, 0600); 51/* additional debug output may be enabled by re-compiling with
54#define DEBUGP(n, rdr, x, args...) do { \ 52 * CM4000_DEBUG set */
55 if (pc_debug >= (n)) \ 53/* #define CM4000_DEBUG */
56 dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ 54#define DEBUGP(n, rdr, x, args...) do { \
57 __func__ , ## args); \ 55 dev_dbg(reader_to_dev(rdr), "%s:" x, \
56 __func__ , ## args); \
58 } while (0) 57 } while (0)
59#else 58
60#define DEBUGP(n, rdr, x, args...)
61#endif
62static char *version = "cm4000_cs.c v2.4.0gm6 - All bugs added by Harald Welte"; 59static char *version = "cm4000_cs.c v2.4.0gm6 - All bugs added by Harald Welte";
63 60
64#define T_1SEC (HZ) 61#define T_1SEC (HZ)
@@ -174,14 +171,13 @@ static unsigned char fi_di_table[10][14] = {
174/* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9} 171/* 9 */ {0x09,0x19,0x29,0x39,0x49,0x59,0x69,0x11,0x11,0x99,0xA9,0xB9,0xC9,0xD9}
175}; 172};
176 173
177#ifndef PCMCIA_DEBUG 174#ifndef CM4000_DEBUG
178#define xoutb outb 175#define xoutb outb
179#define xinb inb 176#define xinb inb
180#else 177#else
181static inline void xoutb(unsigned char val, unsigned short port) 178static inline void xoutb(unsigned char val, unsigned short port)
182{ 179{
183 if (pc_debug >= 7) 180 pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
184 printk(KERN_DEBUG "outb(val=%.2x,port=%.4x)\n", val, port);
185 outb(val, port); 181 outb(val, port);
186} 182}
187static inline unsigned char xinb(unsigned short port) 183static inline unsigned char xinb(unsigned short port)
@@ -189,8 +185,7 @@ static inline unsigned char xinb(unsigned short port)
189 unsigned char val; 185 unsigned char val;
190 186
191 val = inb(port); 187 val = inb(port);
192 if (pc_debug >= 7) 188 pr_debug("%.2x=inb(%.4x)\n", val, port);
193 printk(KERN_DEBUG "%.2x=inb(%.4x)\n", val, port);
194 189
195 return val; 190 return val;
196} 191}
@@ -514,12 +509,10 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
514 for (i = 0; i < 4; i++) { 509 for (i = 0; i < 4; i++) {
515 xoutb(i, REG_BUF_ADDR(iobase)); 510 xoutb(i, REG_BUF_ADDR(iobase));
516 xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */ 511 xoutb(dev->pts[i], REG_BUF_DATA(iobase)); /* buf data */
517#ifdef PCMCIA_DEBUG 512#ifdef CM4000_DEBUG
518 if (pc_debug >= 5) 513 pr_debug("0x%.2x ", dev->pts[i]);
519 printk("0x%.2x ", dev->pts[i]);
520 } 514 }
521 if (pc_debug >= 5) 515 pr_debug("\n");
522 printk("\n");
523#else 516#else
524 } 517 }
525#endif 518#endif
@@ -579,14 +572,13 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
579 pts_reply[i] = inb(REG_BUF_DATA(iobase)); 572 pts_reply[i] = inb(REG_BUF_DATA(iobase));
580 } 573 }
581 574
582#ifdef PCMCIA_DEBUG 575#ifdef CM4000_DEBUG
583 DEBUGP(2, dev, "PTSreply: "); 576 DEBUGP(2, dev, "PTSreply: ");
584 for (i = 0; i < num_bytes_read; i++) { 577 for (i = 0; i < num_bytes_read; i++) {
585 if (pc_debug >= 5) 578 pr_debug("0x%.2x ", pts_reply[i]);
586 printk("0x%.2x ", pts_reply[i]);
587 } 579 }
588 printk("\n"); 580 pr_debug("\n");
589#endif /* PCMCIA_DEBUG */ 581#endif /* CM4000_DEBUG */
590 582
591 DEBUGP(5, dev, "Clear Tactive in Flags1\n"); 583 DEBUGP(5, dev, "Clear Tactive in Flags1\n");
592 xoutb(0x20, REG_FLAGS1(iobase)); 584 xoutb(0x20, REG_FLAGS1(iobase));
@@ -655,7 +647,7 @@ static void terminate_monitor(struct cm4000_dev *dev)
655 647
656 DEBUGP(5, dev, "Delete timer\n"); 648 DEBUGP(5, dev, "Delete timer\n");
657 del_timer_sync(&dev->timer); 649 del_timer_sync(&dev->timer);
658#ifdef PCMCIA_DEBUG 650#ifdef CM4000_DEBUG
659 dev->monitor_running = 0; 651 dev->monitor_running = 0;
660#endif 652#endif
661 653
@@ -898,7 +890,7 @@ static void monitor_card(unsigned long p)
898 DEBUGP(4, dev, "ATR checksum (0x%.2x, should " 890 DEBUGP(4, dev, "ATR checksum (0x%.2x, should "
899 "be zero) failed\n", dev->atr_csum); 891 "be zero) failed\n", dev->atr_csum);
900 } 892 }
901#ifdef PCMCIA_DEBUG 893#ifdef CM4000_DEBUG
902 else if (test_bit(IS_BAD_LENGTH, &dev->flags)) { 894 else if (test_bit(IS_BAD_LENGTH, &dev->flags)) {
903 DEBUGP(4, dev, "ATR length error\n"); 895 DEBUGP(4, dev, "ATR length error\n");
904 } else { 896 } else {
@@ -1415,7 +1407,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1415 int size; 1407 int size;
1416 int rc; 1408 int rc;
1417 void __user *argp = (void __user *)arg; 1409 void __user *argp = (void __user *)arg;
1418#ifdef PCMCIA_DEBUG 1410#ifdef CM4000_DEBUG
1419 char *ioctl_names[CM_IOC_MAXNR + 1] = { 1411 char *ioctl_names[CM_IOC_MAXNR + 1] = {
1420 [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS", 1412 [_IOC_NR(CM_IOCGSTATUS)] "CM_IOCGSTATUS",
1421 [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR", 1413 [_IOC_NR(CM_IOCGATR)] "CM_IOCGATR",
@@ -1423,9 +1415,9 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1423 [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS", 1415 [_IOC_NR(CM_IOCSPTS)] "CM_IOCSPTS",
1424 [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL", 1416 [_IOC_NR(CM_IOSDBGLVL)] "CM4000_DBGLVL",
1425 }; 1417 };
1426#endif
1427 DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode), 1418 DEBUGP(3, dev, "cmm_ioctl(device=%d.%d) %s\n", imajor(inode),
1428 iminor(inode), ioctl_names[_IOC_NR(cmd)]); 1419 iminor(inode), ioctl_names[_IOC_NR(cmd)]);
1420#endif
1429 1421
1430 lock_kernel(); 1422 lock_kernel();
1431 rc = -ENODEV; 1423 rc = -ENODEV;
@@ -1523,7 +1515,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1523 } 1515 }
1524 case CM_IOCARDOFF: 1516 case CM_IOCARDOFF:
1525 1517
1526#ifdef PCMCIA_DEBUG 1518#ifdef CM4000_DEBUG
1527 DEBUGP(4, dev, "... in CM_IOCARDOFF\n"); 1519 DEBUGP(4, dev, "... in CM_IOCARDOFF\n");
1528 if (dev->flags0 & 0x01) { 1520 if (dev->flags0 & 0x01) {
1529 DEBUGP(4, dev, " Card inserted\n"); 1521 DEBUGP(4, dev, " Card inserted\n");
@@ -1625,18 +1617,9 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1625 1617
1626 } 1618 }
1627 break; 1619 break;
1628#ifdef PCMCIA_DEBUG 1620#ifdef CM4000_DEBUG
1629 case CM_IOSDBGLVL: /* set debug log level */ 1621 case CM_IOSDBGLVL:
1630 { 1622 rc = -ENOTTY;
1631 int old_pc_debug = 0;
1632
1633 old_pc_debug = pc_debug;
1634 if (copy_from_user(&pc_debug, argp, sizeof(int)))
1635 rc = -EFAULT;
1636 else if (old_pc_debug != pc_debug)
1637 DEBUGP(0, dev, "Changed debug log level "
1638 "to %i\n", pc_debug);
1639 }
1640 break; 1623 break;
1641#endif 1624#endif
1642 default: 1625 default:
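
Both CardMan conversions (cm4000 above, cm4040 below) drop the private pc_debug level and route all output through dev_dbg()/pr_debug(), which compile to nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled; that is also why the CM_IOSDBGLVL ioctl is reduced to returning -ENOTTY. A rough sketch of what a call site now amounts to (reader_to_dev() resolves to &dev->p_dev->dev per the new define):

	DEBUGP(3, dev, "cmm_ioctl(device=%d.%d)\n", imajor(inode), iminor(inode));
	/* the level argument is ignored; this is effectively
	 *   dev_dbg(&dev->p_dev->dev, "%s:cmm_ioctl(device=%d.%d)\n", __func__, ...);
	 * and, with dynamic debug, can be switched on per-file at run time
	 */
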
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index 4f0723b07974..a6a70e476bea 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -17,8 +17,6 @@
17 * All rights reserved, Dual BSD/GPL Licensed. 17 * All rights reserved, Dual BSD/GPL Licensed.
18 */ 18 */
19 19
20/* #define PCMCIA_DEBUG 6 */
21
22#include <linux/kernel.h> 20#include <linux/kernel.h>
23#include <linux/module.h> 21#include <linux/module.h>
24#include <linux/slab.h> 22#include <linux/slab.h>
@@ -41,18 +39,16 @@
41#include "cm4040_cs.h" 39#include "cm4040_cs.h"
42 40
43 41
44#ifdef PCMCIA_DEBUG 42#define reader_to_dev(x) (&x->p_dev->dev)
45#define reader_to_dev(x) (&handle_to_dev(x->p_dev)) 43
46static int pc_debug = PCMCIA_DEBUG; 44/* n (debug level) is ignored */
47module_param(pc_debug, int, 0600); 45/* additional debug output may be enabled by re-compiling with
48#define DEBUGP(n, rdr, x, args...) do { \ 46 * CM4040_DEBUG set */
49 if (pc_debug >= (n)) \ 47/* #define CM4040_DEBUG */
50 dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ 48#define DEBUGP(n, rdr, x, args...) do { \
51 __func__ , ##args); \ 49 dev_dbg(reader_to_dev(rdr), "%s:" x, \
50 __func__ , ## args); \
52 } while (0) 51 } while (0)
53#else
54#define DEBUGP(n, rdr, x, args...)
55#endif
56 52
57static char *version = 53static char *version =
58"OMNIKEY CardMan 4040 v1.1.0gm5 - All bugs added by Harald Welte"; 54"OMNIKEY CardMan 4040 v1.1.0gm5 - All bugs added by Harald Welte";
@@ -90,14 +86,13 @@ struct reader_dev {
90 86
91static struct pcmcia_device *dev_table[CM_MAX_DEV]; 87static struct pcmcia_device *dev_table[CM_MAX_DEV];
92 88
93#ifndef PCMCIA_DEBUG 89#ifndef CM4040_DEBUG
94#define xoutb outb 90#define xoutb outb
95#define xinb inb 91#define xinb inb
96#else 92#else
97static inline void xoutb(unsigned char val, unsigned short port) 93static inline void xoutb(unsigned char val, unsigned short port)
98{ 94{
99 if (pc_debug >= 7) 95 pr_debug("outb(val=%.2x,port=%.4x)\n", val, port);
100 printk(KERN_DEBUG "outb(val=%.2x,port=%.4x)\n", val, port);
101 outb(val, port); 96 outb(val, port);
102} 97}
103 98
@@ -106,8 +101,7 @@ static inline unsigned char xinb(unsigned short port)
106 unsigned char val; 101 unsigned char val;
107 102
108 val = inb(port); 103 val = inb(port);
109 if (pc_debug >= 7) 104 pr_debug("%.2x=inb(%.4x)\n", val, port);
110 printk(KERN_DEBUG "%.2x=inb(%.4x)\n", val, port);
111 return val; 105 return val;
112} 106}
113#endif 107#endif
@@ -260,23 +254,22 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
260 return -EIO; 254 return -EIO;
261 } 255 }
262 dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN); 256 dev->r_buf[i] = xinb(iobase + REG_OFFSET_BULK_IN);
263#ifdef PCMCIA_DEBUG 257#ifdef CM4040_DEBUG
264 if (pc_debug >= 6) 258 pr_debug("%lu:%2x ", i, dev->r_buf[i]);
265 printk(KERN_DEBUG "%lu:%2x ", i, dev->r_buf[i]);
266 } 259 }
267 printk("\n"); 260 pr_debug("\n");
268#else 261#else
269 } 262 }
270#endif 263#endif
271 264
272 bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]); 265 bytes_to_read = 5 + le32_to_cpu(*(__le32 *)&dev->r_buf[1]);
273 266
274 DEBUGP(6, dev, "BytesToRead=%lu\n", bytes_to_read); 267 DEBUGP(6, dev, "BytesToRead=%zu\n", bytes_to_read);
275 268
276 min_bytes_to_read = min(count, bytes_to_read + 5); 269 min_bytes_to_read = min(count, bytes_to_read + 5);
277 min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE); 270 min_bytes_to_read = min_t(size_t, min_bytes_to_read, READ_WRITE_BUFFER_SIZE);
278 271
279 DEBUGP(6, dev, "Min=%lu\n", min_bytes_to_read); 272 DEBUGP(6, dev, "Min=%zu\n", min_bytes_to_read);
280 273
281 for (i = 0; i < (min_bytes_to_read-5); i++) { 274 for (i = 0; i < (min_bytes_to_read-5); i++) {
282 rc = wait_for_bulk_in_ready(dev); 275 rc = wait_for_bulk_in_ready(dev);
@@ -288,11 +281,10 @@ static ssize_t cm4040_read(struct file *filp, char __user *buf,
288 return -EIO; 281 return -EIO;
289 } 282 }
290 dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN); 283 dev->r_buf[i+5] = xinb(iobase + REG_OFFSET_BULK_IN);
291#ifdef PCMCIA_DEBUG 284#ifdef CM4040_DEBUG
292 if (pc_debug >= 6) 285 pr_debug("%lu:%2x ", i, dev->r_buf[i]);
293 printk(KERN_DEBUG "%lu:%2x ", i, dev->r_buf[i]);
294 } 286 }
295 printk("\n"); 287 pr_debug("\n");
296#else 288#else
297 } 289 }
298#endif 290#endif
@@ -547,7 +539,7 @@ static int cm4040_config_check(struct pcmcia_device *p_dev,
547 p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK; 539 p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
548 540
549 rc = pcmcia_request_io(p_dev, &p_dev->io); 541 rc = pcmcia_request_io(p_dev, &p_dev->io);
550 dev_printk(KERN_INFO, &handle_to_dev(p_dev), 542 dev_printk(KERN_INFO, &p_dev->dev,
551 "pcmcia_request_io returned 0x%x\n", rc); 543 "pcmcia_request_io returned 0x%x\n", rc);
552 return rc; 544 return rc;
553} 545}
@@ -569,7 +561,7 @@ static int reader_config(struct pcmcia_device *link, int devno)
569 561
570 fail_rc = pcmcia_request_configuration(link, &link->conf); 562 fail_rc = pcmcia_request_configuration(link, &link->conf);
571 if (fail_rc != 0) { 563 if (fail_rc != 0) {
572 dev_printk(KERN_INFO, &handle_to_dev(link), 564 dev_printk(KERN_INFO, &link->dev,
573 "pcmcia_request_configuration failed 0x%x\n", 565 "pcmcia_request_configuration failed 0x%x\n",
574 fail_rc); 566 fail_rc);
575 goto cs_release; 567 goto cs_release;
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c
index 4c1820cad712..99cffdab1056 100644
--- a/drivers/char/pcmcia/ipwireless/hardware.c
+++ b/drivers/char/pcmcia/ipwireless/hardware.c
@@ -1213,12 +1213,12 @@ static irqreturn_t ipwireless_handle_v2_v3_interrupt(int irq,
1213 1213
1214irqreturn_t ipwireless_interrupt(int irq, void *dev_id) 1214irqreturn_t ipwireless_interrupt(int irq, void *dev_id)
1215{ 1215{
1216 struct ipw_hardware *hw = dev_id; 1216 struct ipw_dev *ipw = dev_id;
1217 1217
1218 if (hw->hw_version == HW_VERSION_1) 1218 if (ipw->hardware->hw_version == HW_VERSION_1)
1219 return ipwireless_handle_v1_interrupt(irq, hw); 1219 return ipwireless_handle_v1_interrupt(irq, ipw->hardware);
1220 else 1220 else
1221 return ipwireless_handle_v2_v3_interrupt(irq, hw); 1221 return ipwireless_handle_v2_v3_interrupt(irq, ipw->hardware);
1222} 1222}
1223 1223
1224static void flush_packets_to_hw(struct ipw_hardware *hw) 1224static void flush_packets_to_hw(struct ipw_hardware *hw)
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 5216fce0c62d..dff24dae1485 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -65,10 +65,7 @@ static void signalled_reboot_work(struct work_struct *work_reboot)
65 struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev, 65 struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev,
66 work_reboot); 66 work_reboot);
67 struct pcmcia_device *link = ipw->link; 67 struct pcmcia_device *link = ipw->link;
68 int ret = pcmcia_reset_card(link->socket); 68 pcmcia_reset_card(link->socket);
69
70 if (ret != 0)
71 cs_error(link, ResetCard, ret);
72} 69}
73 70
74static void signalled_reboot_callback(void *callback_data) 71static void signalled_reboot_callback(void *callback_data)
@@ -79,208 +76,127 @@ static void signalled_reboot_callback(void *callback_data)
79 schedule_work(&ipw->work_reboot); 76 schedule_work(&ipw->work_reboot);
80} 77}
81 78
82static int config_ipwireless(struct ipw_dev *ipw) 79static int ipwireless_probe(struct pcmcia_device *p_dev,
80 cistpl_cftable_entry_t *cfg,
81 cistpl_cftable_entry_t *dflt,
82 unsigned int vcc,
83 void *priv_data)
83{ 84{
84 struct pcmcia_device *link = ipw->link; 85 struct ipw_dev *ipw = priv_data;
85 int ret; 86 struct resource *io_resource;
86 tuple_t tuple;
87 unsigned short buf[64];
88 cisparse_t parse;
89 unsigned short cor_value;
90 memreq_t memreq_attr_memory; 87 memreq_t memreq_attr_memory;
91 memreq_t memreq_common_memory; 88 memreq_t memreq_common_memory;
89 int ret;
92 90
93 ipw->is_v2_card = 0; 91 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
94 92 p_dev->io.BasePort1 = cfg->io.win[0].base;
95 tuple.Attributes = 0; 93 p_dev->io.NumPorts1 = cfg->io.win[0].len;
96 tuple.TupleData = (cisdata_t *) buf; 94 p_dev->io.IOAddrLines = 16;
97 tuple.TupleDataMax = sizeof(buf);
98 tuple.TupleOffset = 0;
99
100 tuple.DesiredTuple = RETURN_FIRST_TUPLE;
101
102 ret = pcmcia_get_first_tuple(link, &tuple);
103
104 while (ret == 0) {
105 ret = pcmcia_get_tuple_data(link, &tuple);
106
107 if (ret != 0) {
108 cs_error(link, GetTupleData, ret);
109 goto exit0;
110 }
111 ret = pcmcia_get_next_tuple(link, &tuple);
112 }
113
114 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
115
116 ret = pcmcia_get_first_tuple(link, &tuple);
117
118 if (ret != 0) {
119 cs_error(link, GetFirstTuple, ret);
120 goto exit0;
121 }
122
123 ret = pcmcia_get_tuple_data(link, &tuple);
124
125 if (ret != 0) {
126 cs_error(link, GetTupleData, ret);
127 goto exit0;
128 }
129
130 ret = pcmcia_parse_tuple(&tuple, &parse);
131
132 if (ret != 0) {
133 cs_error(link, ParseTuple, ret);
134 goto exit0;
135 }
136
137 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
138 link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
139 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
140 link->io.IOAddrLines = 16;
141
142 link->irq.IRQInfo1 = parse.cftable_entry.irq.IRQInfo1;
143 95
144 /* 0x40 causes it to generate level mode interrupts. */ 96 /* 0x40 causes it to generate level mode interrupts. */
145 /* 0x04 enables IREQ pin. */ 97 /* 0x04 enables IREQ pin. */
146 cor_value = parse.cftable_entry.index | 0x44; 98 p_dev->conf.ConfigIndex = cfg->index | 0x44;
147 link->conf.ConfigIndex = cor_value; 99 ret = pcmcia_request_io(p_dev, &p_dev->io);
100 if (ret)
101 return ret;
148 102
149 /* IRQ and I/O settings */ 103 io_resource = request_region(p_dev->io.BasePort1, p_dev->io.NumPorts1,
150 tuple.DesiredTuple = CISTPL_CONFIG; 104 IPWIRELESS_PCCARD_NAME);
151 105
152 ret = pcmcia_get_first_tuple(link, &tuple); 106 if (cfg->mem.nwin == 0)
107 return 0;
153 108
154 if (ret != 0) { 109 ipw->request_common_memory.Attributes =
155 cs_error(link, GetFirstTuple, ret); 110 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
156 goto exit0; 111 ipw->request_common_memory.Base = cfg->mem.win[0].host_addr;
157 } 112 ipw->request_common_memory.Size = cfg->mem.win[0].len;
113 if (ipw->request_common_memory.Size < 0x1000)
114 ipw->request_common_memory.Size = 0x1000;
115 ipw->request_common_memory.AccessSpeed = 0;
158 116
159 ret = pcmcia_get_tuple_data(link, &tuple); 117 ret = pcmcia_request_window(p_dev, &ipw->request_common_memory,
160 118 &ipw->handle_common_memory);
161 if (ret != 0) {
162 cs_error(link, GetTupleData, ret);
163 goto exit0;
164 }
165 119
166 ret = pcmcia_parse_tuple(&tuple, &parse); 120 if (ret != 0)
121 goto exit1;
167 122
168 if (ret != 0) { 123 memreq_common_memory.CardOffset = cfg->mem.win[0].card_addr;
169 cs_error(link, GetTupleData, ret); 124 memreq_common_memory.Page = 0;
170 goto exit0;
171 }
172 link->conf.Attributes = CONF_ENABLE_IRQ;
173 link->conf.ConfigBase = parse.config.base;
174 link->conf.Present = parse.config.rmask[0];
175 link->conf.IntType = INT_MEMORY_AND_IO;
176 125
177 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 126 ret = pcmcia_map_mem_page(p_dev, ipw->handle_common_memory,
178 link->irq.Handler = ipwireless_interrupt; 127 &memreq_common_memory);
179 link->irq.Instance = ipw->hardware;
180 128
181 ret = pcmcia_request_io(link, &link->io); 129 if (ret != 0)
130 goto exit2;
182 131
183 if (ret != 0) { 132 ipw->is_v2_card = cfg->mem.win[0].len == 0x100;
184 cs_error(link, RequestIO, ret);
185 goto exit0;
186 }
187 133
188 request_region(link->io.BasePort1, link->io.NumPorts1, 134 ipw->common_memory = ioremap(ipw->request_common_memory.Base,
135 ipw->request_common_memory.Size);
136 request_mem_region(ipw->request_common_memory.Base,
137 ipw->request_common_memory.Size,
189 IPWIRELESS_PCCARD_NAME); 138 IPWIRELESS_PCCARD_NAME);
190 139
191 /* memory settings */ 140 ipw->request_attr_memory.Attributes =
192 141 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
193 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 142 ipw->request_attr_memory.Base = 0;
194 143 ipw->request_attr_memory.Size = 0; /* this used to be 0x1000 */
195 ret = pcmcia_get_first_tuple(link, &tuple); 144 ipw->request_attr_memory.AccessSpeed = 0;
196
197 if (ret != 0) {
198 cs_error(link, GetFirstTuple, ret);
199 goto exit1;
200 }
201
202 ret = pcmcia_get_tuple_data(link, &tuple);
203 145
204 if (ret != 0) { 146 ret = pcmcia_request_window(p_dev, &ipw->request_attr_memory,
205 cs_error(link, GetTupleData, ret); 147 &ipw->handle_attr_memory);
206 goto exit1;
207 }
208
209 ret = pcmcia_parse_tuple(&tuple, &parse);
210
211 if (ret != 0) {
212 cs_error(link, ParseTuple, ret);
213 goto exit1;
214 }
215 148
216 if (parse.cftable_entry.mem.nwin > 0) { 149 if (ret != 0)
217 ipw->request_common_memory.Attributes = 150 goto exit2;
218 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
219 ipw->request_common_memory.Base =
220 parse.cftable_entry.mem.win[0].host_addr;
221 ipw->request_common_memory.Size = parse.cftable_entry.mem.win[0].len;
222 if (ipw->request_common_memory.Size < 0x1000)
223 ipw->request_common_memory.Size = 0x1000;
224 ipw->request_common_memory.AccessSpeed = 0;
225
226 ret = pcmcia_request_window(&link, &ipw->request_common_memory,
227 &ipw->handle_common_memory);
228 151
229 if (ret != 0) { 152 memreq_attr_memory.CardOffset = 0;
230 cs_error(link, RequestWindow, ret); 153 memreq_attr_memory.Page = 0;
231 goto exit1;
232 }
233 154
234 memreq_common_memory.CardOffset = 155 ret = pcmcia_map_mem_page(p_dev, ipw->handle_attr_memory,
235 parse.cftable_entry.mem.win[0].card_addr; 156 &memreq_attr_memory);
236 memreq_common_memory.Page = 0;
237 157
238 ret = pcmcia_map_mem_page(ipw->handle_common_memory, 158 if (ret != 0)
239 &memreq_common_memory); 159 goto exit3;
240 160
241 if (ret != 0) { 161 ipw->attr_memory = ioremap(ipw->request_attr_memory.Base,
242 cs_error(link, MapMemPage, ret); 162 ipw->request_attr_memory.Size);
243 goto exit1; 163 request_mem_region(ipw->request_attr_memory.Base,
244 } 164 ipw->request_attr_memory.Size, IPWIRELESS_PCCARD_NAME);
245 165
246 ipw->is_v2_card = 166 return 0;
247 parse.cftable_entry.mem.win[0].len == 0x100;
248 167
249 ipw->common_memory = ioremap(ipw->request_common_memory.Base, 168exit3:
169 pcmcia_release_window(p_dev, ipw->handle_attr_memory);
170exit2:
171 if (ipw->common_memory) {
172 release_mem_region(ipw->request_common_memory.Base,
250 ipw->request_common_memory.Size); 173 ipw->request_common_memory.Size);
251 request_mem_region(ipw->request_common_memory.Base, 174 iounmap(ipw->common_memory);
252 ipw->request_common_memory.Size, IPWIRELESS_PCCARD_NAME); 175 pcmcia_release_window(p_dev, ipw->handle_common_memory);
253 176 } else
254 ipw->request_attr_memory.Attributes = 177 pcmcia_release_window(p_dev, ipw->handle_common_memory);
255 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE; 178exit1:
256 ipw->request_attr_memory.Base = 0; 179 release_resource(io_resource);
257 ipw->request_attr_memory.Size = 0; /* this used to be 0x1000 */ 180 pcmcia_disable_device(p_dev);
258 ipw->request_attr_memory.AccessSpeed = 0; 181 return -1;
259 182}
260 ret = pcmcia_request_window(&link, &ipw->request_attr_memory,
261 &ipw->handle_attr_memory);
262 183
263 if (ret != 0) { 184static int config_ipwireless(struct ipw_dev *ipw)
264 cs_error(link, RequestWindow, ret); 185{
265 goto exit2; 186 struct pcmcia_device *link = ipw->link;
266 } 187 int ret = 0;
267 188
268 memreq_attr_memory.CardOffset = 0; 189 ipw->is_v2_card = 0;
269 memreq_attr_memory.Page = 0;
270 190
271 ret = pcmcia_map_mem_page(ipw->handle_attr_memory, 191 ret = pcmcia_loop_config(link, ipwireless_probe, ipw);
272 &memreq_attr_memory); 192 if (ret != 0)
193 return ret;
273 194
274 if (ret != 0) { 195 link->conf.Attributes = CONF_ENABLE_IRQ;
275 cs_error(link, MapMemPage, ret); 196 link->conf.IntType = INT_MEMORY_AND_IO;
276 goto exit2;
277 }
278 197
279 ipw->attr_memory = ioremap(ipw->request_attr_memory.Base, 198 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
280 ipw->request_attr_memory.Size); 199 link->irq.Handler = ipwireless_interrupt;
281 request_mem_region(ipw->request_attr_memory.Base, ipw->request_attr_memory.Size,
282 IPWIRELESS_PCCARD_NAME);
283 }
284 200
285 INIT_WORK(&ipw->work_reboot, signalled_reboot_work); 201 INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
286 202
@@ -291,10 +207,8 @@ static int config_ipwireless(struct ipw_dev *ipw)
291 207
292 ret = pcmcia_request_irq(link, &link->irq); 208 ret = pcmcia_request_irq(link, &link->irq);
293 209
294 if (ret != 0) { 210 if (ret != 0)
295 cs_error(link, RequestIRQ, ret); 211 goto exit;
296 goto exit3;
297 }
298 212
299 printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n", 213 printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n",
300 ipw->is_v2_card ? "V2/V3" : "V1"); 214 ipw->is_v2_card ? "V2/V3" : "V1");
@@ -316,12 +230,12 @@ static int config_ipwireless(struct ipw_dev *ipw)
316 230
317 ipw->network = ipwireless_network_create(ipw->hardware); 231 ipw->network = ipwireless_network_create(ipw->hardware);
318 if (!ipw->network) 232 if (!ipw->network)
319 goto exit3; 233 goto exit;
320 234
321 ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network, 235 ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network,
322 ipw->nodes); 236 ipw->nodes);
323 if (!ipw->tty) 237 if (!ipw->tty)
324 goto exit3; 238 goto exit;
325 239
326 ipwireless_init_hardware_v2_v3(ipw->hardware); 240 ipwireless_init_hardware_v2_v3(ipw->hardware);
327 241
@@ -331,35 +245,27 @@ static int config_ipwireless(struct ipw_dev *ipw)
331 */ 245 */
332 ret = pcmcia_request_configuration(link, &link->conf); 246 ret = pcmcia_request_configuration(link, &link->conf);
333 247
334 if (ret != 0) { 248 if (ret != 0)
335 cs_error(link, RequestConfiguration, ret); 249 goto exit;
336 goto exit4;
337 }
338 250
339 link->dev_node = &ipw->nodes[0]; 251 link->dev_node = &ipw->nodes[0];
340 252
341 return 0; 253 return 0;
342 254
343exit4: 255exit:
344 pcmcia_disable_device(link);
345exit3:
346 if (ipw->attr_memory) { 256 if (ipw->attr_memory) {
347 release_mem_region(ipw->request_attr_memory.Base, 257 release_mem_region(ipw->request_attr_memory.Base,
348 ipw->request_attr_memory.Size); 258 ipw->request_attr_memory.Size);
349 iounmap(ipw->attr_memory); 259 iounmap(ipw->attr_memory);
350 pcmcia_release_window(ipw->handle_attr_memory); 260 pcmcia_release_window(link, ipw->handle_attr_memory);
351 pcmcia_disable_device(link);
352 } 261 }
353exit2:
354 if (ipw->common_memory) { 262 if (ipw->common_memory) {
355 release_mem_region(ipw->request_common_memory.Base, 263 release_mem_region(ipw->request_common_memory.Base,
356 ipw->request_common_memory.Size); 264 ipw->request_common_memory.Size);
357 iounmap(ipw->common_memory); 265 iounmap(ipw->common_memory);
358 pcmcia_release_window(ipw->handle_common_memory); 266 pcmcia_release_window(link, ipw->handle_common_memory);
359 } 267 }
360exit1:
361 pcmcia_disable_device(link); 268 pcmcia_disable_device(link);
362exit0:
363 return -1; 269 return -1;
364} 270}
365 271
@@ -378,9 +284,9 @@ static void release_ipwireless(struct ipw_dev *ipw)
378 iounmap(ipw->attr_memory); 284 iounmap(ipw->attr_memory);
379 } 285 }
380 if (ipw->common_memory) 286 if (ipw->common_memory)
381 pcmcia_release_window(ipw->handle_common_memory); 287 pcmcia_release_window(ipw->link, ipw->handle_common_memory);
382 if (ipw->attr_memory) 288 if (ipw->attr_memory)
383 pcmcia_release_window(ipw->handle_attr_memory); 289 pcmcia_release_window(ipw->link, ipw->handle_attr_memory);
384 290
385 /* Break the link with Card Services */ 291 /* Break the link with Card Services */
386 pcmcia_disable_device(ipw->link); 292 pcmcia_disable_device(ipw->link);
@@ -406,7 +312,6 @@ static int ipwireless_attach(struct pcmcia_device *link)
406 312
407 ipw->link = link; 313 ipw->link = link;
408 link->priv = ipw; 314 link->priv = ipw;
409 link->irq.Instance = ipw;
410 315
411 /* Link this device into our device list. */ 316 /* Link this device into our device list. */
412 link->dev_node = &ipw->nodes[0]; 317 link->dev_node = &ipw->nodes[0];
@@ -421,7 +326,6 @@ static int ipwireless_attach(struct pcmcia_device *link)
421 ret = config_ipwireless(ipw); 326 ret = config_ipwireless(ipw);
422 327
423 if (ret != 0) { 328 if (ret != 0) {
424 cs_error(link, RegisterClient, ret);
425 ipwireless_detach(link); 329 ipwireless_detach(link);
426 return ret; 330 return ret;
427 } 331 }
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index caf6e4d19469..c31a0d913d37 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -554,7 +554,6 @@ static int mgslpc_probe(struct pcmcia_device *link)
554 554
555 /* Interrupt setup */ 555 /* Interrupt setup */
556 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 556 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
557 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
558 link->irq.Handler = NULL; 557 link->irq.Handler = NULL;
559 558
560 link->conf.Attributes = 0; 559 link->conf.Attributes = 0;
@@ -572,69 +571,51 @@ static int mgslpc_probe(struct pcmcia_device *link)
572/* Card has been inserted. 571/* Card has been inserted.
573 */ 572 */
574 573
575#define CS_CHECK(fn, ret) \ 574static int mgslpc_ioprobe(struct pcmcia_device *p_dev,
576do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 575 cistpl_cftable_entry_t *cfg,
576 cistpl_cftable_entry_t *dflt,
577 unsigned int vcc,
578 void *priv_data)
579{
580 if (cfg->io.nwin > 0) {
581 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
582 if (!(cfg->io.flags & CISTPL_IO_8BIT))
583 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
584 if (!(cfg->io.flags & CISTPL_IO_16BIT))
585 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
586 p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
587 p_dev->io.BasePort1 = cfg->io.win[0].base;
588 p_dev->io.NumPorts1 = cfg->io.win[0].len;
589 return pcmcia_request_io(p_dev, &p_dev->io);
590 }
591 return -ENODEV;
592}
577 593
578static int mgslpc_config(struct pcmcia_device *link) 594static int mgslpc_config(struct pcmcia_device *link)
579{ 595{
580 MGSLPC_INFO *info = link->priv; 596 MGSLPC_INFO *info = link->priv;
581 tuple_t tuple; 597 int ret;
582 cisparse_t parse;
583 int last_fn, last_ret;
584 u_char buf[64];
585 cistpl_cftable_entry_t dflt = { 0 };
586 cistpl_cftable_entry_t *cfg;
587 598
588 if (debug_level >= DEBUG_LEVEL_INFO) 599 if (debug_level >= DEBUG_LEVEL_INFO)
589 printk("mgslpc_config(0x%p)\n", link); 600 printk("mgslpc_config(0x%p)\n", link);
590 601
591 tuple.Attributes = 0; 602 ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
592 tuple.TupleData = buf; 603 if (ret != 0)
593 tuple.TupleDataMax = sizeof(buf); 604 goto failed;
594 tuple.TupleOffset = 0;
595
596 /* get CIS configuration entry */
597
598 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
599 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
600
601 cfg = &(parse.cftable_entry);
602 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
603 CS_CHECK(ParseTuple, pcmcia_parse_tuple(&tuple, &parse));
604
605 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) dflt = *cfg;
606 if (cfg->index == 0)
607 goto cs_failed;
608
609 link->conf.ConfigIndex = cfg->index;
610 link->conf.Attributes |= CONF_ENABLE_IRQ;
611
612 /* IO window settings */
613 link->io.NumPorts1 = 0;
614 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
615 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
616 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
617 if (!(io->flags & CISTPL_IO_8BIT))
618 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
619 if (!(io->flags & CISTPL_IO_16BIT))
620 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
621 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
622 link->io.BasePort1 = io->win[0].base;
623 link->io.NumPorts1 = io->win[0].len;
624 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
625 }
626 605
627 link->conf.Attributes = CONF_ENABLE_IRQ; 606 link->conf.Attributes = CONF_ENABLE_IRQ;
628 link->conf.IntType = INT_MEMORY_AND_IO; 607 link->conf.IntType = INT_MEMORY_AND_IO;
629 link->conf.ConfigIndex = 8; 608 link->conf.ConfigIndex = 8;
630 link->conf.Present = PRESENT_OPTION; 609 link->conf.Present = PRESENT_OPTION;
631 610
632 link->irq.Attributes |= IRQ_HANDLE_PRESENT;
633 link->irq.Handler = mgslpc_isr; 611 link->irq.Handler = mgslpc_isr;
634 link->irq.Instance = info;
635 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
636 612
637 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 613 ret = pcmcia_request_irq(link, &link->irq);
614 if (ret)
615 goto failed;
616 ret = pcmcia_request_configuration(link, &link->conf);
617 if (ret)
618 goto failed;
638 619
639 info->io_base = link->io.BasePort1; 620 info->io_base = link->io.BasePort1;
640 info->irq_level = link->irq.AssignedIRQ; 621 info->irq_level = link->irq.AssignedIRQ;
@@ -654,8 +635,7 @@ static int mgslpc_config(struct pcmcia_device *link)
654 printk("\n"); 635 printk("\n");
655 return 0; 636 return 0;
656 637
657cs_failed: 638failed:
658 cs_error(link, last_fn, last_ret);
659 mgslpc_release((u_long)link); 639 mgslpc_release((u_long)link);
660 return -ENODEV; 640 return -ENODEV;
661} 641}
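
The synclink and ipwireless conversions replace open-coded CIS tuple walking (and synclink's CS_CHECK macro) with pcmcia_loop_config(), which invokes a per-configuration-entry callback until one returns 0. A minimal sketch of the pattern with hypothetical foo_* names; the callback signature is the same one used by mgslpc_ioprobe() above:

	static int foo_config_check(struct pcmcia_device *p_dev,
				    cistpl_cftable_entry_t *cfg,
				    cistpl_cftable_entry_t *dflt,
				    unsigned int vcc, void *priv_data)
	{
		if (cfg->io.nwin == 0)
			return -ENODEV;		/* not usable, try the next entry */

		p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
		p_dev->io.BasePort1   = cfg->io.win[0].base;
		p_dev->io.NumPorts1   = cfg->io.win[0].len;
		p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
		return pcmcia_request_io(p_dev, &p_dev->io);
	}

	/* in foo_config(): */
	ret = pcmcia_loop_config(link, foo_config_check, NULL);
	if (ret)
		goto failed;
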
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 47c2d2763456..f06bb37defb1 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -31,7 +31,7 @@
31 31
32enum tpm_const { 32enum tpm_const {
33 TPM_MINOR = 224, /* officially assigned */ 33 TPM_MINOR = 224, /* officially assigned */
34 TPM_BUFSIZE = 2048, 34 TPM_BUFSIZE = 4096,
35 TPM_NUM_DEVICES = 256, 35 TPM_NUM_DEVICES = 256,
36}; 36};
37 37
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 0b73e4ec1add..2405f17b29dd 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -257,6 +257,10 @@ out:
257 return size; 257 return size;
258} 258}
259 259
260static int itpm;
261module_param(itpm, bool, 0444);
262MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
263
260/* 264/*
261 * If interrupts are used (signaled by an irq set in the vendor structure) 265 * If interrupts are used (signaled by an irq set in the vendor structure)
262 * tpm.c can skip polling for the data to be available as the interrupt is 266 * tpm.c can skip polling for the data to be available as the interrupt is
@@ -293,7 +297,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
293 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, 297 wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
294 &chip->vendor.int_queue); 298 &chip->vendor.int_queue);
295 status = tpm_tis_status(chip); 299 status = tpm_tis_status(chip);
296 if ((status & TPM_STS_DATA_EXPECT) == 0) { 300 if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
297 rc = -EIO; 301 rc = -EIO;
298 goto out_err; 302 goto out_err;
299 } 303 }
@@ -467,6 +471,10 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
467 "1.2 TPM (device-id 0x%X, rev-id %d)\n", 471 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
468 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); 472 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
469 473
474 if (itpm)
475 dev_info(dev, "Intel iTPM workaround enabled\n");
476
477
470 /* Figure out the capabilities */ 478 /* Figure out the capabilities */
471 intfcaps = 479 intfcaps =
472 ioread32(chip->vendor.iobase + 480 ioread32(chip->vendor.iobase +
@@ -629,6 +637,7 @@ static struct pnp_device_id tpm_pnp_tbl[] __devinitdata = {
629 {"", 0}, /* User Specified */ 637 {"", 0}, /* User Specified */
630 {"", 0} /* Terminator */ 638 {"", 0} /* Terminator */
631}; 639};
640MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
632 641
633static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev) 642static __devexit void tpm_tis_pnp_remove(struct pnp_dev *dev)
634{ 643{
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index a4bbb28f10be..c63f3d33914a 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -219,8 +219,14 @@ int tty_port_block_til_ready(struct tty_port *port,
219 219
220 /* if non-blocking mode is set we can pass directly to open unless 220 /* if non-blocking mode is set we can pass directly to open unless
221 the port has just hung up or is in another error state */ 221 the port has just hung up or is in another error state */
222 if ((filp->f_flags & O_NONBLOCK) || 222 if (tty->flags & (1 << TTY_IO_ERROR)) {
223 (tty->flags & (1 << TTY_IO_ERROR))) { 223 port->flags |= ASYNC_NORMAL_ACTIVE;
224 return 0;
225 }
226 if (filp->f_flags & O_NONBLOCK) {
227 /* Indicate we are open */
228 if (tty->termios->c_cflag & CBAUD)
229 tty_port_raise_dtr_rts(port);
224 port->flags |= ASYNC_NORMAL_ACTIVE; 230 port->flags |= ASYNC_NORMAL_ACTIVE;
225 return 0; 231 return 0;
226 } 232 }
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index ed86d3bf249a..6aa10284104a 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -103,8 +103,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new)
103 ve->event.event = event; 103 ve->event.event = event;
104 /* kernel view is consoles 0..n-1, user space view is 104 /* kernel view is consoles 0..n-1, user space view is
105 console 1..n with 0 meaning current, so we must bias */ 105 console 1..n with 0 meaning current, so we must bias */
106 ve->event.old = old + 1; 106 ve->event.oldev = old + 1;
107 ve->event.new = new + 1; 107 ve->event.newev = new + 1;
108 wake = 1; 108 wake = 1;
109 ve->done = 1; 109 ve->done = 1;
110 } 110 }
@@ -186,7 +186,7 @@ int vt_waitactive(int n)
186 vt_event_wait(&vw); 186 vt_event_wait(&vw);
187 if (vw.done == 0) 187 if (vw.done == 0)
188 return -EINTR; 188 return -EINTR;
189 } while (vw.event.new != n); 189 } while (vw.event.newev != n);
190 return 0; 190 return 0;
191} 191}
192 192
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 3938c7817095..ff57c40e9b8b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -41,7 +41,7 @@ static struct cpufreq_driver *cpufreq_driver;
41static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); 41static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
42#ifdef CONFIG_HOTPLUG_CPU 42#ifdef CONFIG_HOTPLUG_CPU
43/* This one keeps track of the previously set governor of a removed CPU */ 43/* This one keeps track of the previously set governor of a removed CPU */
44static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor); 44static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
45#endif 45#endif
46static DEFINE_SPINLOCK(cpufreq_driver_lock); 46static DEFINE_SPINLOCK(cpufreq_driver_lock);
47 47
@@ -774,10 +774,12 @@ int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
774#ifdef CONFIG_SMP 774#ifdef CONFIG_SMP
775 unsigned long flags; 775 unsigned long flags;
776 unsigned int j; 776 unsigned int j;
777
778#ifdef CONFIG_HOTPLUG_CPU 777#ifdef CONFIG_HOTPLUG_CPU
779 if (per_cpu(cpufreq_cpu_governor, cpu)) { 778 struct cpufreq_governor *gov;
780 policy->governor = per_cpu(cpufreq_cpu_governor, cpu); 779
780 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
781 if (gov) {
782 policy->governor = gov;
781 dprintk("Restoring governor %s for cpu %d\n", 783 dprintk("Restoring governor %s for cpu %d\n",
782 policy->governor->name, cpu); 784 policy->governor->name, cpu);
783 } 785 }
@@ -949,10 +951,13 @@ err_out_kobj_put:
949static int cpufreq_add_dev(struct sys_device *sys_dev) 951static int cpufreq_add_dev(struct sys_device *sys_dev)
950{ 952{
951 unsigned int cpu = sys_dev->id; 953 unsigned int cpu = sys_dev->id;
952 int ret = 0; 954 int ret = 0, found = 0;
953 struct cpufreq_policy *policy; 955 struct cpufreq_policy *policy;
954 unsigned long flags; 956 unsigned long flags;
955 unsigned int j; 957 unsigned int j;
958#ifdef CONFIG_HOTPLUG_CPU
959 int sibling;
960#endif
956 961
957 if (cpu_is_offline(cpu)) 962 if (cpu_is_offline(cpu))
958 return 0; 963 return 0;
@@ -999,7 +1004,19 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
999 INIT_WORK(&policy->update, handle_update); 1004 INIT_WORK(&policy->update, handle_update);
1000 1005
1001 /* Set governor before ->init, so that driver could check it */ 1006 /* Set governor before ->init, so that driver could check it */
1002 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 1007#ifdef CONFIG_HOTPLUG_CPU
1008 for_each_online_cpu(sibling) {
1009 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
1010 if (cp && cp->governor &&
1011 (cpumask_test_cpu(cpu, cp->related_cpus))) {
1012 policy->governor = cp->governor;
1013 found = 1;
1014 break;
1015 }
1016 }
1017#endif
1018 if (!found)
1019 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1003 /* call driver. From then on the cpufreq must be able 1020 /* call driver. From then on the cpufreq must be able
1004 * to accept all calls to ->verify and ->setpolicy for this CPU 1021 * to accept all calls to ->verify and ->setpolicy for this CPU
1005 */ 1022 */
@@ -1111,7 +1128,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1111#ifdef CONFIG_SMP 1128#ifdef CONFIG_SMP
1112 1129
1113#ifdef CONFIG_HOTPLUG_CPU 1130#ifdef CONFIG_HOTPLUG_CPU
1114 per_cpu(cpufreq_cpu_governor, cpu) = data->governor; 1131 strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
1132 CPUFREQ_NAME_LEN);
1115#endif 1133#endif
1116 1134
1117 /* if we have other CPUs still registered, we need to unlink them, 1135 /* if we have other CPUs still registered, we need to unlink them,
@@ -1135,7 +1153,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1135 continue; 1153 continue;
1136 dprintk("removing link for cpu %u\n", j); 1154 dprintk("removing link for cpu %u\n", j);
1137#ifdef CONFIG_HOTPLUG_CPU 1155#ifdef CONFIG_HOTPLUG_CPU
1138 per_cpu(cpufreq_cpu_governor, j) = data->governor; 1156 strncpy(per_cpu(cpufreq_cpu_governor, j),
1157 data->governor->name, CPUFREQ_NAME_LEN);
1139#endif 1158#endif
1140 cpu_sys_dev = get_cpu_sysdev(j); 1159 cpu_sys_dev = get_cpu_sysdev(j);
1141 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq"); 1160 sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
@@ -1606,9 +1625,22 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1606 1625
1607void cpufreq_unregister_governor(struct cpufreq_governor *governor) 1626void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1608{ 1627{
1628#ifdef CONFIG_HOTPLUG_CPU
1629 int cpu;
1630#endif
1631
1609 if (!governor) 1632 if (!governor)
1610 return; 1633 return;
1611 1634
1635#ifdef CONFIG_HOTPLUG_CPU
1636 for_each_present_cpu(cpu) {
1637 if (cpu_online(cpu))
1638 continue;
1639 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1640 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1641 }
1642#endif
1643
1612 mutex_lock(&cpufreq_governor_mutex); 1644 mutex_lock(&cpufreq_governor_mutex);
1613 list_del(&governor->governor_list); 1645 list_del(&governor->governor_list);
1614 mutex_unlock(&cpufreq_governor_mutex); 1646 mutex_unlock(&cpufreq_governor_mutex);
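
The cpufreq hunks change the per-CPU "previous governor" record from a struct cpufreq_governor pointer to a CPUFREQ_NAME_LEN string, so the record cannot dangle after a governor module is unloaded; cpufreq_unregister_governor() additionally wipes the name from offline CPUs. A rough sketch of the save/restore pair, assuming __find_governor() returns NULL when the named governor is no longer registered:

	/* CPU removal: remember only the governor's name */
	strncpy(per_cpu(cpufreq_cpu_governor, cpu),
		data->governor->name, CPUFREQ_NAME_LEN);

	/* CPU (re-)add: resolve the name back to a governor, keep the default if it is gone */
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov)
		policy->governor = gov;
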
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index bc33ddc9c97c..c7b081b839ff 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -116,9 +116,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
116 116
117 idle_time = cputime64_sub(cur_wall_time, busy_time); 117 idle_time = cputime64_sub(cur_wall_time, busy_time);
118 if (wall) 118 if (wall)
119 *wall = cur_wall_time; 119 *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
120 120
121 return idle_time; 121 return (cputime64_t)jiffies_to_usecs(idle_time);
122} 122}
123 123
124static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) 124static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 071699de50ee..4b34ade2332b 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -133,9 +133,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
133 133
134 idle_time = cputime64_sub(cur_wall_time, busy_time); 134 idle_time = cputime64_sub(cur_wall_time, busy_time);
135 if (wall) 135 if (wall)
136 *wall = cur_wall_time; 136 *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
137 137
138 return idle_time; 138 return (cputime64_t)jiffies_to_usecs(idle_time);
139} 139}
140 140
141static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) 141static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index a9952b1236b0..84c51e177269 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -236,7 +236,7 @@ static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
236 /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data. 236 /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
237 * We could avoid some copying here but it's probably not worth it. 237 * We could avoid some copying here but it's probably not worth it.
238 */ 238 */
239 if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) { 239 if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
240 ecb_crypt_copy(in, out, key, cword, count); 240 ecb_crypt_copy(in, out, key, cword, count);
241 return; 241 return;
242 } 242 }
@@ -248,7 +248,7 @@ static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
248 u8 *iv, struct cword *cword, int count) 248 u8 *iv, struct cword *cword, int count)
249{ 249{
250 /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ 250 /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
251 if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE)) 251 if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
252 return cbc_crypt_copy(in, out, key, iv, cword, count); 252 return cbc_crypt_copy(in, out, key, iv, cword, count);
253 253
254 return rep_xcrypt_cbc(in, out, key, iv, cword, count); 254 return rep_xcrypt_cbc(in, out, key, iv, cword, count);
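
The padlock change is easy to misread: "in & PAGE_SIZE" masks with 0x1000, i.e. it tests a single address bit, while "in & ~PAGE_MASK" yields the offset of in within its page, which is what the prefetch-overrun check needs. A worked example, assuming 4 KiB pages and a 32-byte fetch (the address is hypothetical):

	unsigned long in = 0x12342ff8;	/* 8 bytes before the end of a page */

	/* old test: (in & PAGE_SIZE) is 0 here, so the overrun goes undetected */
	bool old_hit = ((in & PAGE_SIZE)  + 32) > PAGE_SIZE;	/* (0x0000 + 32) > 0x1000 -> false */
	/* new test: the in-page offset is 0xff8, so the safe copy path is taken */
	bool new_hit = ((in & ~PAGE_MASK) + 32) > PAGE_SIZE;	/* (0x0ff8 + 32) > 0x1000 -> true  */
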
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5903a88351bf..b401dadad4a8 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -26,6 +26,8 @@ config INTEL_IOATDMA
 	select DMA_ENGINE
 	select DCA
 	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
+	select ASYNC_TX_DISABLE_PQ_VAL_DMA
+	select ASYNC_TX_DISABLE_XOR_VAL_DMA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
 	  in recent Intel Xeon chipsets.
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bd0b248de2cf..8f99354082ce 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -632,11 +632,21 @@ static bool device_has_all_tx_types(struct dma_device *device)
 	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 		return false;
+
+	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
+		return false;
+	#endif
 	#endif
 
 	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
 	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 		return false;
+
+	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
+		return false;
+	#endif
 	#endif
 
 	return true;
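The new Kconfig selects and the #ifndef blocks above let the async_tx layer declare at build time that it will not offload PQ/XOR validate operations, so device_has_all_tx_types() stops treating those capabilities as mandatory. A compressed sketch of that pattern; the CONFIG_ name is the one from the diff, but here it is just an ordinary macro you would define (or not) by hand:

#include <stdbool.h>
#include <stdio.h>

/* #define CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA 1 */	/* set by Kconfig in the real tree */

struct device_caps { bool xor_op, xor_val; };

static bool has_required_caps(const struct device_caps *c)
{
	if (!c->xor_op)
		return false;
#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!c->xor_val)		/* only required when the opt-out is not selected */
		return false;
#endif
	return true;
}

int main(void)
{
	struct device_caps c = { .xor_op = true, .xor_val = false };

	printf("%s\n", has_required_caps(&c) ? "usable" : "missing capability");
	return 0;
}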
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 69d02615c4d6..abd9038e06b1 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -98,17 +98,17 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
 	cpuid_level_9 = cpuid_eax(9);
 	res = test_bit(0, &cpuid_level_9);
 	if (!res)
-		dev_err(&pdev->dev, "DCA is disabled in BIOS\n");
+		dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
 
 	return res;
 }
 
-static int system_has_dca_enabled(struct pci_dev *pdev)
+int system_has_dca_enabled(struct pci_dev *pdev)
 {
 	if (boot_cpu_has(X86_FEATURE_DCA))
 		return dca_enabled_in_bios(pdev);
 
-	dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
+	dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
 	return 0;
 }
 
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c14fdfeb7f33..45edde996480 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -297,9 +297,7 @@ static inline bool is_ioat_suspended(unsigned long status)
 /* channel was fatally programmed */
 static inline bool is_ioat_bug(unsigned long err)
 {
-	return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
-			 IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
-			 IOAT_CHANERR_LENGTH_ERR));
+	return !!err;
 }
 
 static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 96ffab7d37a7..8f1f7f05deaa 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -279,6 +279,8 @@ void ioat2_timer_event(unsigned long data)
 		u32 chanerr;
 
 		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
 		BUG_ON(is_ioat_bug(chanerr));
 	}
 
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 35d1e33afd5b..42f6f10fb0cc 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -378,6 +378,8 @@ static void ioat3_timer_event(unsigned long data)
 		u32 chanerr;
 
 		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
 		BUG_ON(is_ioat_bug(chanerr));
 	}
 
@@ -569,7 +571,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
 	dump_desc_dbg(ioat, compl_desc);
 
 	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
+	return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -728,7 +730,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
 	dump_desc_dbg(ioat, compl_desc);
 
 	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
+	return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -736,10 +738,16 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
 	      unsigned int src_cnt, const unsigned char *scf, size_t len,
 	      unsigned long flags)
 {
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		dst[0] = dst[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		dst[1] = dst[0];
+
 	/* handle the single source multiply case from the raid6
 	 * recovery path
 	 */
-	if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
+	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
 		dma_addr_t single_source[2];
 		unsigned char single_source_coef[2];
 
@@ -761,6 +769,12 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
 		  unsigned int src_cnt, const unsigned char *scf, size_t len,
 		  enum sum_check_flags *pqres, unsigned long flags)
 {
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		pq[0] = pq[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		pq[1] = pq[0];
+
 	/* the cleanup routine only sets bits on validate failure, it
 	 * does not clear bits on validate success... so clear it here
 	 */
@@ -778,9 +792,9 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
 	dma_addr_t pq[2];
 
 	memset(scf, 0, src_cnt);
-	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[0] = dst;
-	pq[1] = ~0;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = dst; /* specify valid address for disabled result */
 
 	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
 				    flags);
@@ -800,9 +814,9 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
 	*result = 0;
 
 	memset(scf, 0, src_cnt);
-	flags |= DMA_PREP_PQ_DISABLE_Q;
 	pq[0] = src[0];
-	pq[1] = ~0;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = pq[0]; /* specify valid address for disabled result */
 
 	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
 				    len, flags);
@@ -1117,6 +1131,7 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
+	int dca_en = system_has_dca_enabled(pdev);
 	struct dma_device *dma;
 	struct dma_chan *c;
 	struct ioat_chan_common *chan;
@@ -1137,6 +1152,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
 	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+	/* dca is incompatible with raid operations */
+	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
 	if (cap & IOAT_CAP_XOR) {
 		is_raid_device = true;
 		dma->max_xor = 8;
@@ -1186,6 +1206,16 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		device->timer_fn = ioat2_timer_event;
 	}
 
+	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+	dma->device_prep_dma_pq_val = NULL;
+	#endif
+
+	#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+	dma->device_prep_dma_xor_val = NULL;
+	#endif
+
 	/* -= IOAT ver.3 workarounds =- */
 	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
 	 * that can cause stability issues for IOAT ver.3
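When a caller passes DMA_PREP_PQ_DISABLE_P or DMA_PREP_PQ_DISABLE_Q, the corresponding destination pointer may be meaningless, yet the ioat3 descriptor still needs a valid address in that slot; the __ioat3_prep_pq hunks above simply alias the disabled slot to the enabled one. A small sketch of that normalization, with the flag bit values chosen arbitrarily for illustration:

#include <stdint.h>
#include <stdio.h>

#define DMA_PREP_PQ_DISABLE_P (1u << 0)	/* assumed bit positions, for illustration only */
#define DMA_PREP_PQ_DISABLE_Q (1u << 1)

/* give a disabled result slot a valid (aliased) address before building a descriptor */
static void normalize_pq_dst(uint64_t dst[2], unsigned flags)
{
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];
}

int main(void)
{
	uint64_t dst[2] = { 0xdeadbeef, 0x1000 };	/* dst[0] is junk, P is disabled */

	normalize_pq_dst(dst, DMA_PREP_PQ_DISABLE_P);
	printf("p=%#llx q=%#llx\n",
	       (unsigned long long)dst[0], (unsigned long long)dst[1]);
	return 0;
}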
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 99afb12bd409..60e675455b6a 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -39,6 +39,8 @@
 #define IOAT_VER_3_0 0x30 /* Version 3.0 */
 #define IOAT_VER_3_2 0x32 /* Version 3.2 */
 
+int system_has_dca_enabled(struct pci_dev *pdev);
+
 struct ioat_dma_descriptor {
 	uint32_t size;
 	union {
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 63038e18ab03..f015ec196700 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -92,9 +92,7 @@
92#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 92#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
93#define IOAT_CHANCTRL_INT_REARM 0x0001 93#define IOAT_CHANCTRL_INT_REARM 0x0001
94#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ 94#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\
95 IOAT_CHANCTRL_ERR_COMPLETION_EN |\ 95 IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
96 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
97 IOAT_CHANCTRL_ERR_INT_EN)
98 96
99#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ 97#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
100#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ 98#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index b3b065c4e5c1..034ecf0ace03 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -640,17 +640,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 #endif
 	struct sh_dmae_device *shdev;
 
+	/* get platform data */
+	if (!pdev->dev.platform_data)
+		return -ENODEV;
+
 	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
 	if (!shdev) {
 		dev_err(&pdev->dev, "No enough memory\n");
-		err = -ENOMEM;
-		goto shdev_err;
+		return -ENOMEM;
 	}
 
-	/* get platform data */
-	if (!pdev->dev.platform_data)
-		goto shdev_err;
-
 	/* platform data */
 	memcpy(&shdev->pdata, pdev->dev.platform_data,
 		sizeof(struct sh_dmae_pdata));
@@ -722,7 +721,6 @@ eirq_err:
 rst_err:
 	kfree(shdev);
 
-shdev_err:
 	return err;
 }
 
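The shdma probe fix moves the platform-data check ahead of the kzalloc() so the early-exit path no longer has to unwind an allocation, and the now-unused shdev_err label goes away. A generic sketch of the reordered probe shape; the structure names here are invented for illustration:

#include <stdlib.h>
#include <errno.h>

struct fake_pdev { void *platform_data; };
struct fake_priv { int dummy; };

/* validate cheap preconditions first, then allocate: nothing to unwind on -ENODEV */
static int probe_sketch(struct fake_pdev *pdev)
{
	struct fake_priv *priv;

	if (!pdev->platform_data)
		return -ENODEV;

	priv = calloc(1, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	/* ... further setup would go here ... */
	free(priv);
	return 0;
}

int main(void)
{
	struct fake_pdev pdev = { .platform_data = NULL };

	return probe_sketch(&pdev) == -ENODEV ? 0 : 1;
}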
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 5d524254499e..94260aa76aa3 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -275,7 +275,7 @@ static void log_irqs(u32 evt)
275 !(evt & OHCI1394_busReset)) 275 !(evt & OHCI1394_busReset))
276 return; 276 return;
277 277
278 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, 278 fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
279 evt & OHCI1394_selfIDComplete ? " selfID" : "", 279 evt & OHCI1394_selfIDComplete ? " selfID" : "",
280 evt & OHCI1394_RQPkt ? " AR_req" : "", 280 evt & OHCI1394_RQPkt ? " AR_req" : "",
281 evt & OHCI1394_RSPkt ? " AR_resp" : "", 281 evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -286,6 +286,7 @@ static void log_irqs(u32 evt)
286 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", 286 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
287 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", 287 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
288 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", 288 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
289 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
289 evt & OHCI1394_regAccessFail ? " regAccessFail" : "", 290 evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
290 evt & OHCI1394_busReset ? " busReset" : "", 291 evt & OHCI1394_busReset ? " busReset" : "",
291 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | 292 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
@@ -293,6 +294,7 @@ static void log_irqs(u32 evt)
293 OHCI1394_respTxComplete | OHCI1394_isochRx | 294 OHCI1394_respTxComplete | OHCI1394_isochRx |
294 OHCI1394_isochTx | OHCI1394_postedWriteErr | 295 OHCI1394_isochTx | OHCI1394_postedWriteErr |
295 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | 296 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
297 OHCI1394_cycleInconsistent |
296 OHCI1394_regAccessFail | OHCI1394_busReset) 298 OHCI1394_regAccessFail | OHCI1394_busReset)
297 ? " ?" : ""); 299 ? " ?" : "");
298} 300}
@@ -1439,6 +1441,17 @@ static irqreturn_t irq_handler(int irq, void *data)
 					  OHCI1394_LinkControl_cycleMaster);
 	}
 
+	if (unlikely(event & OHCI1394_cycleInconsistent)) {
+		/*
+		 * We need to clear this event bit in order to make
+		 * cycleMatch isochronous I/O work. In theory we should
+		 * stop active cycleMatch iso contexts now and restart
+		 * them at least two cycles later. (FIXME?)
+		 */
+		if (printk_ratelimit())
+			fw_notify("isochronous cycle inconsistent\n");
+	}
+
 	if (event & OHCI1394_cycle64Seconds) {
 		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
 		if ((cycle_time & 0x80000000) == 0)
@@ -1528,6 +1541,7 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1528 OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 1541 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1529 OHCI1394_isochRx | OHCI1394_isochTx | 1542 OHCI1394_isochRx | OHCI1394_isochTx |
1530 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | 1543 OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
1544 OHCI1394_cycleInconsistent |
1531 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail | 1545 OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
1532 OHCI1394_masterIntEnable); 1546 OHCI1394_masterIntEnable);
1533 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 1547 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
@@ -1890,15 +1904,30 @@ static int handle_it_packet(struct context *context,
1890{ 1904{
1891 struct iso_context *ctx = 1905 struct iso_context *ctx =
1892 container_of(context, struct iso_context, context); 1906 container_of(context, struct iso_context, context);
1907 int i;
1908 struct descriptor *pd;
1893 1909
1894 if (last->transfer_status == 0) 1910 for (pd = d; pd <= last; pd++)
1895 /* This descriptor isn't done yet, stop iteration. */ 1911 if (pd->transfer_status)
1912 break;
1913 if (pd > last)
1914 /* Descriptor(s) not done yet, stop iteration */
1896 return 0; 1915 return 0;
1897 1916
1898 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) 1917 i = ctx->header_length;
1918 if (i + 4 < PAGE_SIZE) {
1919 /* Present this value as big-endian to match the receive code */
1920 *(__be32 *)(ctx->header + i) = cpu_to_be32(
1921 ((u32)le16_to_cpu(pd->transfer_status) << 16) |
1922 le16_to_cpu(pd->res_count));
1923 ctx->header_length += 4;
1924 }
1925 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
1899 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count), 1926 ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
1900 0, NULL, ctx->base.callback_data); 1927 ctx->header_length, ctx->header,
1901 1928 ctx->base.callback_data);
1929 ctx->header_length = 0;
1930 }
1902 return 1; 1931 return 1;
1903} 1932}
1904 1933
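The cycleInconsistent handler added above guards its fw_notify() with printk_ratelimit(), so a storm of inconsistent-cycle interrupts cannot flood the log. A user-space sketch of the same throttling idea, with a hand-rolled per-second limiter standing in for the kernel helper:

#include <stdio.h>
#include <time.h>

/* crude stand-in for printk_ratelimit(): allow at most 'burst' messages per second */
static int ratelimit_sketch(void)
{
	static time_t window;
	static int count;
	const int burst = 5;
	time_t now = time(NULL);

	if (now != window) {
		window = now;
		count = 0;
	}
	return ++count <= burst;
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		if (ratelimit_sketch())
			printf("isochronous cycle inconsistent (event %d)\n", i);
	return 0;
}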
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 5711ce5353c6..4baf3d7d0f8e 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -144,13 +144,6 @@ static int lnw_irq_type(unsigned irq, unsigned type)
144 144
145static void lnw_irq_unmask(unsigned irq) 145static void lnw_irq_unmask(unsigned irq)
146{ 146{
147 struct lnw_gpio *lnw = get_irq_chip_data(irq);
148 u32 gpio = irq - lnw->irq_base;
149 u8 reg = gpio / 32;
150 void __iomem *gedr;
151
152 gedr = (void __iomem *)(&lnw->reg_base->GEDR[reg]);
153 writel(BIT(gpio % 32), gedr);
154}; 147};
155 148
156static void lnw_irq_mask(unsigned irq) 149static void lnw_irq_mask(unsigned irq)
@@ -183,13 +176,11 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
183 gedr_v = readl(gedr); 176 gedr_v = readl(gedr);
184 if (!gedr_v) 177 if (!gedr_v)
185 continue; 178 continue;
186 for (gpio = reg*32; gpio < reg*32+32; gpio++) { 179 for (gpio = reg*32; gpio < reg*32+32; gpio++)
187 gedr_v = readl(gedr);
188 if (gedr_v & BIT(gpio % 32)) { 180 if (gedr_v & BIT(gpio % 32)) {
189 pr_debug("pin %d triggered\n", gpio); 181 pr_debug("pin %d triggered\n", gpio);
190 generic_handle_irq(lnw->irq_base + gpio); 182 generic_handle_irq(lnw->irq_base + gpio);
191 } 183 }
192 }
193 /* clear the edge detect status bit */ 184 /* clear the edge detect status bit */
194 writel(gedr_v, gedr); 185 writel(gedr_v, gedr);
195 } 186 }
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f831ea159291..96eddd17e050 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -92,6 +92,7 @@ config DRM_I830
92config DRM_I915 92config DRM_I915
93 tristate "i915 driver" 93 tristate "i915 driver"
94 depends on AGP_INTEL 94 depends on AGP_INTEL
95 select SHMEM
95 select DRM_KMS_HELPER 96 select DRM_KMS_HELPER
96 select FB_CFB_FILLRECT 97 select FB_CFB_FILLRECT
97 select FB_CFB_COPYAREA 98 select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index cea665d86dd3..b54ba63d506e 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -662,6 +662,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 		return NULL;
 	}
 
+	/* Some EDIDs have bogus h/vtotal values */
+	if (mode->hsync_end > mode->htotal)
+		mode->htotal = mode->hsync_end + 1;
+	if (mode->vsync_end > mode->vtotal)
+		mode->vtotal = mode->vsync_end + 1;
+
 	drm_mode_set_name(mode);
 
 	if (pt->misc & DRM_EDID_PT_INTERLACED)
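The drm_edid hunk clamps obviously bogus totals so that the horizontal and vertical sync pulses never end beyond the reported totals. A tiny sketch of that sanity check on a made-up mode structure:

#include <stdio.h>

struct mode { int hsync_end, htotal, vsync_end, vtotal; };

/* widen the totals if the EDID claims sync ends past them */
static void fixup_mode(struct mode *m)
{
	if (m->hsync_end > m->htotal)
		m->htotal = m->hsync_end + 1;
	if (m->vsync_end > m->vtotal)
		m->vtotal = m->vsync_end + 1;
}

int main(void)
{
	struct mode m = { .hsync_end = 1680, .htotal = 1600,
			  .vsync_end = 1050, .vtotal = 1090 };

	fixup_mode(&m);
	printf("htotal=%d vtotal=%d\n", m.htotal, m.vtotal);	/* htotal becomes 1681 */
	return 0;
}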
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index dc8e374a0b55..65ef011fa8ba 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -599,7 +599,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
599 struct drm_framebuffer *fb = fb_helper->fb; 599 struct drm_framebuffer *fb = fb_helper->fb;
600 int depth; 600 int depth;
601 601
602 if (var->pixclock == -1 || !var->pixclock) 602 if (var->pixclock != 0)
603 return -EINVAL; 603 return -EINVAL;
604 604
605 /* Need to resize the fb object !!! */ 605 /* Need to resize the fb object !!! */
@@ -691,7 +691,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
691 int ret; 691 int ret;
692 int i; 692 int i;
693 693
694 if (var->pixclock != -1) { 694 if (var->pixclock != 0) {
695 DRM_ERROR("PIXEL CLCOK SET\n"); 695 DRM_ERROR("PIXEL CLCOK SET\n");
696 return -EINVAL; 696 return -EINVAL;
697 } 697 }
@@ -904,7 +904,7 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
904 fb_helper->fb = fb; 904 fb_helper->fb = fb;
905 905
906 if (new_fb) { 906 if (new_fb) {
907 info->var.pixclock = -1; 907 info->var.pixclock = 0;
908 if (register_framebuffer(info) < 0) 908 if (register_framebuffer(info) < 0)
909 return -EINVAL; 909 return -EINVAL;
910 } else { 910 } else {
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 80391995bdec..e9dbb481c469 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -552,7 +552,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
552 vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; 552 vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
553 vma->vm_ops = obj->dev->driver->gem_vm_ops; 553 vma->vm_ops = obj->dev->driver->gem_vm_ops;
554 vma->vm_private_data = map->handle; 554 vma->vm_private_data = map->handle;
555 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 555 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
556 556
557 /* Take a ref for this mapping of the object, so that the fault 557 /* Take a ref for this mapping of the object, so that the fault
558 * handler can dereference the mmap offset's pointer to the object. 558 * handler can dereference the mmap offset's pointer to the object.
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index c861d80fd779..97dc5a4f0de4 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -103,6 +103,11 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
103 return child; 103 return child;
104} 104}
105 105
106/* drm_mm_pre_get() - pre allocate drm_mm_node structure
107 * drm_mm: memory manager struct we are pre-allocating for
108 *
109 * Returns 0 on success or -ENOMEM if allocation fails.
110 */
106int drm_mm_pre_get(struct drm_mm *mm) 111int drm_mm_pre_get(struct drm_mm *mm)
107{ 112{
108 struct drm_mm_node *node; 113 struct drm_mm_node *node;
@@ -253,12 +258,14 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 			prev_node->size += next_node->size;
 			list_del(&next_node->ml_entry);
 			list_del(&next_node->fl_entry);
+			spin_lock(&mm->unused_lock);
 			if (mm->num_unused < MM_UNUSED_TARGET) {
 				list_add(&next_node->fl_entry,
 					 &mm->unused_nodes);
 				++mm->num_unused;
 			} else
 				kfree(next_node);
+			spin_unlock(&mm->unused_lock);
 		} else {
 			next_node->size += cur->size;
 			next_node->start = cur->start;
@@ -271,11 +278,13 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 		list_add(&cur->fl_entry, &mm->fl_entry);
 	} else {
 		list_del(&cur->ml_entry);
+		spin_lock(&mm->unused_lock);
 		if (mm->num_unused < MM_UNUSED_TARGET) {
 			list_add(&cur->fl_entry, &mm->unused_nodes);
 			++mm->num_unused;
 		} else
 			kfree(cur);
+		spin_unlock(&mm->unused_lock);
 	}
 }
 
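Both drm_mm_put_block() paths now take mm->unused_lock around the bounded free-node cache, since drm_mm_pre_get() can manipulate the same list concurrently. A pthread-based user-space sketch of a capped, lock-protected node cache; the names and the cap are invented and only illustrate the locking pattern (build with -pthread on older glibc):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_TARGET 4	/* stand-in for MM_UNUSED_TARGET */

struct node { struct node *next; };

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *cache_head;
static int cache_len;

/* either park the node in the cache or free it, always under the lock */
static void put_node(struct node *n)
{
	pthread_mutex_lock(&cache_lock);
	if (cache_len < CACHE_TARGET) {
		n->next = cache_head;
		cache_head = n;
		cache_len++;
		pthread_mutex_unlock(&cache_lock);
		return;
	}
	pthread_mutex_unlock(&cache_lock);
	free(n);
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		put_node(malloc(sizeof(struct node)));
	printf("cached %d nodes\n", cache_len);	/* at most CACHE_TARGET */
	return 0;
}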
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f8ce9a3a420d..26bf0552b3cb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -267,10 +267,10 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
267 uint32_t *mem; 267 uint32_t *mem;
268 268
269 for (page = 0; page < page_count; page++) { 269 for (page = 0; page < page_count; page++) {
270 mem = kmap(pages[page]); 270 mem = kmap_atomic(pages[page], KM_USER0);
271 for (i = 0; i < PAGE_SIZE; i += 4) 271 for (i = 0; i < PAGE_SIZE; i += 4)
272 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 272 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
273 kunmap(pages[page]); 273 kunmap_atomic(pages[page], KM_USER0);
274 } 274 }
275} 275}
276 276
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 57204e298975..a725f6591192 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -296,6 +296,7 @@ typedef struct drm_i915_private {
296 u32 saveVBLANK_A; 296 u32 saveVBLANK_A;
297 u32 saveVSYNC_A; 297 u32 saveVSYNC_A;
298 u32 saveBCLRPAT_A; 298 u32 saveBCLRPAT_A;
299 u32 saveTRANSACONF;
299 u32 saveTRANS_HTOTAL_A; 300 u32 saveTRANS_HTOTAL_A;
300 u32 saveTRANS_HBLANK_A; 301 u32 saveTRANS_HBLANK_A;
301 u32 saveTRANS_HSYNC_A; 302 u32 saveTRANS_HSYNC_A;
@@ -326,6 +327,7 @@ typedef struct drm_i915_private {
326 u32 saveVBLANK_B; 327 u32 saveVBLANK_B;
327 u32 saveVSYNC_B; 328 u32 saveVSYNC_B;
328 u32 saveBCLRPAT_B; 329 u32 saveBCLRPAT_B;
330 u32 saveTRANSBCONF;
329 u32 saveTRANS_HTOTAL_B; 331 u32 saveTRANS_HTOTAL_B;
330 u32 saveTRANS_HBLANK_B; 332 u32 saveTRANS_HBLANK_B;
331 u32 saveTRANS_HSYNC_B; 333 u32 saveTRANS_HSYNC_B;
@@ -414,6 +416,16 @@ typedef struct drm_i915_private {
414 u32 savePFB_WIN_SZ; 416 u32 savePFB_WIN_SZ;
415 u32 savePFA_WIN_POS; 417 u32 savePFA_WIN_POS;
416 u32 savePFB_WIN_POS; 418 u32 savePFB_WIN_POS;
419 u32 savePCH_DREF_CONTROL;
420 u32 saveDISP_ARB_CTL;
421 u32 savePIPEA_DATA_M1;
422 u32 savePIPEA_DATA_N1;
423 u32 savePIPEA_LINK_M1;
424 u32 savePIPEA_LINK_N1;
425 u32 savePIPEB_DATA_M1;
426 u32 savePIPEB_DATA_N1;
427 u32 savePIPEB_LINK_M1;
428 u32 savePIPEB_LINK_N1;
417 429
418 struct { 430 struct {
419 struct drm_mm gtt_space; 431 struct drm_mm gtt_space;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c3ceffa46ea0..aa7fd82aa6eb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -254,10 +254,15 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
254{ 254{
255 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 255 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
256 int ret = IRQ_NONE; 256 int ret = IRQ_NONE;
257 u32 de_iir, gt_iir; 257 u32 de_iir, gt_iir, de_ier;
258 u32 new_de_iir, new_gt_iir; 258 u32 new_de_iir, new_gt_iir;
259 struct drm_i915_master_private *master_priv; 259 struct drm_i915_master_private *master_priv;
260 260
261 /* disable master interrupt before clearing iir */
262 de_ier = I915_READ(DEIER);
263 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
264 (void)I915_READ(DEIER);
265
261 de_iir = I915_READ(DEIIR); 266 de_iir = I915_READ(DEIIR);
262 gt_iir = I915_READ(GTIIR); 267 gt_iir = I915_READ(GTIIR);
263 268
@@ -290,6 +295,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
290 gt_iir = new_gt_iir; 295 gt_iir = new_gt_iir;
291 } 296 }
292 297
298 I915_WRITE(DEIER, de_ier);
299 (void)I915_READ(DEIER);
300
293 return ret; 301 return ret;
294} 302}
295 303
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 992d5617e798..6eec8171a44e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
239 if (drm_core_check_feature(dev, DRIVER_MODESET)) 239 if (drm_core_check_feature(dev, DRIVER_MODESET))
240 return; 240 return;
241 241
242 if (IS_IGDNG(dev)) {
243 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
244 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
245 }
246
242 /* Pipe & plane A info */ 247 /* Pipe & plane A info */
243 dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 248 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
244 dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 249 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -263,6 +268,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
263 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 268 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
264 269
265 if (IS_IGDNG(dev)) { 270 if (IS_IGDNG(dev)) {
271 dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
272 dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
273 dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
274 dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
275
266 dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL); 276 dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
267 dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL); 277 dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
268 278
@@ -270,6 +280,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
270 dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ); 280 dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
271 dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS); 281 dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
272 282
283 dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
273 dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A); 284 dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
274 dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A); 285 dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
275 dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A); 286 dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
@@ -314,6 +325,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
314 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); 325 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
315 326
316 if (IS_IGDNG(dev)) { 327 if (IS_IGDNG(dev)) {
328 dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
329 dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
330 dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
331 dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
332
317 dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL); 333 dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
318 dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL); 334 dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
319 335
@@ -321,6 +337,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
321 dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ); 337 dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
322 dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS); 338 dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
323 339
340 dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
324 dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B); 341 dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
325 dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B); 342 dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
326 dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B); 343 dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
@@ -368,6 +385,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
368 fpb1_reg = FPB1; 385 fpb1_reg = FPB1;
369 } 386 }
370 387
388 if (IS_IGDNG(dev)) {
389 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
390 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
391 }
392
371 /* Pipe & plane A info */ 393 /* Pipe & plane A info */
372 /* Prime the clock */ 394 /* Prime the clock */
373 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 395 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
@@ -395,6 +417,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
395 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); 417 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
396 418
397 if (IS_IGDNG(dev)) { 419 if (IS_IGDNG(dev)) {
420 I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
421 I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
422 I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
423 I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
424
398 I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); 425 I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
399 I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); 426 I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
400 427
@@ -402,6 +429,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
402 I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); 429 I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
403 I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS); 430 I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
404 431
432 I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
405 I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); 433 I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
406 I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); 434 I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
407 I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); 435 I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
@@ -439,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
439 /* Actually enable it */ 467 /* Actually enable it */
440 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); 468 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
441 DRM_UDELAY(150); 469 DRM_UDELAY(150);
442 if (IS_I965G(dev)) 470 if (IS_I965G(dev) && !IS_IGDNG(dev))
443 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 471 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
444 DRM_UDELAY(150); 472 DRM_UDELAY(150);
445 473
@@ -454,6 +482,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
454 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); 482 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
455 483
456 if (IS_IGDNG(dev)) { 484 if (IS_IGDNG(dev)) {
485 I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
486 I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
487 I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
488 I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
489
457 I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); 490 I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
458 I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); 491 I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
459 492
@@ -461,6 +494,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
461 I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); 494 I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
462 I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS); 495 I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
463 496
497 I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
464 I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); 498 I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
465 I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); 499 I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
466 I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); 500 I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 212e22740fc1..e5051446c48e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -262,8 +262,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
262 } while (time_after(timeout, jiffies)); 262 } while (time_after(timeout, jiffies));
263 } 263 }
264 264
265 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == 265 if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
266 CRT_HOTPLUG_MONITOR_COLOR) 266 CRT_HOTPLUG_MONITOR_NONE)
267 return true; 267 return true;
268 268
269 return false; 269 return false;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3ba6546b7c7f..099f420de57a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -863,10 +863,8 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	intel_clock_t clock;
-	int max_n;
-	bool found;
 	int err_most = 47;
-	found = false;
+	int err_min = 10000;
 
 	/* eDP has only 2 clock choice, no n/m/p setting */
 	if (HAS_eDP)
@@ -890,10 +888,9 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 	}
 
 	memset(best_clock, 0, sizeof(*best_clock));
-	max_n = limit->n.max;
 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
 		/* based on hardware requriment prefer smaller n to precision */
-		for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+		for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
 			/* based on hardware requirment prefere larger m1,m2 */
 			for (clock.m1 = limit->m1.max;
 			     clock.m1 >= limit->m1.min; clock.m1--) {
@@ -907,18 +904,18 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 					this_err = abs((10000 - (target*10000/clock.dot)));
 					if (this_err < err_most) {
 						*best_clock = clock;
-						err_most = this_err;
-						max_n = clock.n;
-						found = true;
 						/* found on first matching */
 						goto out;
+					} else if (this_err < err_min) {
+						*best_clock = clock;
+						err_min = this_err;
 					}
 				}
 			}
 		}
 	}
 out:
-	return found;
+	return true;
 }
 
 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
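The reworked intel_igdng_find_best_PLL() still returns immediately on the first candidate whose error beats err_most, but now also remembers the lowest-error candidate seen so far (err_min) so it always has something to hand back. A stripped-down sketch of that selection strategy over made-up candidate values; the real loop iterates PLL divider combinations rather than a flat array:

#include <stdio.h>
#include <stdlib.h>

/* pick the first value within 'threshold' of target; otherwise the closest one seen */
static int pick(const int *candidates, int n, int target, int threshold)
{
	int best = candidates[0], best_err = abs(candidates[0] - target);

	for (int i = 0; i < n; i++) {
		int err = abs(candidates[i] - target);

		if (err < threshold)
			return candidates[i];	/* good enough: take the first match */
		if (err < best_err) {
			best_err = err;
			best = candidates[i];	/* remember the least-bad fallback */
		}
	}
	return best;
}

int main(void)
{
	int c[] = { 90, 130, 97, 220 };

	printf("%d\n", pick(c, 4, 100, 2));	/* nothing within 2 of 100: falls back to 97 */
	return 0;
}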
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 663ab6de0b58..c33451aec1bd 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -77,14 +77,32 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
77 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 77 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
78 u32 temp; 78 u32 temp;
79 79
80 if (mode != DRM_MODE_DPMS_ON) { 80 temp = I915_READ(hdmi_priv->sdvox_reg);
81 temp = I915_READ(hdmi_priv->sdvox_reg); 81
82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
83 * we do this anyway which shows more stable in testing.
84 */
85 if (IS_IGDNG(dev)) {
82 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); 86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
87 POSTING_READ(hdmi_priv->sdvox_reg);
88 }
89
90 if (mode != DRM_MODE_DPMS_ON) {
91 temp &= ~SDVO_ENABLE;
83 } else { 92 } else {
84 temp = I915_READ(hdmi_priv->sdvox_reg); 93 temp |= SDVO_ENABLE;
85 I915_WRITE(hdmi_priv->sdvox_reg, temp | SDVO_ENABLE);
86 } 94 }
95
96 I915_WRITE(hdmi_priv->sdvox_reg, temp);
87 POSTING_READ(hdmi_priv->sdvox_reg); 97 POSTING_READ(hdmi_priv->sdvox_reg);
98
99 /* HW workaround, need to write this twice for issue that may result
100 * in first write getting masked.
101 */
102 if (IS_IGDNG(dev)) {
103 I915_WRITE(hdmi_priv->sdvox_reg, temp);
104 POSTING_READ(hdmi_priv->sdvox_reg);
105 }
88} 106}
89 107
90static void intel_hdmi_save(struct drm_connector *connector) 108static void intel_hdmi_save(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 901befe03da2..d67c42555ab9 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -107,6 +107,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
107 base += 3; 107 base += 3;
108 break; 108 break;
109 case ATOM_IIO_WRITE: 109 case ATOM_IIO_WRITE:
110 (void)ctx->card->reg_read(ctx->card, CU16(base + 1));
110 ctx->card->reg_write(ctx->card, CU16(base + 1), temp); 111 ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
111 base += 3; 112 base += 3;
112 break; 113 break;
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index fb211e585dea..0d79577c1576 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -561,7 +561,7 @@ struct table {
561 char *gpu_prefix; 561 char *gpu_prefix;
562}; 562};
563 563
564struct offset *offset_new(unsigned o) 564static struct offset *offset_new(unsigned o)
565{ 565{
566 struct offset *offset; 566 struct offset *offset;
567 567
@@ -573,12 +573,12 @@ struct offset *offset_new(unsigned o)
573 return offset; 573 return offset;
574} 574}
575 575
576void table_offset_add(struct table *t, struct offset *offset) 576static void table_offset_add(struct table *t, struct offset *offset)
577{ 577{
578 list_add_tail(&offset->list, &t->offsets); 578 list_add_tail(&offset->list, &t->offsets);
579} 579}
580 580
581void table_init(struct table *t) 581static void table_init(struct table *t)
582{ 582{
583 INIT_LIST_HEAD(&t->offsets); 583 INIT_LIST_HEAD(&t->offsets);
584 t->offset_max = 0; 584 t->offset_max = 0;
@@ -586,7 +586,7 @@ void table_init(struct table *t)
586 t->table = NULL; 586 t->table = NULL;
587} 587}
588 588
589void table_print(struct table *t) 589static void table_print(struct table *t)
590{ 590{
591 unsigned nlloop, i, j, n, c, id; 591 unsigned nlloop, i, j, n, c, id;
592 592
@@ -611,7 +611,7 @@ void table_print(struct table *t)
611 printf("};\n"); 611 printf("};\n");
612} 612}
613 613
614int table_build(struct table *t) 614static int table_build(struct table *t)
615{ 615{
616 struct offset *offset; 616 struct offset *offset;
617 unsigned i, m; 617 unsigned i, m;
@@ -631,7 +631,7 @@ int table_build(struct table *t)
631} 631}
632 632
633static char gpu_name[10]; 633static char gpu_name[10];
634int parser_auth(struct table *t, const char *filename) 634static int parser_auth(struct table *t, const char *filename)
635{ 635{
636 FILE *file; 636 FILE *file;
637 regex_t mask_rex; 637 regex_t mask_rex;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 757f5cd37744..224506a2f7b1 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -519,6 +519,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
519 * AGP 519 * AGP
520 */ 520 */
521int radeon_agp_init(struct radeon_device *rdev); 521int radeon_agp_init(struct radeon_device *rdev);
522void radeon_agp_resume(struct radeon_device *rdev);
522void radeon_agp_fini(struct radeon_device *rdev); 523void radeon_agp_fini(struct radeon_device *rdev);
523 524
524 525
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 23ea9955ac59..54bf49a6d676 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -237,6 +237,18 @@ int radeon_agp_init(struct radeon_device *rdev)
237#endif 237#endif
238} 238}
239 239
240void radeon_agp_resume(struct radeon_device *rdev)
241{
242#if __OS_HAS_AGP
243 int r;
244 if (rdev->flags & RADEON_IS_AGP) {
245 r = radeon_agp_init(rdev);
246 if (r)
247 dev_warn(rdev->dev, "radeon AGP reinit failed\n");
248 }
249#endif
250}
251
240void radeon_agp_fini(struct radeon_device *rdev) 252void radeon_agp_fini(struct radeon_device *rdev)
241{ 253{
242#if __OS_HAS_AGP 254#if __OS_HAS_AGP
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index fce4c4087fda..29763ceae3af 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -566,8 +566,9 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
566 radeon_i2c_do_lock(radeon_connector, 0); 566 radeon_i2c_do_lock(radeon_connector, 0);
567 567
568 if (!radeon_connector->edid) { 568 if (!radeon_connector->edid) {
569 DRM_ERROR("DDC responded but not EDID found for %s\n", 569 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
570 drm_get_connector_name(connector)); 570 drm_get_connector_name(connector));
571 ret = connector_status_connected;
571 } else { 572 } else {
572 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); 573 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
573 574
@@ -720,8 +721,8 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
720 radeon_i2c_do_lock(radeon_connector, 0); 721 radeon_i2c_do_lock(radeon_connector, 0);
721 722
722 if (!radeon_connector->edid) { 723 if (!radeon_connector->edid) {
723 DRM_ERROR("DDC responded but not EDID found for %s\n", 724 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
724 drm_get_connector_name(connector)); 725 drm_get_connector_name(connector));
725 } else { 726 } else {
726 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); 727 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
727 728
@@ -1149,6 +1150,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
1149 if (ret) 1150 if (ret)
1150 goto failed; 1151 goto failed;
1151 radeon_connector->dac_load_detect = true; 1152 radeon_connector->dac_load_detect = true;
1153 /* RS400,RC410,RS480 chipset seems to report a lot
1154 * of false positive on load detect, we haven't yet
1155 * found a way to make load detect reliable on those
1156 * chipset, thus just disable it for TV.
1157 */
1158 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
1159 radeon_connector->dac_load_detect = false;
1152 drm_connector_attach_property(&radeon_connector->base, 1160 drm_connector_attach_property(&radeon_connector->base,
1153 rdev->mode_info.load_detect_property, 1161 rdev->mode_info.load_detect_property,
1154 1); 1162 1);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e3f9edfa40fe..41bb76fbe734 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -688,6 +688,8 @@ int radeon_resume_kms(struct drm_device *dev)
688 return -1; 688 return -1;
689 } 689 }
690 pci_set_master(dev->pdev); 690 pci_set_master(dev->pdev);
691 /* resume AGP if in use */
692 radeon_agp_resume(rdev);
691 radeon_resume(rdev); 693 radeon_resume(rdev);
692 radeon_restore_bios_scratch_regs(rdev); 694 radeon_restore_bios_scratch_regs(rdev);
693 fb_set_suspend(rdev->fbdev_info, 0); 695 fb_set_suspend(rdev->fbdev_info, 0);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 7935f793bf62..ba68c9fe90a1 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -137,8 +137,6 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
137 137
138void rv515_vga_render_disable(struct radeon_device *rdev) 138void rv515_vga_render_disable(struct radeon_device *rdev)
139{ 139{
140 WREG32(R_000330_D1VGA_CONTROL, 0);
141 WREG32(R_000338_D2VGA_CONTROL, 0);
142 WREG32(R_000300_VGA_RENDER_CONTROL, 140 WREG32(R_000300_VGA_RENDER_CONTROL,
143 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); 141 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
144} 142}
@@ -382,7 +380,6 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
382 save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL); 380 save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
383 381
384 /* Stop all video */ 382 /* Stop all video */
385 WREG32(R_000330_D1VGA_CONTROL, 0);
386 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); 383 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
387 WREG32(R_000300_VGA_RENDER_CONTROL, 0); 384 WREG32(R_000300_VGA_RENDER_CONTROL, 0);
388 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); 385 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
@@ -391,6 +388,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
391 WREG32(R_006880_D2CRTC_CONTROL, 0); 388 WREG32(R_006880_D2CRTC_CONTROL, 0);
392 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); 389 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
393 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); 390 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
391 WREG32(R_000330_D1VGA_CONTROL, 0);
392 WREG32(R_000338_D2VGA_CONTROL, 0);
394} 393}
395 394
396void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) 395void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -404,14 +403,14 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
404 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); 403 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
405 mdelay(1); 404 mdelay(1);
406 /* Restore video state */ 405 /* Restore video state */
406 WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
407 WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
407 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); 408 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
408 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); 409 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
409 WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control); 410 WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
410 WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control); 411 WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
411 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); 412 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
412 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); 413 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
413 WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
414 WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
415 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); 414 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
416} 415}
417 416
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index d39877a7da63..b5a95193c694 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -350,8 +350,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
 
 	case FAULT:
 		/* Note - only for remote1 and remote2 */
-		out = data->alarms & (sattr->index ? 0x8000 : 0x4000);
-		out = out ? 0 : 1;
+		out = !!(data->alarms & (sattr->index ? 0x8000 : 0x4000));
 		break;
 
 	default:
@@ -863,7 +862,7 @@ static SENSOR_DEVICE_ATTR_2(pwm1_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
863 set_pwmfreq, INPUT, 0); 862 set_pwmfreq, INPUT, 0);
864static SENSOR_DEVICE_ATTR_2(pwm1_enable, S_IRUGO | S_IWUSR, show_pwmctrl, 863static SENSOR_DEVICE_ATTR_2(pwm1_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
865 set_pwmctrl, INPUT, 0); 864 set_pwmctrl, INPUT, 0);
866static SENSOR_DEVICE_ATTR_2(pwm1_auto_channel_temp, S_IRUGO | S_IWUSR, 865static SENSOR_DEVICE_ATTR_2(pwm1_auto_channels_temp, S_IRUGO | S_IWUSR,
867 show_pwmchan, set_pwmchan, INPUT, 0); 866 show_pwmchan, set_pwmchan, INPUT, 0);
868static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm, 867static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
869 set_pwm, MIN, 0); 868 set_pwm, MIN, 0);
@@ -875,7 +874,7 @@ static SENSOR_DEVICE_ATTR_2(pwm2_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
875 set_pwmfreq, INPUT, 1); 874 set_pwmfreq, INPUT, 1);
876static SENSOR_DEVICE_ATTR_2(pwm2_enable, S_IRUGO | S_IWUSR, show_pwmctrl, 875static SENSOR_DEVICE_ATTR_2(pwm2_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
877 set_pwmctrl, INPUT, 1); 876 set_pwmctrl, INPUT, 1);
878static SENSOR_DEVICE_ATTR_2(pwm2_auto_channel_temp, S_IRUGO | S_IWUSR, 877static SENSOR_DEVICE_ATTR_2(pwm2_auto_channels_temp, S_IRUGO | S_IWUSR,
879 show_pwmchan, set_pwmchan, INPUT, 1); 878 show_pwmchan, set_pwmchan, INPUT, 1);
880static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm, 879static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
881 set_pwm, MIN, 1); 880 set_pwm, MIN, 1);
@@ -887,7 +886,7 @@ static SENSOR_DEVICE_ATTR_2(pwm3_freq, S_IRUGO | S_IWUSR, show_pwmfreq,
887 set_pwmfreq, INPUT, 2); 886 set_pwmfreq, INPUT, 2);
888static SENSOR_DEVICE_ATTR_2(pwm3_enable, S_IRUGO | S_IWUSR, show_pwmctrl, 887static SENSOR_DEVICE_ATTR_2(pwm3_enable, S_IRUGO | S_IWUSR, show_pwmctrl,
889 set_pwmctrl, INPUT, 2); 888 set_pwmctrl, INPUT, 2);
890static SENSOR_DEVICE_ATTR_2(pwm3_auto_channel_temp, S_IRUGO | S_IWUSR, 889static SENSOR_DEVICE_ATTR_2(pwm3_auto_channels_temp, S_IRUGO | S_IWUSR,
891 show_pwmchan, set_pwmchan, INPUT, 2); 890 show_pwmchan, set_pwmchan, INPUT, 2);
892static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm, 891static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR, show_pwm,
893 set_pwm, MIN, 2); 892 set_pwm, MIN, 2);
@@ -947,19 +946,19 @@ static struct attribute *adt7475_attrs[] = {
947 &sensor_dev_attr_pwm1.dev_attr.attr, 946 &sensor_dev_attr_pwm1.dev_attr.attr,
948 &sensor_dev_attr_pwm1_freq.dev_attr.attr, 947 &sensor_dev_attr_pwm1_freq.dev_attr.attr,
949 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 948 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
950 &sensor_dev_attr_pwm1_auto_channel_temp.dev_attr.attr, 949 &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
951 &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr, 950 &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
952 &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr, 951 &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
953 &sensor_dev_attr_pwm2.dev_attr.attr, 952 &sensor_dev_attr_pwm2.dev_attr.attr,
954 &sensor_dev_attr_pwm2_freq.dev_attr.attr, 953 &sensor_dev_attr_pwm2_freq.dev_attr.attr,
955 &sensor_dev_attr_pwm2_enable.dev_attr.attr, 954 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
956 &sensor_dev_attr_pwm2_auto_channel_temp.dev_attr.attr, 955 &sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
957 &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr, 956 &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
958 &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr, 957 &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
959 &sensor_dev_attr_pwm3.dev_attr.attr, 958 &sensor_dev_attr_pwm3.dev_attr.attr,
960 &sensor_dev_attr_pwm3_freq.dev_attr.attr, 959 &sensor_dev_attr_pwm3_freq.dev_attr.attr,
961 &sensor_dev_attr_pwm3_enable.dev_attr.attr, 960 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
962 &sensor_dev_attr_pwm3_auto_channel_temp.dev_attr.attr, 961 &sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
963 &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr, 962 &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
964 &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr, 963 &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
965 NULL, 964 NULL,
@@ -1152,7 +1151,7 @@ static struct adt7475_data *adt7475_update_device(struct device *dev)
 	}
 
 	/* Limits and settings, should never change update every 60 seconds */
-	if (time_after(jiffies, data->limits_updated + HZ * 2) ||
+	if (time_after(jiffies, data->limits_updated + HZ * 60) ||
 	    !data->valid) {
 		data->config5 = adt7475_read(REG_CONFIG5);
 
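Two small adt7475 cleanups above: the limits cache is now refreshed every 60 seconds as the comment always claimed (HZ * 60 rather than HZ * 2), and the FAULT case collapses the masked test into !!(...), which maps any non-zero bit pattern to exactly 1 and also flips the previously inverted result. The !! idiom in isolation:

#include <stdio.h>

int main(void)
{
	unsigned alarms = 0x8000;	/* some status word with a fault bit set */
	unsigned mask = 0x8000;

	int fault = !!(alarms & mask);	/* 0 or 1, regardless of which bit matched */

	printf("fault=%d\n", fault);
	return 0;
}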
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 3a524f2fe493..71835412529f 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -323,14 +323,21 @@ static int __devinit s3c_hwmon_probe(struct platform_device *dev)
323 } 323 }
324 324
325 for (i = 0; i < ARRAY_SIZE(pdata->in); i++) { 325 for (i = 0; i < ARRAY_SIZE(pdata->in); i++) {
326 if (!pdata->in[i]) 326 struct s3c24xx_adc_hwmon_incfg *cfg = pdata->in[i];
327
328 if (!cfg)
327 continue; 329 continue;
328 330
329 if (pdata->in[i]->mult >= 0x10000) 331 if (cfg->mult >= 0x10000)
330 dev_warn(&dev->dev, 332 dev_warn(&dev->dev,
331 "channel %d multiplier too large\n", 333 "channel %d multiplier too large\n",
332 i); 334 i);
333 335
336 if (cfg->divider == 0) {
337 dev_err(&dev->dev, "channel %d divider zero\n", i);
338 continue;
339 }
340
334 ret = s3c_hwmon_create_attr(&dev->dev, pdata->in[i], 341 ret = s3c_hwmon_create_attr(&dev->dev, pdata->in[i],
335 &hwmon->attrs[i], i); 342 &hwmon->attrs[i], i);
336 if (ret) { 343 if (ret) {
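
The s3c-hwmon hunk caches pdata->in[i] in a local cfg pointer and rejects channels whose divider is zero before any sysfs attribute is created, presumably because the scaled reading divides by that value later. A hedged sketch of such a conversion, with hypothetical names:

/* raw ADC code scaled to the value reported through sysfs */
static int s3c_hwmon_scale(int raw, unsigned int mult, unsigned int div)
{
	/* a channel registered with div == 0 would fault right here,
	 * hence the probe-time rejection above */
	return (raw * mult) / div;
}
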
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 6ff6c20f1e78..fbab6846ae64 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -19,7 +19,9 @@
19#include <linux/completion.h> 19#include <linux/completion.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/i2c-pnx.h> 21#include <linux/i2c-pnx.h>
22#include <linux/io.h>
22#include <mach/hardware.h> 23#include <mach/hardware.h>
24#include <mach/i2c.h>
23#include <asm/irq.h> 25#include <asm/irq.h>
24#include <asm/uaccess.h> 26#include <asm/uaccess.h>
25 27
@@ -54,6 +56,9 @@ static inline void i2c_pnx_arm_timer(struct i2c_adapter *adap)
54 struct timer_list *timer = &data->mif.timer; 56 struct timer_list *timer = &data->mif.timer;
55 int expires = I2C_PNX_TIMEOUT / (1000 / HZ); 57 int expires = I2C_PNX_TIMEOUT / (1000 / HZ);
56 58
59 if (expires <= 1)
60 expires = 2;
61
57 del_timer_sync(timer); 62 del_timer_sync(timer);
58 63
59 dev_dbg(&adap->dev, "Timer armed at %lu plus %u jiffies.\n", 64 dev_dbg(&adap->dev, "Timer armed at %lu plus %u jiffies.\n",
@@ -645,7 +650,7 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev)
645 return 0; 650 return 0;
646 651
647out_irq: 652out_irq:
648 free_irq(alg_data->irq, alg_data); 653 free_irq(alg_data->irq, i2c_pnx->adapter);
649out_clock: 654out_clock:
650 i2c_pnx->set_clock_stop(pdev); 655 i2c_pnx->set_clock_stop(pdev);
651out_unmap: 656out_unmap:
@@ -664,7 +669,7 @@ static int __devexit i2c_pnx_remove(struct platform_device *pdev)
664 struct i2c_adapter *adap = i2c_pnx->adapter; 669 struct i2c_adapter *adap = i2c_pnx->adapter;
665 struct i2c_pnx_algo_data *alg_data = adap->algo_data; 670 struct i2c_pnx_algo_data *alg_data = adap->algo_data;
666 671
667 free_irq(alg_data->irq, alg_data); 672 free_irq(alg_data->irq, i2c_pnx->adapter);
668 i2c_del_adapter(adap); 673 i2c_del_adapter(adap);
669 i2c_pnx->set_clock_stop(pdev); 674 i2c_pnx->set_clock_stop(pdev);
670 iounmap((void *)alg_data->ioaddr); 675 iounmap((void *)alg_data->ioaddr);
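
Two things change in i2c-pnx: the timeout arithmetic I2C_PNX_TIMEOUT / (1000 / HZ) can round down to one jiffy or less on low-HZ kernels, so the result is clamped to a minimum of 2, and both the probe error path and remove now pass i2c_pnx->adapter to free_irq(). A small sketch of the clamping, assuming a millisecond timeout constant:

#include <linux/jiffies.h>

#define I2C_PNX_TIMEOUT_MS	10	/* illustrative value */

static unsigned long i2c_pnx_timer_expires(void)
{
	unsigned long expires = msecs_to_jiffies(I2C_PNX_TIMEOUT_MS);

	/* never arm the watchdog for less than two jiffies */
	if (expires <= 1)
		expires = 2;
	return expires;
}
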
diff --git a/drivers/i2c/chips/tsl2550.c b/drivers/i2c/chips/tsl2550.c
index aa96bd2d27ea..a0702f36a72f 100644
--- a/drivers/i2c/chips/tsl2550.c
+++ b/drivers/i2c/chips/tsl2550.c
@@ -257,6 +257,7 @@ static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO,
257 257
258static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf) 258static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
259{ 259{
260 struct tsl2550_data *data = i2c_get_clientdata(client);
260 u8 ch0, ch1; 261 u8 ch0, ch1;
261 int ret; 262 int ret;
262 263
@@ -274,6 +275,8 @@ static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
274 ret = tsl2550_calculate_lux(ch0, ch1); 275 ret = tsl2550_calculate_lux(ch0, ch1);
275 if (ret < 0) 276 if (ret < 0)
276 return ret; 277 return ret;
278 if (data->operating_mode == 1)
279 ret *= 5;
277 280
278 return sprintf(buf, "%d\n", ret); 281 return sprintf(buf, "%d\n", ret);
279} 282}
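
The tsl2550 hunk looks up the client data so that __tsl2550_show_lux() can account for the chip's operating mode: when the sensor runs in extended range (operating_mode == 1) the computed lux is multiplied by five. A sketch of that scaling in isolation, with the factor taken from the hunk and the rest illustrative:

static int tsl2550_scale_lux(int lux, int operating_mode)
{
	/* extended-range mode: scale the standard-mode result by 5 */
	if (operating_mode == 1)
		lux *= 5;
	return lux;
}
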
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 8d80fceca6a4..296504355142 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -762,6 +762,7 @@ int i2c_del_adapter(struct i2c_adapter *adap)
762{ 762{
763 int res = 0; 763 int res = 0;
764 struct i2c_adapter *found; 764 struct i2c_adapter *found;
765 struct i2c_client *client, *next;
765 766
766 /* First make sure that this adapter was ever added */ 767 /* First make sure that this adapter was ever added */
767 mutex_lock(&core_lock); 768 mutex_lock(&core_lock);
@@ -781,6 +782,16 @@ int i2c_del_adapter(struct i2c_adapter *adap)
781 if (res) 782 if (res)
782 return res; 783 return res;
783 784
785 /* Remove devices instantiated from sysfs */
786 list_for_each_entry_safe(client, next, &userspace_devices, detected) {
787 if (client->adapter == adap) {
788 dev_dbg(&adap->dev, "Removing %s at 0x%x\n",
789 client->name, client->addr);
790 list_del(&client->detected);
791 i2c_unregister_device(client);
792 }
793 }
794
784 /* Detach any active clients. This can't fail, thus we do not 795 /* Detach any active clients. This can't fail, thus we do not
785 checking the returned value. */ 796 checking the returned value. */
786 res = device_for_each_child(&adap->dev, NULL, __unregister_client); 797 res = device_for_each_child(&adap->dev, NULL, __unregister_client);
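
i2c_del_adapter() now also removes clients that were instantiated from userspace through sysfs. Because entries are unlinked while the list is being walked, the _safe iterator is needed so the lookahead pointer stays valid. A sketch of the pattern, wrapped in a hypothetical helper:

#include <linux/list.h>

static void remove_userspace_clients(struct i2c_adapter *adap)
{
	struct i2c_client *client, *next;

	list_for_each_entry_safe(client, next, &userspace_devices, detected) {
		if (client->adapter != adap)
			continue;
		/* 'next' remains valid even though 'client' is unlinked
		 * and freed by i2c_unregister_device() */
		list_del(&client->detected);
		i2c_unregister_device(client);
	}
}
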
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 063b933d864a..dd6396384c25 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -60,15 +60,6 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
60MODULE_DESCRIPTION("PCMCIA ATA/IDE card driver"); 60MODULE_DESCRIPTION("PCMCIA ATA/IDE card driver");
61MODULE_LICENSE("Dual MPL/GPL"); 61MODULE_LICENSE("Dual MPL/GPL");
62 62
63#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
64
65#ifdef CONFIG_PCMCIA_DEBUG
66INT_MODULE_PARM(pc_debug, 0);
67#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
68#else
69#define DEBUG(n, args...)
70#endif
71
72/*====================================================================*/ 63/*====================================================================*/
73 64
74typedef struct ide_info_t { 65typedef struct ide_info_t {
@@ -98,7 +89,7 @@ static int ide_probe(struct pcmcia_device *link)
98{ 89{
99 ide_info_t *info; 90 ide_info_t *info;
100 91
101 DEBUG(0, "ide_attach()\n"); 92 dev_dbg(&link->dev, "ide_attach()\n");
102 93
103 /* Create new ide device */ 94 /* Create new ide device */
104 info = kzalloc(sizeof(*info), GFP_KERNEL); 95 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -112,7 +103,6 @@ static int ide_probe(struct pcmcia_device *link)
112 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 103 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
113 link->io.IOAddrLines = 3; 104 link->io.IOAddrLines = 3;
114 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 105 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
115 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
116 link->conf.Attributes = CONF_ENABLE_IRQ; 106 link->conf.Attributes = CONF_ENABLE_IRQ;
117 link->conf.IntType = INT_MEMORY_AND_IO; 107 link->conf.IntType = INT_MEMORY_AND_IO;
118 108
@@ -134,7 +124,7 @@ static void ide_detach(struct pcmcia_device *link)
134 ide_hwif_t *hwif = info->host->ports[0]; 124 ide_hwif_t *hwif = info->host->ports[0];
135 unsigned long data_addr, ctl_addr; 125 unsigned long data_addr, ctl_addr;
136 126
137 DEBUG(0, "ide_detach(0x%p)\n", link); 127 dev_dbg(&link->dev, "ide_detach(0x%p)\n", link);
138 128
139 data_addr = hwif->io_ports.data_addr; 129 data_addr = hwif->io_ports.data_addr;
140 ctl_addr = hwif->io_ports.ctl_addr; 130 ctl_addr = hwif->io_ports.ctl_addr;
@@ -217,9 +207,6 @@ out_release:
217 207
218======================================================================*/ 208======================================================================*/
219 209
220#define CS_CHECK(fn, ret) \
221do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
222
223struct pcmcia_config_check { 210struct pcmcia_config_check {
224 unsigned long ctl_base; 211 unsigned long ctl_base;
225 int skip_vcc; 212 int skip_vcc;
@@ -282,11 +269,11 @@ static int ide_config(struct pcmcia_device *link)
282{ 269{
283 ide_info_t *info = link->priv; 270 ide_info_t *info = link->priv;
284 struct pcmcia_config_check *stk = NULL; 271 struct pcmcia_config_check *stk = NULL;
285 int last_ret = 0, last_fn = 0, is_kme = 0; 272 int ret = 0, is_kme = 0;
286 unsigned long io_base, ctl_base; 273 unsigned long io_base, ctl_base;
287 struct ide_host *host; 274 struct ide_host *host;
288 275
289 DEBUG(0, "ide_config(0x%p)\n", link); 276 dev_dbg(&link->dev, "ide_config(0x%p)\n", link);
290 277
291 is_kme = ((link->manf_id == MANFID_KME) && 278 is_kme = ((link->manf_id == MANFID_KME) &&
292 ((link->card_id == PRODID_KME_KXLC005_A) || 279 ((link->card_id == PRODID_KME_KXLC005_A) ||
@@ -306,8 +293,12 @@ static int ide_config(struct pcmcia_device *link)
306 io_base = link->io.BasePort1; 293 io_base = link->io.BasePort1;
307 ctl_base = stk->ctl_base; 294 ctl_base = stk->ctl_base;
308 295
309 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 296 ret = pcmcia_request_irq(link, &link->irq);
310 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 297 if (ret)
298 goto failed;
299 ret = pcmcia_request_configuration(link, &link->conf);
300 if (ret)
301 goto failed;
311 302
312 /* disable drive interrupts during IDE probe */ 303 /* disable drive interrupts during IDE probe */
313 outb(0x02, ctl_base); 304 outb(0x02, ctl_base);
@@ -342,8 +333,6 @@ err_mem:
342 printk(KERN_NOTICE "ide-cs: ide_config failed memory allocation\n"); 333 printk(KERN_NOTICE "ide-cs: ide_config failed memory allocation\n");
343 goto failed; 334 goto failed;
344 335
345cs_failed:
346 cs_error(link, last_fn, last_ret);
347failed: 336failed:
348 kfree(stk); 337 kfree(stk);
349 ide_release(link); 338 ide_release(link);
@@ -363,7 +352,7 @@ static void ide_release(struct pcmcia_device *link)
363 ide_info_t *info = link->priv; 352 ide_info_t *info = link->priv;
364 struct ide_host *host = info->host; 353 struct ide_host *host = info->host;
365 354
366 DEBUG(0, "ide_release(0x%p)\n", link); 355 dev_dbg(&link->dev, "ide_release(0x%p)\n", link);
367 356
368 if (info->ndev) 357 if (info->ndev)
369 /* FIXME: if this fails we need to queue the cleanup somehow 358 /* FIXME: if this fails we need to queue the cleanup somehow
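
ide-cs drops the CS_CHECK() wrapper together with the cs_error() reporting it fed; each pcmcia call is now checked directly and failures jump to a single failed: label. The same conversion shows up again in the ISDN PCMCIA drivers below. A sketch of the resulting shape, under a hypothetical function name:

static int example_cs_config(struct pcmcia_device *link)
{
	int ret;

	ret = pcmcia_request_irq(link, &link->irq);
	if (ret)
		goto failed;

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		goto failed;

	return 0;

failed:
	pcmcia_disable_device(link);
	return -ENODEV;
}
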
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index d3440b5010a5..6e7ae2b6cfc6 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -162,7 +162,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
162 if (tf->command == ATA_CMD_SET_FEATURES && 162 if (tf->command == ATA_CMD_SET_FEATURES &&
163 tf->feature == SETFEATURES_XFER && 163 tf->feature == SETFEATURES_XFER &&
164 tf->nsect >= XFER_SW_DMA_0) { 164 tf->nsect >= XFER_SW_DMA_0) {
165 xfer_rate = ide_find_dma_mode(drive, XFER_UDMA_6); 165 xfer_rate = ide_find_dma_mode(drive, tf->nsect);
166 if (xfer_rate != tf->nsect) { 166 if (xfer_rate != tf->nsect) {
167 err = -EINVAL; 167 err = -EINVAL;
168 goto abort; 168 goto abort;
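
The ide-ioctls change alters what the SET FEATURES / XFER validation compares against: ide_find_dma_mode() used to be asked for the drive's best mode up to XFER_UDMA_6, so only that single top mode could pass the equality test; querying it with the requested mode (tf->nsect) makes the check succeed exactly when that mode is supported. Sketch of the check as it now reads:

/* accept the request only if the drive supports that exact DMA mode */
u8 xfer_rate = ide_find_dma_mode(drive, tf->nsect);

if (xfer_rate != tf->nsect) {
	err = -EINVAL;
	goto abort;
}
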
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index 96a2959ce877..7c544f7c74c4 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -260,15 +260,12 @@ static int ieee802154_fake_close(struct net_device *dev)
260static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb, 260static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb,
261 struct net_device *dev) 261 struct net_device *dev)
262{ 262{
263 skb->iif = dev->ifindex;
264 skb->dev = dev;
265 dev->stats.tx_packets++; 263 dev->stats.tx_packets++;
266 dev->stats.tx_bytes += skb->len; 264 dev->stats.tx_bytes += skb->len;
267 265
268 dev->trans_start = jiffies;
269
270 /* FIXME: do hardware work here ... */ 266 /* FIXME: do hardware work here ... */
271 267
268 dev_kfree_skb(skb);
272 return NETDEV_TX_OK; 269 return NETDEV_TX_OK;
273} 270}
274 271
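
The fakehard transmit handler stops touching skb->iif and dev->trans_start and, more importantly, frees the skb it accepts; returning NETDEV_TX_OK without freeing would leak every transmitted packet. A minimal sketch of such a sink-style xmit routine (names illustrative):

static netdev_tx_t fake_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* NETDEV_TX_OK tells the stack the skb was consumed, so free it */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
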
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 72c63e5dd630..38df81fcdc3a 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -337,16 +337,16 @@ int input_ff_create(struct input_dev *dev, int max_effects)
337 dev->ff = ff; 337 dev->ff = ff;
338 dev->flush = flush_effects; 338 dev->flush = flush_effects;
339 dev->event = input_ff_event; 339 dev->event = input_ff_event;
340 set_bit(EV_FF, dev->evbit); 340 __set_bit(EV_FF, dev->evbit);
341 341
342 /* Copy "true" bits into ff device bitmap */ 342 /* Copy "true" bits into ff device bitmap */
343 for (i = 0; i <= FF_MAX; i++) 343 for (i = 0; i <= FF_MAX; i++)
344 if (test_bit(i, dev->ffbit)) 344 if (test_bit(i, dev->ffbit))
345 set_bit(i, ff->ffbit); 345 __set_bit(i, ff->ffbit);
346 346
347 /* we can emulate RUMBLE with periodic effects */ 347 /* we can emulate RUMBLE with periodic effects */
348 if (test_bit(FF_PERIODIC, ff->ffbit)) 348 if (test_bit(FF_PERIODIC, ff->ffbit))
349 set_bit(FF_RUMBLE, dev->ffbit); 349 __set_bit(FF_RUMBLE, dev->ffbit);
350 350
351 return 0; 351 return 0;
352} 352}
@@ -362,12 +362,14 @@ EXPORT_SYMBOL_GPL(input_ff_create);
362 */ 362 */
363void input_ff_destroy(struct input_dev *dev) 363void input_ff_destroy(struct input_dev *dev)
364{ 364{
365 clear_bit(EV_FF, dev->evbit); 365 struct ff_device *ff = dev->ff;
366 if (dev->ff) { 366
367 if (dev->ff->destroy) 367 __clear_bit(EV_FF, dev->evbit);
368 dev->ff->destroy(dev->ff); 368 if (ff) {
369 kfree(dev->ff->private); 369 if (ff->destroy)
370 kfree(dev->ff); 370 ff->destroy(ff);
371 kfree(ff->private);
372 kfree(ff);
371 dev->ff = NULL; 373 dev->ff = NULL;
372 } 374 }
373} 375}
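
ff-core switches the capability bitmaps over to the non-atomic __set_bit()/__clear_bit() helpers, presumably because these bits are only touched in the setup and teardown paths, and input_ff_destroy() now works through a local ff pointer before clearing dev->ff. The bitmap-copy step from the hunk, in isolation:

/* copy the driver-advertised effect bits into the ff device bitmap */
for (i = 0; i <= FF_MAX; i++)
	if (test_bit(i, dev->ffbit))
		__set_bit(i, ff->ffbit);

/* rumble can be emulated with periodic effects */
if (test_bit(FF_PERIODIC, ff->ffbit))
	__set_bit(FF_RUMBLE, dev->ffbit);
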
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 2d1415e16834..b483b2995fa9 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -61,7 +61,6 @@ struct ml_device {
61 struct ml_effect_state states[FF_MEMLESS_EFFECTS]; 61 struct ml_effect_state states[FF_MEMLESS_EFFECTS];
62 int gain; 62 int gain;
63 struct timer_list timer; 63 struct timer_list timer;
64 spinlock_t timer_lock;
65 struct input_dev *dev; 64 struct input_dev *dev;
66 65
67 int (*play_effect)(struct input_dev *dev, void *data, 66 int (*play_effect)(struct input_dev *dev, void *data,
@@ -368,38 +367,38 @@ static void ml_effect_timer(unsigned long timer_data)
368{ 367{
369 struct input_dev *dev = (struct input_dev *)timer_data; 368 struct input_dev *dev = (struct input_dev *)timer_data;
370 struct ml_device *ml = dev->ff->private; 369 struct ml_device *ml = dev->ff->private;
370 unsigned long flags;
371 371
372 debug("timer: updating effects"); 372 debug("timer: updating effects");
373 373
374 spin_lock(&ml->timer_lock); 374 spin_lock_irqsave(&dev->event_lock, flags);
375 ml_play_effects(ml); 375 ml_play_effects(ml);
376 spin_unlock(&ml->timer_lock); 376 spin_unlock_irqrestore(&dev->event_lock, flags);
377} 377}
378 378
379/*
380 * Sets requested gain for FF effects. Called with dev->event_lock held.
381 */
379static void ml_ff_set_gain(struct input_dev *dev, u16 gain) 382static void ml_ff_set_gain(struct input_dev *dev, u16 gain)
380{ 383{
381 struct ml_device *ml = dev->ff->private; 384 struct ml_device *ml = dev->ff->private;
382 int i; 385 int i;
383 386
384 spin_lock_bh(&ml->timer_lock);
385
386 ml->gain = gain; 387 ml->gain = gain;
387 388
388 for (i = 0; i < FF_MEMLESS_EFFECTS; i++) 389 for (i = 0; i < FF_MEMLESS_EFFECTS; i++)
389 __clear_bit(FF_EFFECT_PLAYING, &ml->states[i].flags); 390 __clear_bit(FF_EFFECT_PLAYING, &ml->states[i].flags);
390 391
391 ml_play_effects(ml); 392 ml_play_effects(ml);
392
393 spin_unlock_bh(&ml->timer_lock);
394} 393}
395 394
395/*
396 * Start/stop specified FF effect. Called with dev->event_lock held.
397 */
396static int ml_ff_playback(struct input_dev *dev, int effect_id, int value) 398static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
397{ 399{
398 struct ml_device *ml = dev->ff->private; 400 struct ml_device *ml = dev->ff->private;
399 struct ml_effect_state *state = &ml->states[effect_id]; 401 struct ml_effect_state *state = &ml->states[effect_id];
400 unsigned long flags;
401
402 spin_lock_irqsave(&ml->timer_lock, flags);
403 402
404 if (value > 0) { 403 if (value > 0) {
405 debug("initiated play"); 404 debug("initiated play");
@@ -425,8 +424,6 @@ static int ml_ff_playback(struct input_dev *dev, int effect_id, int value)
425 ml_play_effects(ml); 424 ml_play_effects(ml);
426 } 425 }
427 426
428 spin_unlock_irqrestore(&ml->timer_lock, flags);
429
430 return 0; 427 return 0;
431} 428}
432 429
@@ -436,7 +433,7 @@ static int ml_ff_upload(struct input_dev *dev,
436 struct ml_device *ml = dev->ff->private; 433 struct ml_device *ml = dev->ff->private;
437 struct ml_effect_state *state = &ml->states[effect->id]; 434 struct ml_effect_state *state = &ml->states[effect->id];
438 435
439 spin_lock_bh(&ml->timer_lock); 436 spin_lock_irq(&dev->event_lock);
440 437
441 if (test_bit(FF_EFFECT_STARTED, &state->flags)) { 438 if (test_bit(FF_EFFECT_STARTED, &state->flags)) {
442 __clear_bit(FF_EFFECT_PLAYING, &state->flags); 439 __clear_bit(FF_EFFECT_PLAYING, &state->flags);
@@ -448,7 +445,7 @@ static int ml_ff_upload(struct input_dev *dev,
448 ml_schedule_timer(ml); 445 ml_schedule_timer(ml);
449 } 446 }
450 447
451 spin_unlock_bh(&ml->timer_lock); 448 spin_unlock_irq(&dev->event_lock);
452 449
453 return 0; 450 return 0;
454} 451}
@@ -482,7 +479,6 @@ int input_ff_create_memless(struct input_dev *dev, void *data,
482 ml->private = data; 479 ml->private = data;
483 ml->play_effect = play_effect; 480 ml->play_effect = play_effect;
484 ml->gain = 0xffff; 481 ml->gain = 0xffff;
485 spin_lock_init(&ml->timer_lock);
486 setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev); 482 setup_timer(&ml->timer, ml_effect_timer, (unsigned long)dev);
487 483
488 set_bit(FF_GAIN, dev->ffbit); 484 set_bit(FF_GAIN, dev->ffbit);
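
ff-memless drops its private timer_lock and relies on the input core's dev->event_lock: the new comments note that set_gain and playback are called with event_lock already held, the effect timer takes the lock itself with spin_lock_irqsave(), and upload uses spin_lock_irq() from process context. The timer side, as in the hunk:

static void example_effect_timer(unsigned long timer_data)
{
	struct input_dev *dev = (struct input_dev *)timer_data;
	struct ml_device *ml = dev->ff->private;
	unsigned long flags;

	/* event_lock is taken from interrupt context elsewhere,
	 * so disable interrupts while holding it here */
	spin_lock_irqsave(&dev->event_lock, flags);
	ml_play_effects(ml);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
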
diff --git a/drivers/input/input.c b/drivers/input/input.c
index cc763c96fada..2266ecbfbc01 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1292,17 +1292,24 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1292 return 0; 1292 return 0;
1293} 1293}
1294 1294
1295#define INPUT_DO_TOGGLE(dev, type, bits, on) \ 1295#define INPUT_DO_TOGGLE(dev, type, bits, on) \
1296 do { \ 1296 do { \
1297 int i; \ 1297 int i; \
1298 if (!test_bit(EV_##type, dev->evbit)) \ 1298 bool active; \
1299 break; \ 1299 \
1300 for (i = 0; i < type##_MAX; i++) { \ 1300 if (!test_bit(EV_##type, dev->evbit)) \
1301 if (!test_bit(i, dev->bits##bit) || \ 1301 break; \
1302 !test_bit(i, dev->bits)) \ 1302 \
1303 continue; \ 1303 for (i = 0; i < type##_MAX; i++) { \
1304 dev->event(dev, EV_##type, i, on); \ 1304 if (!test_bit(i, dev->bits##bit)) \
1305 } \ 1305 continue; \
1306 \
1307 active = test_bit(i, dev->bits); \
1308 if (!active && !on) \
1309 continue; \
1310 \
1311 dev->event(dev, EV_##type, i, on ? active : 0); \
1312 } \
1306 } while (0) 1313 } while (0)
1307 1314
1308#ifdef CONFIG_PM 1315#ifdef CONFIG_PM
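
The reworked INPUT_DO_TOGGLE reads each item's current state into 'active', skips only items that are both inactive and being switched off, and otherwise emits the saved state on restore ('on') and 0 on suspend. A function-style sketch of the same logic specialised to LEDs (the macro itself is generic over event types):

static void do_toggle_led(struct input_dev *dev, unsigned int code, bool on)
{
	bool active;

	if (!test_bit(EV_LED, dev->evbit) || !test_bit(code, dev->ledbit))
		return;

	active = test_bit(code, dev->led);
	if (!active && !on)
		return;		/* already off and being switched off */

	dev->event(dev, EV_LED, code, on ? active : 0);
}
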
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 4452eabbee6d..28e6110d1ff8 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1174,6 +1174,18 @@ static int atkbd_reconnect(struct serio *serio)
1174 return -1; 1174 return -1;
1175 1175
1176 atkbd_activate(atkbd); 1176 atkbd_activate(atkbd);
1177
1178 /*
1179 * Restore LED state and repeat rate. While input core
1180 * will do this for us at resume time reconnect may happen
1181 * because user requested it via sysfs or simply because
1182 * keyboard was unplugged and plugged in again so we need
1183 * to do it ourselves here.
1184 */
1185 atkbd_set_leds(atkbd);
1186 if (!atkbd->softrepeat)
1187 atkbd_set_repeat_rate(atkbd);
1188
1177 } 1189 }
1178 1190
1179 atkbd_enable(atkbd); 1191 atkbd_enable(atkbd);
@@ -1422,6 +1434,7 @@ static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
1422 1434
1423 atkbd->dev = new_dev; 1435 atkbd->dev = new_dev;
1424 atkbd->set = atkbd_select_set(atkbd, value, atkbd->extra); 1436 atkbd->set = atkbd_select_set(atkbd, value, atkbd->extra);
1437 atkbd_reset_state(atkbd);
1425 atkbd_activate(atkbd); 1438 atkbd_activate(atkbd);
1426 atkbd_set_keycode_table(atkbd); 1439 atkbd_set_keycode_table(atkbd);
1427 atkbd_set_device_attrs(atkbd); 1440 atkbd_set_device_attrs(atkbd);
diff --git a/drivers/input/mouse/lifebook.c b/drivers/input/mouse/lifebook.c
index 5e6308694408..82811558ec33 100644
--- a/drivers/input/mouse/lifebook.c
+++ b/drivers/input/mouse/lifebook.c
@@ -107,8 +107,7 @@ static const struct dmi_system_id lifebook_dmi_table[] = {
107 .matches = { 107 .matches = {
108 DMI_MATCH(DMI_PRODUCT_NAME, "CF-72"), 108 DMI_MATCH(DMI_PRODUCT_NAME, "CF-72"),
109 }, 109 },
110 .callback = lifebook_set_serio_phys, 110 .callback = lifebook_set_6byte_proto,
111 .driver_data = "isa0060/serio3",
112 }, 111 },
113 { 112 {
114 .ident = "Lifebook B142", 113 .ident = "Lifebook B142",
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 690aed905436..07c53798301a 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -581,7 +581,7 @@ static int cortron_detect(struct psmouse *psmouse, bool set_properties)
581static int psmouse_extensions(struct psmouse *psmouse, 581static int psmouse_extensions(struct psmouse *psmouse,
582 unsigned int max_proto, bool set_properties) 582 unsigned int max_proto, bool set_properties)
583{ 583{
584 bool synaptics_hardware = true; 584 bool synaptics_hardware = false;
585 585
586/* 586/*
587 * We always check for lifebook because it does not disturb mouse 587 * We always check for lifebook because it does not disturb mouse
@@ -1673,7 +1673,7 @@ static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp)
1673{ 1673{
1674 int type = *((unsigned int *)kp->arg); 1674 int type = *((unsigned int *)kp->arg);
1675 1675
1676 return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); 1676 return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
1677} 1677}
1678 1678
1679static int __init psmouse_init(void) 1679static int __init psmouse_init(void)
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a537925f7651..2bcf1ace27c0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -447,6 +447,27 @@ static struct dmi_system_id __initdata i8042_dmi_reset_table[] = {
447 DMI_MATCH(DMI_PRODUCT_NAME, "N10"), 447 DMI_MATCH(DMI_PRODUCT_NAME, "N10"),
448 }, 448 },
449 }, 449 },
450 {
451 .ident = "Dell Vostro 1320",
452 .matches = {
453 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
454 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1320"),
455 },
456 },
457 {
458 .ident = "Dell Vostro 1520",
459 .matches = {
460 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
461 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1520"),
462 },
463 },
464 {
465 .ident = "Dell Vostro 1720",
466 .matches = {
467 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
468 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
469 },
470 },
450 { } 471 { }
451}; 472};
452 473
diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c
index c72565520e41..5a6ae646a636 100644
--- a/drivers/isdn/hardware/avm/avm_cs.c
+++ b/drivers/isdn/hardware/avm/avm_cs.c
@@ -111,8 +111,6 @@ static int avmcs_probe(struct pcmcia_device *p_dev)
111 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 111 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
112 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 112 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
113 113
114 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
115
116 /* General socket configuration */ 114 /* General socket configuration */
117 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 115 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
118 p_dev->conf.IntType = INT_MEMORY_AND_IO; 116 p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -198,7 +196,6 @@ static int avmcs_config(struct pcmcia_device *link)
198 */ 196 */
199 i = pcmcia_request_irq(link, &link->irq); 197 i = pcmcia_request_irq(link, &link->irq);
200 if (i != 0) { 198 if (i != 0) {
201 cs_error(link, RequestIRQ, i);
202 /* undo */ 199 /* undo */
203 pcmcia_disable_device(link); 200 pcmcia_disable_device(link);
204 break; 201 break;
@@ -209,7 +206,6 @@ static int avmcs_config(struct pcmcia_device *link)
209 */ 206 */
210 i = pcmcia_request_configuration(link, &link->conf); 207 i = pcmcia_request_configuration(link, &link->conf);
211 if (i != 0) { 208 if (i != 0) {
212 cs_error(link, RequestConfiguration, i);
213 pcmcia_disable_device(link); 209 pcmcia_disable_device(link);
214 break; 210 break;
215 } 211 }
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index faed794cf75a..a6624ad252c5 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5481,7 +5481,7 @@ HFCmulti_init(void)
5481 if (err) { 5481 if (err) {
5482 printk(KERN_ERR "error registering embedded driver: " 5482 printk(KERN_ERR "error registering embedded driver: "
5483 "%x\n", err); 5483 "%x\n", err);
5484 return -err; 5484 return err;
5485 } 5485 }
5486 HFC_cnt++; 5486 HFC_cnt++;
5487 printk(KERN_INFO "%d devices registered\n", HFC_cnt); 5487 printk(KERN_INFO "%d devices registered\n", HFC_cnt);
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index 23560c897ec3..f9bdff39cf4a 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -30,22 +30,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for AVM A1/Fritz!PCMCIA car
30MODULE_AUTHOR("Carsten Paeth"); 30MODULE_AUTHOR("Carsten Paeth");
31MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
32 32
33/*
34 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
35 you do not define PCMCIA_DEBUG at all, all the debug code will be
36 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
37 be present but disabled -- but it can then be enabled for specific
38 modules at load time with a 'pc_debug=#' option to insmod.
39*/
40#ifdef PCMCIA_DEBUG
41static int pc_debug = PCMCIA_DEBUG;
42module_param(pc_debug, int, 0);
43#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
44static char *version =
45"avma1_cs.c 1.00 1998/01/23 10:00:00 (Carsten Paeth)";
46#else
47#define DEBUG(n, args...)
48#endif
49 33
50/*====================================================================*/ 34/*====================================================================*/
51 35
@@ -119,7 +103,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev)
119{ 103{
120 local_info_t *local; 104 local_info_t *local;
121 105
122 DEBUG(0, "avma1cs_attach()\n"); 106 dev_dbg(&p_dev->dev, "avma1cs_attach()\n");
123 107
124 /* Allocate space for private device-specific data */ 108 /* Allocate space for private device-specific data */
125 local = kzalloc(sizeof(local_info_t), GFP_KERNEL); 109 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
@@ -139,8 +123,6 @@ static int avma1cs_probe(struct pcmcia_device *p_dev)
139 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 123 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
140 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 124 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
141 125
142 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
143
144 /* General socket configuration */ 126 /* General socket configuration */
145 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 127 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
146 p_dev->conf.IntType = INT_MEMORY_AND_IO; 128 p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -161,7 +143,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev)
161 143
162static void avma1cs_detach(struct pcmcia_device *link) 144static void avma1cs_detach(struct pcmcia_device *link)
163{ 145{
164 DEBUG(0, "avma1cs_detach(0x%p)\n", link); 146 dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link);
165 avma1cs_release(link); 147 avma1cs_release(link);
166 kfree(link->priv); 148 kfree(link->priv);
167} /* avma1cs_detach */ 149} /* avma1cs_detach */
@@ -203,7 +185,7 @@ static int avma1cs_config(struct pcmcia_device *link)
203 185
204 dev = link->priv; 186 dev = link->priv;
205 187
206 DEBUG(0, "avma1cs_config(0x%p)\n", link); 188 dev_dbg(&link->dev, "avma1cs_config(0x%p)\n", link);
207 189
208 devname[0] = 0; 190 devname[0] = 0;
209 if (link->prod_id[1]) 191 if (link->prod_id[1])
@@ -218,7 +200,6 @@ static int avma1cs_config(struct pcmcia_device *link)
218 */ 200 */
219 i = pcmcia_request_irq(link, &link->irq); 201 i = pcmcia_request_irq(link, &link->irq);
220 if (i != 0) { 202 if (i != 0) {
221 cs_error(link, RequestIRQ, i);
222 /* undo */ 203 /* undo */
223 pcmcia_disable_device(link); 204 pcmcia_disable_device(link);
224 break; 205 break;
@@ -229,7 +210,6 @@ static int avma1cs_config(struct pcmcia_device *link)
229 */ 210 */
230 i = pcmcia_request_configuration(link, &link->conf); 211 i = pcmcia_request_configuration(link, &link->conf);
231 if (i != 0) { 212 if (i != 0) {
232 cs_error(link, RequestConfiguration, i);
233 pcmcia_disable_device(link); 213 pcmcia_disable_device(link);
234 break; 214 break;
235 } 215 }
@@ -281,7 +261,7 @@ static void avma1cs_release(struct pcmcia_device *link)
281{ 261{
282 local_info_t *local = link->priv; 262 local_info_t *local = link->priv;
283 263
284 DEBUG(0, "avma1cs_release(0x%p)\n", link); 264 dev_dbg(&link->dev, "avma1cs_release(0x%p)\n", link);
285 265
286 /* now unregister function with hisax */ 266 /* now unregister function with hisax */
287 HiSax_closecard(local->node.minor); 267 HiSax_closecard(local->node.minor);
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index f4d0fe29bcf8..a2f709f53974 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -57,23 +57,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Elsa PCM cards");
57MODULE_AUTHOR("Klaus Lichtenwalder"); 57MODULE_AUTHOR("Klaus Lichtenwalder");
58MODULE_LICENSE("Dual MPL/GPL"); 58MODULE_LICENSE("Dual MPL/GPL");
59 59
60/*
61 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
62 you do not define PCMCIA_DEBUG at all, all the debug code will be
63 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
64 be present but disabled -- but it can then be enabled for specific
65 modules at load time with a 'pc_debug=#' option to insmod.
66*/
67
68#ifdef PCMCIA_DEBUG
69static int pc_debug = PCMCIA_DEBUG;
70module_param(pc_debug, int, 0);
71#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
72static char *version =
73"elsa_cs.c $Revision: 1.2.2.4 $ $Date: 2004/01/25 15:07:06 $ (K.Lichtenwalder)";
74#else
75#define DEBUG(n, args...)
76#endif
77 60
78/*====================================================================*/ 61/*====================================================================*/
79 62
@@ -142,7 +125,7 @@ static int elsa_cs_probe(struct pcmcia_device *link)
142{ 125{
143 local_info_t *local; 126 local_info_t *local;
144 127
145 DEBUG(0, "elsa_cs_attach()\n"); 128 dev_dbg(&link->dev, "elsa_cs_attach()\n");
146 129
147 /* Allocate space for private device-specific data */ 130 /* Allocate space for private device-specific data */
148 local = kzalloc(sizeof(local_info_t), GFP_KERNEL); 131 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
@@ -155,7 +138,6 @@ static int elsa_cs_probe(struct pcmcia_device *link)
155 138
156 /* Interrupt setup */ 139 /* Interrupt setup */
157 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 140 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
158 link->irq.IRQInfo1 = IRQ_LEVEL_ID|IRQ_SHARE_ID;
159 link->irq.Handler = NULL; 141 link->irq.Handler = NULL;
160 142
161 /* 143 /*
@@ -188,7 +170,7 @@ static void elsa_cs_detach(struct pcmcia_device *link)
188{ 170{
189 local_info_t *info = link->priv; 171 local_info_t *info = link->priv;
190 172
191 DEBUG(0, "elsa_cs_detach(0x%p)\n", link); 173 dev_dbg(&link->dev, "elsa_cs_detach(0x%p)\n", link);
192 174
193 info->busy = 1; 175 info->busy = 1;
194 elsa_cs_release(link); 176 elsa_cs_release(link);
@@ -231,30 +213,25 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev,
231static int elsa_cs_config(struct pcmcia_device *link) 213static int elsa_cs_config(struct pcmcia_device *link)
232{ 214{
233 local_info_t *dev; 215 local_info_t *dev;
234 int i, last_fn; 216 int i;
235 IsdnCard_t icard; 217 IsdnCard_t icard;
236 218
237 DEBUG(0, "elsa_config(0x%p)\n", link); 219 dev_dbg(&link->dev, "elsa_config(0x%p)\n", link);
238 dev = link->priv; 220 dev = link->priv;
239 221
240 i = pcmcia_loop_config(link, elsa_cs_configcheck, NULL); 222 i = pcmcia_loop_config(link, elsa_cs_configcheck, NULL);
241 if (i != 0) { 223 if (i != 0)
242 last_fn = RequestIO; 224 goto failed;
243 goto cs_failed;
244 }
245 225
246 i = pcmcia_request_irq(link, &link->irq); 226 i = pcmcia_request_irq(link, &link->irq);
247 if (i != 0) { 227 if (i != 0) {
248 link->irq.AssignedIRQ = 0; 228 link->irq.AssignedIRQ = 0;
249 last_fn = RequestIRQ; 229 goto failed;
250 goto cs_failed;
251 } 230 }
252 231
253 i = pcmcia_request_configuration(link, &link->conf); 232 i = pcmcia_request_configuration(link, &link->conf);
254 if (i != 0) { 233 if (i != 0)
255 last_fn = RequestConfiguration; 234 goto failed;
256 goto cs_failed;
257 }
258 235
259 /* At this point, the dev_node_t structure(s) should be 236 /* At this point, the dev_node_t structure(s) should be
260 initialized and arranged in a linked list at link->dev. *//* */ 237 initialized and arranged in a linked list at link->dev. *//* */
@@ -290,8 +267,7 @@ static int elsa_cs_config(struct pcmcia_device *link)
290 ((local_info_t*)link->priv)->cardnr = i; 267 ((local_info_t*)link->priv)->cardnr = i;
291 268
292 return 0; 269 return 0;
293cs_failed: 270failed:
294 cs_error(link, last_fn, i);
295 elsa_cs_release(link); 271 elsa_cs_release(link);
296 return -ENODEV; 272 return -ENODEV;
297} /* elsa_cs_config */ 273} /* elsa_cs_config */
@@ -308,7 +284,7 @@ static void elsa_cs_release(struct pcmcia_device *link)
308{ 284{
309 local_info_t *local = link->priv; 285 local_info_t *local = link->priv;
310 286
311 DEBUG(0, "elsa_cs_release(0x%p)\n", link); 287 dev_dbg(&link->dev, "elsa_cs_release(0x%p)\n", link);
312 288
313 if (local) { 289 if (local) {
314 if (local->cardnr >= 0) { 290 if (local->cardnr >= 0) {
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 9a3c9f5e4fe8..af5d393cc2d0 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -57,24 +57,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Sedlbauer cards");
57MODULE_AUTHOR("Marcus Niemann"); 57MODULE_AUTHOR("Marcus Niemann");
58MODULE_LICENSE("Dual MPL/GPL"); 58MODULE_LICENSE("Dual MPL/GPL");
59 59
60/*
61 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
62 you do not define PCMCIA_DEBUG at all, all the debug code will be
63 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
64 be present but disabled -- but it can then be enabled for specific
65 modules at load time with a 'pc_debug=#' option to insmod.
66*/
67
68#ifdef PCMCIA_DEBUG
69static int pc_debug = PCMCIA_DEBUG;
70module_param(pc_debug, int, 0);
71#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
72static char *version =
73"sedlbauer_cs.c 1.1a 2001/01/28 15:04:04 (M.Niemann)";
74#else
75#define DEBUG(n, args...)
76#endif
77
78 60
79/*====================================================================*/ 61/*====================================================================*/
80 62
@@ -151,7 +133,7 @@ static int sedlbauer_probe(struct pcmcia_device *link)
151{ 133{
152 local_info_t *local; 134 local_info_t *local;
153 135
154 DEBUG(0, "sedlbauer_attach()\n"); 136 dev_dbg(&link->dev, "sedlbauer_attach()\n");
155 137
156 /* Allocate space for private device-specific data */ 138 /* Allocate space for private device-specific data */
157 local = kzalloc(sizeof(local_info_t), GFP_KERNEL); 139 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
@@ -163,7 +145,6 @@ static int sedlbauer_probe(struct pcmcia_device *link)
163 145
164 /* Interrupt setup */ 146 /* Interrupt setup */
165 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 147 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
166 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
167 link->irq.Handler = NULL; 148 link->irq.Handler = NULL;
168 149
169 /* 150 /*
@@ -198,7 +179,7 @@ static int sedlbauer_probe(struct pcmcia_device *link)
198 179
199static void sedlbauer_detach(struct pcmcia_device *link) 180static void sedlbauer_detach(struct pcmcia_device *link)
200{ 181{
201 DEBUG(0, "sedlbauer_detach(0x%p)\n", link); 182 dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link);
202 183
203 ((local_info_t *)link->priv)->stop = 1; 184 ((local_info_t *)link->priv)->stop = 1;
204 sedlbauer_release(link); 185 sedlbauer_release(link);
@@ -214,9 +195,6 @@ static void sedlbauer_detach(struct pcmcia_device *link)
214 device available to the system. 195 device available to the system.
215 196
216======================================================================*/ 197======================================================================*/
217#define CS_CHECK(fn, ret) \
218do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
219
220static int sedlbauer_config_check(struct pcmcia_device *p_dev, 198static int sedlbauer_config_check(struct pcmcia_device *p_dev,
221 cistpl_cftable_entry_t *cfg, 199 cistpl_cftable_entry_t *cfg,
222 cistpl_cftable_entry_t *dflt, 200 cistpl_cftable_entry_t *dflt,
@@ -293,11 +271,11 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
293 req->Base = mem->win[0].host_addr; 271 req->Base = mem->win[0].host_addr;
294 req->Size = mem->win[0].len; 272 req->Size = mem->win[0].len;
295 req->AccessSpeed = 0; 273 req->AccessSpeed = 0;
296 if (pcmcia_request_window(&p_dev, req, &p_dev->win) != 0) 274 if (pcmcia_request_window(p_dev, req, &p_dev->win) != 0)
297 return -ENODEV; 275 return -ENODEV;
298 map.Page = 0; 276 map.Page = 0;
299 map.CardOffset = mem->win[0].card_addr; 277 map.CardOffset = mem->win[0].card_addr;
300 if (pcmcia_map_mem_page(p_dev->win, &map) != 0) 278 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
301 return -ENODEV; 279 return -ENODEV;
302 } 280 }
303 return 0; 281 return 0;
@@ -309,10 +287,10 @@ static int sedlbauer_config(struct pcmcia_device *link)
309{ 287{
310 local_info_t *dev = link->priv; 288 local_info_t *dev = link->priv;
311 win_req_t *req; 289 win_req_t *req;
312 int last_fn, last_ret; 290 int ret;
313 IsdnCard_t icard; 291 IsdnCard_t icard;
314 292
315 DEBUG(0, "sedlbauer_config(0x%p)\n", link); 293 dev_dbg(&link->dev, "sedlbauer_config(0x%p)\n", link);
316 294
317 req = kzalloc(sizeof(win_req_t), GFP_KERNEL); 295 req = kzalloc(sizeof(win_req_t), GFP_KERNEL);
318 if (!req) 296 if (!req)
@@ -330,8 +308,8 @@ static int sedlbauer_config(struct pcmcia_device *link)
330 these things without consulting the CIS, and most client drivers 308 these things without consulting the CIS, and most client drivers
331 will only use the CIS to fill in implementation-defined details. 309 will only use the CIS to fill in implementation-defined details.
332 */ 310 */
333 last_ret = pcmcia_loop_config(link, sedlbauer_config_check, req); 311 ret = pcmcia_loop_config(link, sedlbauer_config_check, req);
334 if (last_ret) 312 if (ret)
335 goto failed; 313 goto failed;
336 314
337 /* 315 /*
@@ -339,15 +317,20 @@ static int sedlbauer_config(struct pcmcia_device *link)
339 handler to the interrupt, unless the 'Handler' member of the 317 handler to the interrupt, unless the 'Handler' member of the
340 irq structure is initialized. 318 irq structure is initialized.
341 */ 319 */
342 if (link->conf.Attributes & CONF_ENABLE_IRQ) 320 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
343 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 321 ret = pcmcia_request_irq(link, &link->irq);
322 if (ret)
323 goto failed;
324 }
344 325
345 /* 326 /*
346 This actually configures the PCMCIA socket -- setting up 327 This actually configures the PCMCIA socket -- setting up
347 the I/O windows and the interrupt mapping, and putting the 328 the I/O windows and the interrupt mapping, and putting the
348 card and host interface into "Memory and IO" mode. 329 card and host interface into "Memory and IO" mode.
349 */ 330 */
350 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 331 ret = pcmcia_request_configuration(link, &link->conf);
332 if (ret)
333 goto failed;
351 334
352 /* 335 /*
353 At this point, the dev_node_t structure(s) need to be 336 At this point, the dev_node_t structure(s) need to be
@@ -380,19 +363,18 @@ static int sedlbauer_config(struct pcmcia_device *link)
380 icard.protocol = protocol; 363 icard.protocol = protocol;
381 icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA; 364 icard.typ = ISDN_CTYPE_SEDLBAUER_PCMCIA;
382 365
383 last_ret = hisax_init_pcmcia(link, &(((local_info_t*)link->priv)->stop), &icard); 366 ret = hisax_init_pcmcia(link,
384 if (last_ret < 0) { 367 &(((local_info_t *)link->priv)->stop), &icard);
385 printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n", 368 if (ret < 0) {
386 last_ret, link->io.BasePort1); 369 printk(KERN_ERR "sedlbauer_cs: failed to initialize SEDLBAUER PCMCIA %d at i/o %#x\n",
370 ret, link->io.BasePort1);
387 sedlbauer_release(link); 371 sedlbauer_release(link);
388 return -ENODEV; 372 return -ENODEV;
389 } else 373 } else
390 ((local_info_t*)link->priv)->cardnr = last_ret; 374 ((local_info_t *)link->priv)->cardnr = ret;
391 375
392 return 0; 376 return 0;
393 377
394cs_failed:
395 cs_error(link, last_fn, last_ret);
396failed: 378failed:
397 sedlbauer_release(link); 379 sedlbauer_release(link);
398 return -ENODEV; 380 return -ENODEV;
@@ -410,7 +392,7 @@ failed:
410static void sedlbauer_release(struct pcmcia_device *link) 392static void sedlbauer_release(struct pcmcia_device *link)
411{ 393{
412 local_info_t *local = link->priv; 394 local_info_t *local = link->priv;
413 DEBUG(0, "sedlbauer_release(0x%p)\n", link); 395 dev_dbg(&link->dev, "sedlbauer_release(0x%p)\n", link);
414 396
415 if (local) { 397 if (local) {
416 if (local->cardnr >= 0) { 398 if (local->cardnr >= 0) {
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index 623d111544d4..ea705394ce2b 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -38,23 +38,6 @@ MODULE_DESCRIPTION("ISDN4Linux: PCMCIA client driver for Teles PCMCIA cards");
38MODULE_AUTHOR("Christof Petig, christof.petig@wtal.de, Karsten Keil, kkeil@suse.de"); 38MODULE_AUTHOR("Christof Petig, christof.petig@wtal.de, Karsten Keil, kkeil@suse.de");
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41/*
42 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
43 you do not define PCMCIA_DEBUG at all, all the debug code will be
44 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
45 be present but disabled -- but it can then be enabled for specific
46 modules at load time with a 'pc_debug=#' option to insmod.
47*/
48
49#ifdef PCMCIA_DEBUG
50static int pc_debug = PCMCIA_DEBUG;
51module_param(pc_debug, int, 0);
52#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
53static char *version =
54"teles_cs.c 2.10 2002/07/30 22:23:34 kkeil";
55#else
56#define DEBUG(n, args...)
57#endif
58 41
59/*====================================================================*/ 42/*====================================================================*/
60 43
@@ -133,7 +116,7 @@ static int teles_probe(struct pcmcia_device *link)
133{ 116{
134 local_info_t *local; 117 local_info_t *local;
135 118
136 DEBUG(0, "teles_attach()\n"); 119 dev_dbg(&link->dev, "teles_attach()\n");
137 120
138 /* Allocate space for private device-specific data */ 121 /* Allocate space for private device-specific data */
139 local = kzalloc(sizeof(local_info_t), GFP_KERNEL); 122 local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
@@ -145,7 +128,6 @@ static int teles_probe(struct pcmcia_device *link)
145 128
146 /* Interrupt setup */ 129 /* Interrupt setup */
147 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 130 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
148 link->irq.IRQInfo1 = IRQ_LEVEL_ID|IRQ_SHARE_ID;
149 link->irq.Handler = NULL; 131 link->irq.Handler = NULL;
150 132
151 /* 133 /*
@@ -178,7 +160,7 @@ static void teles_detach(struct pcmcia_device *link)
178{ 160{
179 local_info_t *info = link->priv; 161 local_info_t *info = link->priv;
180 162
181 DEBUG(0, "teles_detach(0x%p)\n", link); 163 dev_dbg(&link->dev, "teles_detach(0x%p)\n", link);
182 164
183 info->busy = 1; 165 info->busy = 1;
184 teles_cs_release(link); 166 teles_cs_release(link);
@@ -221,30 +203,25 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev,
221static int teles_cs_config(struct pcmcia_device *link) 203static int teles_cs_config(struct pcmcia_device *link)
222{ 204{
223 local_info_t *dev; 205 local_info_t *dev;
224 int i, last_fn; 206 int i;
225 IsdnCard_t icard; 207 IsdnCard_t icard;
226 208
227 DEBUG(0, "teles_config(0x%p)\n", link); 209 dev_dbg(&link->dev, "teles_config(0x%p)\n", link);
228 dev = link->priv; 210 dev = link->priv;
229 211
230 i = pcmcia_loop_config(link, teles_cs_configcheck, NULL); 212 i = pcmcia_loop_config(link, teles_cs_configcheck, NULL);
231 if (i != 0) { 213 if (i != 0)
232 last_fn = RequestIO;
233 goto cs_failed; 214 goto cs_failed;
234 }
235 215
236 i = pcmcia_request_irq(link, &link->irq); 216 i = pcmcia_request_irq(link, &link->irq);
237 if (i != 0) { 217 if (i != 0) {
238 link->irq.AssignedIRQ = 0; 218 link->irq.AssignedIRQ = 0;
239 last_fn = RequestIRQ;
240 goto cs_failed; 219 goto cs_failed;
241 } 220 }
242 221
243 i = pcmcia_request_configuration(link, &link->conf); 222 i = pcmcia_request_configuration(link, &link->conf);
244 if (i != 0) { 223 if (i != 0)
245 last_fn = RequestConfiguration;
246 goto cs_failed; 224 goto cs_failed;
247 }
248 225
249 /* At this point, the dev_node_t structure(s) should be 226 /* At this point, the dev_node_t structure(s) should be
250 initialized and arranged in a linked list at link->dev. *//* */ 227 initialized and arranged in a linked list at link->dev. *//* */
@@ -283,7 +260,6 @@ static int teles_cs_config(struct pcmcia_device *link)
283 return 0; 260 return 0;
284 261
285cs_failed: 262cs_failed:
286 cs_error(link, last_fn, i);
287 teles_cs_release(link); 263 teles_cs_release(link);
288 return -ENODEV; 264 return -ENODEV;
289} /* teles_cs_config */ 265} /* teles_cs_config */
@@ -300,7 +276,7 @@ static void teles_cs_release(struct pcmcia_device *link)
300{ 276{
301 local_info_t *local = link->priv; 277 local_info_t *local = link->priv;
302 278
303 DEBUG(0, "teles_cs_release(0x%p)\n", link); 279 dev_dbg(&link->dev, "teles_cs_release(0x%p)\n", link);
304 280
305 if (local) { 281 if (local) {
306 if (local->cardnr >= 0) { 282 if (local->cardnr >= 0) {
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 2d14b64202a3..642d5aaf53ce 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1535,10 +1535,8 @@ static int isdn_ppp_mp_bundle_array_init(void)
1535 int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle); 1535 int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle);
1536 if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL ) 1536 if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL )
1537 return -ENOMEM; 1537 return -ENOMEM;
1538 for (i = 0; i < ISDN_MAX_CHANNELS; i++) { 1538 for( i = 0; i < ISDN_MAX_CHANNELS; i++ )
1539 spin_lock_init(&isdn_ppp_bundle_arr[i].lock); 1539 spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
1540 skb_queue_head_init(&isdn_ppp_bundle_arr[i].frags);
1541 }
1542 return 0; 1540 return 0;
1543} 1541}
1544 1542
@@ -1571,7 +1569,7 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
1571 if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL) 1569 if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
1572 return -ENOMEM; 1570 return -ENOMEM;
1573 lp->next = lp->last = lp; /* nobody else in a queue */ 1571 lp->next = lp->last = lp; /* nobody else in a queue */
1574 skb_queue_head_init(&lp->netdev->pb->frags); 1572 lp->netdev->pb->frags = NULL;
1575 lp->netdev->pb->frames = 0; 1573 lp->netdev->pb->frames = 0;
1576 lp->netdev->pb->seq = UINT_MAX; 1574 lp->netdev->pb->seq = UINT_MAX;
1577 } 1575 }
@@ -1583,29 +1581,28 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
1583 1581
1584static u32 isdn_ppp_mp_get_seq( int short_seq, 1582static u32 isdn_ppp_mp_get_seq( int short_seq,
1585 struct sk_buff * skb, u32 last_seq ); 1583 struct sk_buff * skb, u32 last_seq );
1586static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from, 1584static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
1587 struct sk_buff *to); 1585 struct sk_buff * from, struct sk_buff * to );
1588static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp, 1586static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
1589 struct sk_buff *from, struct sk_buff *to, 1587 struct sk_buff * from, struct sk_buff * to );
1590 u32 lastseq); 1588static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb );
1591static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
1592static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb ); 1589static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb );
1593 1590
1594static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, 1591static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1595 struct sk_buff *skb) 1592 struct sk_buff *skb)
1596{ 1593{
1597 struct sk_buff *newfrag, *frag, *start, *nextf;
1598 u32 newseq, minseq, thisseq;
1599 isdn_mppp_stats *stats;
1600 struct ippp_struct *is; 1594 struct ippp_struct *is;
1595 isdn_net_local * lpq;
1596 ippp_bundle * mp;
1597 isdn_mppp_stats * stats;
1598 struct sk_buff * newfrag, * frag, * start, *nextf;
1599 u32 newseq, minseq, thisseq;
1601 unsigned long flags; 1600 unsigned long flags;
1602 isdn_net_local *lpq;
1603 ippp_bundle *mp;
1604 int slot; 1601 int slot;
1605 1602
1606 spin_lock_irqsave(&net_dev->pb->lock, flags); 1603 spin_lock_irqsave(&net_dev->pb->lock, flags);
1607 mp = net_dev->pb; 1604 mp = net_dev->pb;
1608 stats = &mp->stats; 1605 stats = &mp->stats;
1609 slot = lp->ppp_slot; 1606 slot = lp->ppp_slot;
1610 if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { 1607 if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
1611 printk(KERN_ERR "%s: lp->ppp_slot(%d)\n", 1608 printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
@@ -1616,19 +1613,20 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1616 return; 1613 return;
1617 } 1614 }
1618 is = ippp_table[slot]; 1615 is = ippp_table[slot];
1619 if (++mp->frames > stats->max_queue_len) 1616 if( ++mp->frames > stats->max_queue_len )
1620 stats->max_queue_len = mp->frames; 1617 stats->max_queue_len = mp->frames;
1621 1618
1622 if (is->debug & 0x8) 1619 if (is->debug & 0x8)
1623 isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb); 1620 isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);
1624 1621
1625 newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, 1622 newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
1626 skb, is->last_link_seqno); 1623 skb, is->last_link_seqno);
1624
1627 1625
1628 /* if this packet seq # is less than last already processed one, 1626 /* if this packet seq # is less than last already processed one,
1629 * toss it right away, but check for sequence start case first 1627 * toss it right away, but check for sequence start case first
1630 */ 1628 */
1631 if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) { 1629 if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) {
1632 mp->seq = newseq; /* the first packet: required for 1630 mp->seq = newseq; /* the first packet: required for
1633 * rfc1990 non-compliant clients -- 1631 * rfc1990 non-compliant clients --
1634 * prevents constant packet toss */ 1632 * prevents constant packet toss */
@@ -1638,7 +1636,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1638 spin_unlock_irqrestore(&mp->lock, flags); 1636 spin_unlock_irqrestore(&mp->lock, flags);
1639 return; 1637 return;
1640 } 1638 }
1641 1639
1642 /* find the minimum received sequence number over all links */ 1640 /* find the minimum received sequence number over all links */
1643 is->last_link_seqno = minseq = newseq; 1641 is->last_link_seqno = minseq = newseq;
1644 for (lpq = net_dev->queue;;) { 1642 for (lpq = net_dev->queue;;) {
@@ -1659,31 +1657,22 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1659 * packets */ 1657 * packets */
1660 newfrag = skb; 1658 newfrag = skb;
1661 1659
1662 /* Insert new fragment into the proper sequence slot. */ 1660 /* if this new fragment is before the first one, then enqueue it now. */
1663 skb_queue_walk(&mp->frags, frag) { 1661 if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) {
1664 if (MP_SEQ(frag) == newseq) { 1662 newfrag->next = frag;
1665 isdn_ppp_mp_free_skb(mp, newfrag); 1663 mp->frags = frag = newfrag;
1666 newfrag = NULL; 1664 newfrag = NULL;
1667 break; 1665 }
1668 }
1669 if (MP_LT(newseq, MP_SEQ(frag))) {
1670 __skb_queue_before(&mp->frags, frag, newfrag);
1671 newfrag = NULL;
1672 break;
1673 }
1674 }
1675 if (newfrag)
1676 __skb_queue_tail(&mp->frags, newfrag);
1677 1666
1678 frag = skb_peek(&mp->frags); 1667 start = MP_FLAGS(frag) & MP_BEGIN_FRAG &&
1679 start = ((MP_FLAGS(frag) & MP_BEGIN_FRAG) && 1668 MP_SEQ(frag) == mp->seq ? frag : NULL;
1680 (MP_SEQ(frag) == mp->seq)) ? frag : NULL;
1681 if (!start)
1682 goto check_overflow;
1683 1669
1684 /* main fragment traversing loop 1670 /*
1671 * main fragment traversing loop
1685 * 1672 *
1686 * try to accomplish several tasks: 1673 * try to accomplish several tasks:
1674 * - insert new fragment into the proper sequence slot (once that's done
1675 * newfrag will be set to NULL)
1687 * - reassemble any complete fragment sequence (non-null 'start' 1676 * - reassemble any complete fragment sequence (non-null 'start'
1688 * indicates there is a continguous sequence present) 1677 * indicates there is a continguous sequence present)
1689 * - discard any incomplete sequences that are below minseq -- due 1678 * - discard any incomplete sequences that are below minseq -- due
@@ -1692,46 +1681,71 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1692 * come to complete such sequence and it should be discarded 1681 * come to complete such sequence and it should be discarded
1693 * 1682 *
1694 * loop completes when we accomplished the following tasks: 1683 * loop completes when we accomplished the following tasks:
1684 * - new fragment is inserted in the proper sequence ('newfrag' is
1685 * set to NULL)
1695 * - we hit a gap in the sequence, so no reassembly/processing is 1686 * - we hit a gap in the sequence, so no reassembly/processing is
1696 * possible ('start' would be set to NULL) 1687 * possible ('start' would be set to NULL)
1697 * 1688 *
1698 * algorithm for this code is derived from code in the book 1689 * algorithm for this code is derived from code in the book
1699 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley) 1690 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
1700 */ 1691 */
1701 skb_queue_walk_safe(&mp->frags, frag, nextf) { 1692 while (start != NULL || newfrag != NULL) {
1702 thisseq = MP_SEQ(frag); 1693
1703 1694 thisseq = MP_SEQ(frag);
1704 /* check for misplaced start */ 1695 nextf = frag->next;
1705 if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) { 1696
1706 printk(KERN_WARNING"isdn_mppp(seq %d): new " 1697 /* drop any duplicate fragments */
1707 "BEGIN flag with no prior END", thisseq); 1698 if (newfrag != NULL && thisseq == newseq) {
1708 stats->seqerrs++; 1699 isdn_ppp_mp_free_skb(mp, newfrag);
1709 stats->frame_drops++; 1700 newfrag = NULL;
1710 isdn_ppp_mp_discard(mp, start, frag); 1701 }
1711 start = frag; 1702
1712 } else if (MP_LE(thisseq, minseq)) { 1703 /* insert new fragment before next element if possible. */
1713 if (MP_FLAGS(frag) & MP_BEGIN_FRAG) 1704 if (newfrag != NULL && (nextf == NULL ||
1705 MP_LT(newseq, MP_SEQ(nextf)))) {
1706 newfrag->next = nextf;
1707 frag->next = nextf = newfrag;
1708 newfrag = NULL;
1709 }
1710
1711 if (start != NULL) {
1712 /* check for misplaced start */
1713 if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
1714 printk(KERN_WARNING"isdn_mppp(seq %d): new "
1715 "BEGIN flag with no prior END", thisseq);
1716 stats->seqerrs++;
1717 stats->frame_drops++;
1718 start = isdn_ppp_mp_discard(mp, start,frag);
1719 nextf = frag->next;
1720 }
1721 } else if (MP_LE(thisseq, minseq)) {
1722 if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
1714 start = frag; 1723 start = frag;
1715 else { 1724 else {
1716 if (MP_FLAGS(frag) & MP_END_FRAG) 1725 if (MP_FLAGS(frag) & MP_END_FRAG)
1717 stats->frame_drops++; 1726 stats->frame_drops++;
1718 __skb_unlink(skb, &mp->frags); 1727 if( mp->frags == frag )
1728 mp->frags = nextf;
1719 isdn_ppp_mp_free_skb(mp, frag); 1729 isdn_ppp_mp_free_skb(mp, frag);
1730 frag = nextf;
1720 continue; 1731 continue;
1721 } 1732 }
1722 } 1733 }
1723 1734
1724 /* if we have end fragment, then we have full reassembly 1735 /* if start is non-null and we have end fragment, then
1725 * sequence -- reassemble and process packet now 1736 * we have full reassembly sequence -- reassemble
1737 * and process packet now
1726 */ 1738 */
1727 if (MP_FLAGS(frag) & MP_END_FRAG) { 1739 if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) {
1728 minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK; 1740 minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
1729 /* Reassemble the packet then dispatch it */ 1741 /* Reassemble the packet then dispatch it */
1730 isdn_ppp_mp_reassembly(net_dev, lp, start, frag, thisseq); 1742 isdn_ppp_mp_reassembly(net_dev, lp, start, nextf);
1743
1744 start = NULL;
1745 frag = NULL;
1731 1746
1732 start = NULL; 1747 mp->frags = nextf;
1733 frag = NULL; 1748 }
1734 }
1735 1749
1736 /* check if need to update start pointer: if we just 1750 /* check if need to update start pointer: if we just
1737 * reassembled the packet and sequence is contiguous 1751 * reassembled the packet and sequence is contiguous
@@ -1742,25 +1756,26 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1742 * below low watermark and set start to the next frag or 1756 * below low watermark and set start to the next frag or
1743 * clear start ptr. 1757 * clear start ptr.
1744 */ 1758 */
1745 if (nextf != (struct sk_buff *)&mp->frags && 1759 if (nextf != NULL &&
1746 ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) { 1760 ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
1747 /* if we just reassembled and the next one is here, 1761 /* if we just reassembled and the next one is here,
1748 * then start another reassembly. 1762 * then start another reassembly. */
1749 */ 1763
1750 if (frag == NULL) { 1764 if (frag == NULL) {
1751 if (MP_FLAGS(nextf) & MP_BEGIN_FRAG) 1765 if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
1752 start = nextf; 1766 start = nextf;
1753 else { 1767 else
1754 printk(KERN_WARNING"isdn_mppp(seq %d):" 1768 {
1755 " END flag with no following " 1769 printk(KERN_WARNING"isdn_mppp(seq %d):"
1756 "BEGIN", thisseq); 1770 " END flag with no following "
1771 "BEGIN", thisseq);
1757 stats->seqerrs++; 1772 stats->seqerrs++;
1758 } 1773 }
1759 } 1774 }
1760 } else { 1775
1761 if (nextf != (struct sk_buff *)&mp->frags && 1776 } else {
1762 frag != NULL && 1777 if ( nextf != NULL && frag != NULL &&
1763 MP_LT(thisseq, minseq)) { 1778 MP_LT(thisseq, minseq)) {
1764 /* we've got a break in the sequence 1779 /* we've got a break in the sequence
1765 * and we are not at the end yet 1780 * and we are not at the end yet
1766 * and we did not just reassemble 1781 * and we did not just reassemble
@@ -1769,39 +1784,41 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1769 * discard all the frames below low watermark 1784 * discard all the frames below low watermark
1770 * and start over */ 1785 * and start over */
1771 stats->frame_drops++; 1786 stats->frame_drops++;
1772 isdn_ppp_mp_discard(mp, start, nextf); 1787 mp->frags = isdn_ppp_mp_discard(mp,start,nextf);
1773 } 1788 }
1774 /* break in the sequence, no reassembly */ 1789 /* break in the sequence, no reassembly */
1775 start = NULL; 1790 start = NULL;
1776 } 1791 }
1777 if (!start) 1792
1778 break; 1793 frag = nextf;
1779 } 1794 } /* while -- main loop */
1780 1795
1781check_overflow: 1796 if (mp->frags == NULL)
1797 mp->frags = frag;
1798
1782 /* rather straighforward way to deal with (not very) possible 1799 /* rather straighforward way to deal with (not very) possible
1783 * queue overflow 1800 * queue overflow */
1784 */
1785 if (mp->frames > MP_MAX_QUEUE_LEN) { 1801 if (mp->frames > MP_MAX_QUEUE_LEN) {
1786 stats->overflows++; 1802 stats->overflows++;
1787 skb_queue_walk_safe(&mp->frags, frag, nextf) { 1803 while (mp->frames > MP_MAX_QUEUE_LEN) {
1788 if (mp->frames <= MP_MAX_QUEUE_LEN) 1804 frag = mp->frags->next;
1789 break; 1805 isdn_ppp_mp_free_skb(mp, mp->frags);
1790 __skb_unlink(frag, &mp->frags); 1806 mp->frags = frag;
1791 isdn_ppp_mp_free_skb(mp, frag);
1792 } 1807 }
1793 } 1808 }
1794 spin_unlock_irqrestore(&mp->lock, flags); 1809 spin_unlock_irqrestore(&mp->lock, flags);
1795} 1810}
1796 1811
1797static void isdn_ppp_mp_cleanup(isdn_net_local *lp) 1812static void isdn_ppp_mp_cleanup( isdn_net_local * lp )
1798{ 1813{
1799 struct sk_buff *skb, *tmp; 1814 struct sk_buff * frag = lp->netdev->pb->frags;
1800 1815 struct sk_buff * nextfrag;
1801 skb_queue_walk_safe(&lp->netdev->pb->frags, skb, tmp) { 1816 while( frag ) {
1802 __skb_unlink(skb, &lp->netdev->pb->frags); 1817 nextfrag = frag->next;
1803 isdn_ppp_mp_free_skb(lp->netdev->pb, skb); 1818 isdn_ppp_mp_free_skb(lp->netdev->pb, frag);
1804 } 1819 frag = nextfrag;
1820 }
1821 lp->netdev->pb->frags = NULL;
1805} 1822}
1806 1823
1807static u32 isdn_ppp_mp_get_seq( int short_seq, 1824static u32 isdn_ppp_mp_get_seq( int short_seq,
@@ -1838,115 +1855,72 @@ static u32 isdn_ppp_mp_get_seq( int short_seq,
1838 return seq; 1855 return seq;
1839} 1856}
1840 1857
1841static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from, 1858struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
1842 struct sk_buff *to) 1859 struct sk_buff * from, struct sk_buff * to )
1843{ 1860{
1844 if (from) { 1861 if( from )
1845 struct sk_buff *skb, *tmp; 1862 while (from != to) {
1846 int freeing = 0; 1863 struct sk_buff * next = from->next;
1847 1864 isdn_ppp_mp_free_skb(mp, from);
1848 skb_queue_walk_safe(&mp->frags, skb, tmp) { 1865 from = next;
1849 if (skb == to)
1850 break;
1851 if (skb == from)
1852 freeing = 1;
1853 if (!freeing)
1854 continue;
1855 __skb_unlink(skb, &mp->frags);
1856 isdn_ppp_mp_free_skb(mp, skb);
1857 } 1866 }
1858 } 1867 return from;
1859}
1860
1861static unsigned int calc_tot_len(struct sk_buff_head *queue,
1862 struct sk_buff *from, struct sk_buff *to)
1863{
1864 unsigned int tot_len = 0;
1865 struct sk_buff *skb;
1866 int found_start = 0;
1867
1868 skb_queue_walk(queue, skb) {
1869 if (skb == from)
1870 found_start = 1;
1871 if (!found_start)
1872 continue;
1873 tot_len += skb->len - MP_HEADER_LEN;
1874 if (skb == to)
1875 break;
1876 }
1877 return tot_len;
1878} 1868}
1879 1869
1880/* Reassemble packet using fragments in the reassembly queue from 1870void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
1881 * 'from' until 'to', inclusive. 1871 struct sk_buff * from, struct sk_buff * to )
1882 */
1883static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
1884 struct sk_buff *from, struct sk_buff *to,
1885 u32 lastseq)
1886{ 1872{
1887 ippp_bundle *mp = net_dev->pb; 1873 ippp_bundle * mp = net_dev->pb;
1888 unsigned int tot_len;
1889 struct sk_buff *skb;
1890 int proto; 1874 int proto;
1875 struct sk_buff * skb;
1876 unsigned int tot_len;
1891 1877
1892 if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { 1878 if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
1893 printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", 1879 printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
1894 __func__, lp->ppp_slot); 1880 __func__, lp->ppp_slot);
1895 return; 1881 return;
1896 } 1882 }
1897 1883 if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) {
1898 tot_len = calc_tot_len(&mp->frags, from, to); 1884 if( ippp_table[lp->ppp_slot]->debug & 0x40 )
1899
1900 if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
1901 if (ippp_table[lp->ppp_slot]->debug & 0x40)
1902 printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, " 1885 printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
1903 "len %d\n", MP_SEQ(from), from->len); 1886 "len %d\n", MP_SEQ(from), from->len );
1904 skb = from; 1887 skb = from;
1905 skb_pull(skb, MP_HEADER_LEN); 1888 skb_pull(skb, MP_HEADER_LEN);
1906 __skb_unlink(skb, &mp->frags);
1907 mp->frames--; 1889 mp->frames--;
1908 } else { 1890 } else {
1909 struct sk_buff *walk, *tmp; 1891 struct sk_buff * frag;
1910 int found_start = 0; 1892 int n;
1911 1893
1912 if (ippp_table[lp->ppp_slot]->debug & 0x40) 1894 for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++)
1913 printk(KERN_DEBUG"isdn_mppp: reassembling frames %d " 1895 tot_len += frag->len - MP_HEADER_LEN;
1914 "to %d, len %d\n", MP_SEQ(from), lastseq,
1915 tot_len);
1916 1896
1917 skb = dev_alloc_skb(tot_len); 1897 if( ippp_table[lp->ppp_slot]->debug & 0x40 )
1918 if (!skb) 1898 printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
1899 "to %d, len %d\n", MP_SEQ(from),
1900 (MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len );
1901 if( (skb = dev_alloc_skb(tot_len)) == NULL ) {
1919 printk(KERN_ERR "isdn_mppp: cannot allocate sk buff " 1902 printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
1920 "of size %d\n", tot_len); 1903 "of size %d\n", tot_len);
1921 1904 isdn_ppp_mp_discard(mp, from, to);
1922 found_start = 0; 1905 return;
1923 skb_queue_walk_safe(&mp->frags, walk, tmp) { 1906 }
1924 if (walk == from)
1925 found_start = 1;
1926 if (!found_start)
1927 continue;
1928 1907
1929 if (skb) { 1908 while( from != to ) {
1930 unsigned int len = walk->len - MP_HEADER_LEN; 1909 unsigned int len = from->len - MP_HEADER_LEN;
1931 skb_copy_from_linear_data_offset(walk, MP_HEADER_LEN,
1932 skb_put(skb, len),
1933 len);
1934 }
1935 __skb_unlink(walk, &mp->frags);
1936 isdn_ppp_mp_free_skb(mp, walk);
1937 1910
1938 if (walk == to) 1911 skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
1939 break; 1912 skb_put(skb,len),
1913 len);
1914 frag = from->next;
1915 isdn_ppp_mp_free_skb(mp, from);
1916 from = frag;
1940 } 1917 }
1941 } 1918 }
1942 if (!skb)
1943 return;
1944
1945 proto = isdn_ppp_strip_proto(skb); 1919 proto = isdn_ppp_strip_proto(skb);
1946 isdn_ppp_push_higher(net_dev, lp, skb, proto); 1920 isdn_ppp_push_higher(net_dev, lp, skb, proto);
1947} 1921}
1948 1922
1949static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb) 1923static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb)
1950{ 1924{
1951 dev_kfree_skb(skb); 1925 dev_kfree_skb(skb);
1952 mp->frames--; 1926 mp->frames--;
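
The reverted code above manages mp->frags as a hand-rolled singly linked list of fragments rather than through the skb_queue helpers, so isdn_ppp_mp_discard() must remember each fragment's ->next before freeing it. A minimal standalone sketch of that free-[from, to) walk, using made-up toy types instead of real sk_buffs:

	/* Sketch of the "free [from, to)" walk used by isdn_ppp_mp_discard():
	 * save ->next before freeing the current node so the traversal
	 * survives the free().  Toy types only. */
	#include <stdio.h>
	#include <stdlib.h>

	struct frag {                    /* stand-in for struct sk_buff        */
		int seq;                 /* stand-in for the MP sequence number */
		struct frag *next;
	};

	static struct frag *discard_range(struct frag *from, struct frag *to)
	{
		while (from != to) {
			struct frag *next = from->next;  /* grab link first */
			printf("freeing fragment %d\n", from->seq);
			free(from);
			from = next;
		}
		return from;             /* == to, the first fragment kept */
	}

	int main(void)
	{
		struct frag *head = NULL, **tail = &head;
		for (int i = 0; i < 5; i++) {
			*tail = calloc(1, sizeof(**tail));
			(*tail)->seq = i;
			tail = &(*tail)->next;
		}
		/* drop fragments 0..2, keep 3..4 */
		head = discard_range(head, head->next->next->next);
		for (struct frag *f = head; f; f = f->next)
			printf("kept fragment %d\n", f->seq);
		return 0;
	}
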
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 7467980b8cf9..e5225d28f392 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -78,6 +78,8 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
78{ 78{
79 int ret, state; 79 int ret, state;
80 80
81 led_dat->gpio = -1;
82
81 /* skip leds that aren't available */ 83 /* skip leds that aren't available */
82 if (!gpio_is_valid(template->gpio)) { 84 if (!gpio_is_valid(template->gpio)) {
83 printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n", 85 printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n",
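
The single added line pre-sets led_dat->gpio to -1 before the validity check, apparently so later cleanup can tell a claimed GPIO from one that was skipped. A small sketch of that init-to-invalid idiom; all names here (led_state, release_gpio) are hypothetical:

	#include <stdio.h>

	struct led_state {
		int gpio;                /* -1 means "never claimed"          */
	};

	static void release_gpio(int gpio)   /* stand-in for gpio_free()      */
	{
		printf("releasing gpio %d\n", gpio);
	}

	static void destroy_led(struct led_state *led)
	{
		if (led->gpio >= 0)      /* safe even if probe bailed out early */
			release_gpio(led->gpio);
	}

	int main(void)
	{
		struct led_state skipped = { .gpio = -1 };  /* probe skipped it  */
		struct led_state wired   = { .gpio = 17 };  /* probe claimed #17 */

		destroy_led(&skipped);   /* no bogus release of gpio 0        */
		destroy_led(&wired);     /* prints: releasing gpio 17         */
		return 0;
	}
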
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e64c971038d1..b182f86a19dd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -944,6 +944,14 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
944 desc->raid_disk < mddev->raid_disks */) { 944 desc->raid_disk < mddev->raid_disks */) {
945 set_bit(In_sync, &rdev->flags); 945 set_bit(In_sync, &rdev->flags);
946 rdev->raid_disk = desc->raid_disk; 946 rdev->raid_disk = desc->raid_disk;
947 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
948 /* active but not in sync implies recovery up to
949 * reshape position. We don't know exactly where
950 * that is, so set to zero for now */
951 if (mddev->minor_version >= 91) {
952 rdev->recovery_offset = 0;
953 rdev->raid_disk = desc->raid_disk;
954 }
947 } 955 }
948 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 956 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
949 set_bit(WriteMostly, &rdev->flags); 957 set_bit(WriteMostly, &rdev->flags);
@@ -1032,8 +1040,19 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1032 list_for_each_entry(rdev2, &mddev->disks, same_set) { 1040 list_for_each_entry(rdev2, &mddev->disks, same_set) {
1033 mdp_disk_t *d; 1041 mdp_disk_t *d;
1034 int desc_nr; 1042 int desc_nr;
1035 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 1043 int is_active = test_bit(In_sync, &rdev2->flags);
1036 && !test_bit(Faulty, &rdev2->flags)) 1044
1045 if (rdev2->raid_disk >= 0 &&
1046 sb->minor_version >= 91)
1047 /* we have nowhere to store the recovery_offset,
1048 * but if it is not below the reshape_position,
1049 * we can piggy-back on that.
1050 */
1051 is_active = 1;
1052 if (rdev2->raid_disk < 0 ||
1053 test_bit(Faulty, &rdev2->flags))
1054 is_active = 0;
1055 if (is_active)
1037 desc_nr = rdev2->raid_disk; 1056 desc_nr = rdev2->raid_disk;
1038 else 1057 else
1039 desc_nr = next_spare++; 1058 desc_nr = next_spare++;
@@ -1043,16 +1062,16 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1043 d->number = rdev2->desc_nr; 1062 d->number = rdev2->desc_nr;
1044 d->major = MAJOR(rdev2->bdev->bd_dev); 1063 d->major = MAJOR(rdev2->bdev->bd_dev);
1045 d->minor = MINOR(rdev2->bdev->bd_dev); 1064 d->minor = MINOR(rdev2->bdev->bd_dev);
1046 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 1065 if (is_active)
1047 && !test_bit(Faulty, &rdev2->flags))
1048 d->raid_disk = rdev2->raid_disk; 1066 d->raid_disk = rdev2->raid_disk;
1049 else 1067 else
1050 d->raid_disk = rdev2->desc_nr; /* compatibility */ 1068 d->raid_disk = rdev2->desc_nr; /* compatibility */
1051 if (test_bit(Faulty, &rdev2->flags)) 1069 if (test_bit(Faulty, &rdev2->flags))
1052 d->state = (1<<MD_DISK_FAULTY); 1070 d->state = (1<<MD_DISK_FAULTY);
1053 else if (test_bit(In_sync, &rdev2->flags)) { 1071 else if (is_active) {
1054 d->state = (1<<MD_DISK_ACTIVE); 1072 d->state = (1<<MD_DISK_ACTIVE);
1055 d->state |= (1<<MD_DISK_SYNC); 1073 if (test_bit(In_sync, &rdev2->flags))
1074 d->state |= (1<<MD_DISK_SYNC);
1056 active++; 1075 active++;
1057 working++; 1076 working++;
1058 } else { 1077 } else {
@@ -1382,8 +1401,6 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1382 1401
1383 if (rdev->raid_disk >= 0 && 1402 if (rdev->raid_disk >= 0 &&
1384 !test_bit(In_sync, &rdev->flags)) { 1403 !test_bit(In_sync, &rdev->flags)) {
1385 if (mddev->curr_resync_completed > rdev->recovery_offset)
1386 rdev->recovery_offset = mddev->curr_resync_completed;
1387 if (rdev->recovery_offset > 0) { 1404 if (rdev->recovery_offset > 0) {
1388 sb->feature_map |= 1405 sb->feature_map |=
1389 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 1406 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
@@ -1917,6 +1934,14 @@ static void sync_sbs(mddev_t * mddev, int nospares)
1917 */ 1934 */
1918 mdk_rdev_t *rdev; 1935 mdk_rdev_t *rdev;
1919 1936
1937 /* First make sure individual recovery_offsets are correct */
1938 list_for_each_entry(rdev, &mddev->disks, same_set) {
1939 if (rdev->raid_disk >= 0 &&
1940 !test_bit(In_sync, &rdev->flags) &&
1941 mddev->curr_resync_completed > rdev->recovery_offset)
1942 rdev->recovery_offset = mddev->curr_resync_completed;
1943
1944 }
1920 list_for_each_entry(rdev, &mddev->disks, same_set) { 1945 list_for_each_entry(rdev, &mddev->disks, same_set) {
1921 if (rdev->sb_events == mddev->events || 1946 if (rdev->sb_events == mddev->events ||
1922 (nospares && 1947 (nospares &&
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a053423785c9..e07ce2e033a9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1650,11 +1650,12 @@ static void raid1d(mddev_t *mddev)
1650 r1_bio->sector, 1650 r1_bio->sector,
1651 r1_bio->sectors); 1651 r1_bio->sectors);
1652 unfreeze_array(conf); 1652 unfreeze_array(conf);
1653 } 1653 } else
1654 md_error(mddev,
1655 conf->mirrors[r1_bio->read_disk].rdev);
1654 1656
1655 bio = r1_bio->bios[r1_bio->read_disk]; 1657 bio = r1_bio->bios[r1_bio->read_disk];
1656 if ((disk=read_balance(conf, r1_bio)) == -1 || 1658 if ((disk=read_balance(conf, r1_bio)) == -1) {
1657 disk == r1_bio->read_disk) {
1658 printk(KERN_ALERT "raid1: %s: unrecoverable I/O" 1659 printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
1659 " read error for block %llu\n", 1660 " read error for block %llu\n",
1660 bdevname(bio->bi_bdev,b), 1661 bdevname(bio->bi_bdev,b),
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index dcce204b6c73..d29215d966da 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4823,11 +4823,40 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4823 return ERR_PTR(-ENOMEM); 4823 return ERR_PTR(-ENOMEM);
4824} 4824}
4825 4825
4826
4827static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
4828{
4829 switch (algo) {
4830 case ALGORITHM_PARITY_0:
4831 if (raid_disk < max_degraded)
4832 return 1;
4833 break;
4834 case ALGORITHM_PARITY_N:
4835 if (raid_disk >= raid_disks - max_degraded)
4836 return 1;
4837 break;
4838 case ALGORITHM_PARITY_0_6:
4839 if (raid_disk == 0 ||
4840 raid_disk == raid_disks - 1)
4841 return 1;
4842 break;
4843 case ALGORITHM_LEFT_ASYMMETRIC_6:
4844 case ALGORITHM_RIGHT_ASYMMETRIC_6:
4845 case ALGORITHM_LEFT_SYMMETRIC_6:
4846 case ALGORITHM_RIGHT_SYMMETRIC_6:
4847 if (raid_disk == raid_disks - 1)
4848 return 1;
4849 }
4850 return 0;
4851}
4852
4826static int run(mddev_t *mddev) 4853static int run(mddev_t *mddev)
4827{ 4854{
4828 raid5_conf_t *conf; 4855 raid5_conf_t *conf;
4829 int working_disks = 0, chunk_size; 4856 int working_disks = 0, chunk_size;
4857 int dirty_parity_disks = 0;
4830 mdk_rdev_t *rdev; 4858 mdk_rdev_t *rdev;
4859 sector_t reshape_offset = 0;
4831 4860
4832 if (mddev->recovery_cp != MaxSector) 4861 if (mddev->recovery_cp != MaxSector)
4833 printk(KERN_NOTICE "raid5: %s is not clean" 4862 printk(KERN_NOTICE "raid5: %s is not clean"
@@ -4861,6 +4890,7 @@ static int run(mddev_t *mddev)
4861 "on a stripe boundary\n"); 4890 "on a stripe boundary\n");
4862 return -EINVAL; 4891 return -EINVAL;
4863 } 4892 }
4893 reshape_offset = here_new * mddev->new_chunk_sectors;
4864 /* here_new is the stripe we will write to */ 4894 /* here_new is the stripe we will write to */
4865 here_old = mddev->reshape_position; 4895 here_old = mddev->reshape_position;
4866 sector_div(here_old, mddev->chunk_sectors * 4896 sector_div(here_old, mddev->chunk_sectors *
@@ -4916,10 +4946,51 @@ static int run(mddev_t *mddev)
4916 /* 4946 /*
4917 * 0 for a fully functional array, 1 or 2 for a degraded array. 4947 * 0 for a fully functional array, 1 or 2 for a degraded array.
4918 */ 4948 */
4919 list_for_each_entry(rdev, &mddev->disks, same_set) 4949 list_for_each_entry(rdev, &mddev->disks, same_set) {
4920 if (rdev->raid_disk >= 0 && 4950 if (rdev->raid_disk < 0)
4921 test_bit(In_sync, &rdev->flags)) 4951 continue;
4952 if (test_bit(In_sync, &rdev->flags))
4922 working_disks++; 4953 working_disks++;
4954 /* This disc is not fully in-sync. However if it
4955 * just stored parity (beyond the recovery_offset),
 4956 * then we don't need to be concerned about the
4957 * array being dirty.
4958 * When reshape goes 'backwards', we never have
4959 * partially completed devices, so we only need
4960 * to worry about reshape going forwards.
4961 */
4962 /* Hack because v0.91 doesn't store recovery_offset properly. */
4963 if (mddev->major_version == 0 &&
4964 mddev->minor_version > 90)
4965 rdev->recovery_offset = reshape_offset;
4966
4967 printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
4968 rdev->raid_disk, working_disks, conf->prev_algo,
4969 conf->previous_raid_disks, conf->max_degraded,
4970 conf->algorithm, conf->raid_disks,
4971 only_parity(rdev->raid_disk,
4972 conf->prev_algo,
4973 conf->previous_raid_disks,
4974 conf->max_degraded),
4975 only_parity(rdev->raid_disk,
4976 conf->algorithm,
4977 conf->raid_disks,
4978 conf->max_degraded));
4979 if (rdev->recovery_offset < reshape_offset) {
4980 /* We need to check old and new layout */
4981 if (!only_parity(rdev->raid_disk,
4982 conf->algorithm,
4983 conf->raid_disks,
4984 conf->max_degraded))
4985 continue;
4986 }
4987 if (!only_parity(rdev->raid_disk,
4988 conf->prev_algo,
4989 conf->previous_raid_disks,
4990 conf->max_degraded))
4991 continue;
4992 dirty_parity_disks++;
4993 }
4923 4994
4924 mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks) 4995 mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
4925 - working_disks); 4996 - working_disks);
@@ -4935,7 +5006,7 @@ static int run(mddev_t *mddev)
4935 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 5006 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
4936 mddev->resync_max_sectors = mddev->dev_sectors; 5007 mddev->resync_max_sectors = mddev->dev_sectors;
4937 5008
4938 if (mddev->degraded > 0 && 5009 if (mddev->degraded > dirty_parity_disks &&
4939 mddev->recovery_cp != MaxSector) { 5010 mddev->recovery_cp != MaxSector) {
4940 if (mddev->ok_start_degraded) 5011 if (mddev->ok_start_degraded)
4941 printk(KERN_WARNING 5012 printk(KERN_WARNING
@@ -5361,9 +5432,11 @@ static int raid5_start_reshape(mddev_t *mddev)
5361 !test_bit(Faulty, &rdev->flags)) { 5432 !test_bit(Faulty, &rdev->flags)) {
5362 if (raid5_add_disk(mddev, rdev) == 0) { 5433 if (raid5_add_disk(mddev, rdev) == 0) {
5363 char nm[20]; 5434 char nm[20];
5364 set_bit(In_sync, &rdev->flags); 5435 if (rdev->raid_disk >= conf->previous_raid_disks)
5436 set_bit(In_sync, &rdev->flags);
5437 else
5438 rdev->recovery_offset = 0;
5365 added_devices++; 5439 added_devices++;
5366 rdev->recovery_offset = 0;
5367 sprintf(nm, "rd%d", rdev->raid_disk); 5440 sprintf(nm, "rd%d", rdev->raid_disk);
5368 if (sysfs_create_link(&mddev->kobj, 5441 if (sysfs_create_link(&mddev->kobj,
5369 &rdev->kobj, nm)) 5442 &rdev->kobj, nm))
diff --git a/drivers/media/common/ir-functions.c b/drivers/media/common/ir-functions.c
index 655474b29e21..abd4791acb0e 100644
--- a/drivers/media/common/ir-functions.c
+++ b/drivers/media/common/ir-functions.c
@@ -64,7 +64,7 @@ void ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
64 64
65 ir->ir_type = ir_type; 65 ir->ir_type = ir_type;
66 66
67 memset(ir->ir_codes, sizeof(ir->ir_codes), 0); 67 memset(ir->ir_codes, 0, sizeof(ir->ir_codes));
68 68
69 /* 69 /*
70 * FIXME: This is a temporary workaround to use the new IR tables 70 * FIXME: This is a temporary workaround to use the new IR tables
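
The ir_input_init() fix swaps the second and third memset() arguments: the signature is memset(dst, value, size), so the old call cleared zero bytes and left ir->ir_codes uninitialised. A tiny standalone demonstration of the difference:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char codes[8];

		memset(codes, 0xAA, sizeof(codes));      /* dirty the buffer      */
		memset(codes, sizeof(codes), 0);         /* bug: clears 0 bytes   */
		printf("after buggy call:  0x%02x\n", (unsigned char)codes[0]);

		memset(codes, 0, sizeof(codes));         /* fixed argument order  */
		printf("after fixed call:  0x%02x\n", (unsigned char)codes[0]);
		return 0;
	}
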
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index ddf639ed2fd8..98082416aa52 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -31,6 +31,7 @@
31#include <linux/wait.h> 31#include <linux/wait.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/poll.h> 33#include <linux/poll.h>
34#include <linux/semaphore.h>
34#include <linux/module.h> 35#include <linux/module.h>
35#include <linux/list.h> 36#include <linux/list.h>
36#include <linux/freezer.h> 37#include <linux/freezer.h>
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index f65591fb7cec..2a53dd096eef 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -663,6 +663,14 @@ static struct zl10353_config cxusb_zl10353_xc3028_config = {
663 .parallel_ts = 1, 663 .parallel_ts = 1,
664}; 664};
665 665
666static struct zl10353_config cxusb_zl10353_xc3028_config_no_i2c_gate = {
667 .demod_address = 0x0f,
668 .if2 = 45600,
669 .no_tuner = 1,
670 .parallel_ts = 1,
671 .disable_i2c_gate_ctrl = 1,
672};
673
666static struct mt352_config cxusb_mt352_xc3028_config = { 674static struct mt352_config cxusb_mt352_xc3028_config = {
667 .demod_address = 0x0f, 675 .demod_address = 0x0f,
668 .if2 = 4560, 676 .if2 = 4560,
@@ -894,7 +902,7 @@ static int cxusb_dualdig4_frontend_attach(struct dvb_usb_adapter *adap)
894 cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1); 902 cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1);
895 903
896 if ((adap->fe = dvb_attach(zl10353_attach, 904 if ((adap->fe = dvb_attach(zl10353_attach,
897 &cxusb_zl10353_xc3028_config, 905 &cxusb_zl10353_xc3028_config_no_i2c_gate,
898 &adap->dev->i2c_adap)) == NULL) 906 &adap->dev->i2c_adap)) == NULL)
899 return -EIO; 907 return -EIO;
900 908
diff --git a/drivers/media/dvb/siano/Kconfig b/drivers/media/dvb/siano/Kconfig
index 8c1aed77ea30..85a222c4eaa0 100644
--- a/drivers/media/dvb/siano/Kconfig
+++ b/drivers/media/dvb/siano/Kconfig
@@ -4,7 +4,7 @@
4 4
5config SMS_SIANO_MDTV 5config SMS_SIANO_MDTV
6 tristate "Siano SMS1xxx based MDTV receiver" 6 tristate "Siano SMS1xxx based MDTV receiver"
7 depends on DVB_CORE && INPUT 7 depends on DVB_CORE && INPUT && HAS_DMA
8 ---help--- 8 ---help---
9 Choose Y or M here if you have MDTV receiver with a Siano chipset. 9 Choose Y or M here if you have MDTV receiver with a Siano chipset.
10 10
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
index c3f579de6e71..c6cf11661868 100644
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ b/drivers/media/radio/radio-gemtek-pci.c
@@ -181,12 +181,10 @@ static void gemtek_pci_mute(struct gemtek_pci *card)
181 181
182static void gemtek_pci_unmute(struct gemtek_pci *card) 182static void gemtek_pci_unmute(struct gemtek_pci *card)
183{ 183{
184 mutex_lock(&card->lock);
185 if (card->mute) { 184 if (card->mute) {
186 gemtek_pci_setfrequency(card, card->current_frequency); 185 gemtek_pci_setfrequency(card, card->current_frequency);
187 card->mute = false; 186 card->mute = false;
188 } 187 }
189 mutex_unlock(&card->lock);
190} 188}
191 189
192static int gemtek_pci_getsignal(struct gemtek_pci *card) 190static int gemtek_pci_getsignal(struct gemtek_pci *card)
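
The gemtek hunk drops the mutex_lock()/mutex_unlock() pair inside gemtek_pci_unmute(), presumably because the function is already entered with card->lock held; re-taking a non-recursive lock on the same thread deadlocks. A userspace illustration with an error-checking pthread mutex, which reports EDEADLK instead of hanging:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		pthread_mutexattr_t attr;
		pthread_mutex_t lock;

		pthread_mutexattr_init(&attr);
		pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
		pthread_mutex_init(&lock, &attr);

		pthread_mutex_lock(&lock);              /* caller already holds it */
		int err = pthread_mutex_lock(&lock);    /* "unmute" locks again    */
		printf("second lock: %s\n", err ? strerror(err) : "acquired");

		pthread_mutex_unlock(&lock);
		pthread_mutex_destroy(&lock);
		pthread_mutexattr_destroy(&attr);
		return 0;
	}
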
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index c015da813dda..d14cfb200ed0 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -1426,7 +1426,6 @@ static __init int vpif_probe(struct platform_device *pdev)
1426 struct vpif_display_config *config; 1426 struct vpif_display_config *config;
1427 int i, j = 0, k, q, m, err = 0; 1427 int i, j = 0, k, q, m, err = 0;
1428 struct i2c_adapter *i2c_adap; 1428 struct i2c_adapter *i2c_adap;
1429 struct vpif_config *config;
1430 struct common_obj *common; 1429 struct common_obj *common;
1431 struct channel_obj *ch; 1430 struct channel_obj *ch;
1432 struct video_device *vfd; 1431 struct video_device *vfd;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index bdb249bd9d5d..c0fd5c6feeac 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -1584,8 +1584,8 @@ struct em28xx_board em28xx_boards[] = {
1584 [EM2870_BOARD_REDDO_DVB_C_USB_BOX] = { 1584 [EM2870_BOARD_REDDO_DVB_C_USB_BOX] = {
1585 .name = "Reddo DVB-C USB TV Box", 1585 .name = "Reddo DVB-C USB TV Box",
1586 .tuner_type = TUNER_ABSENT, 1586 .tuner_type = TUNER_ABSENT,
1587 .tuner_gpio = reddo_dvb_c_usb_box,
1587 .has_dvb = 1, 1588 .has_dvb = 1,
1588 .dvb_gpio = reddo_dvb_c_usb_box,
1589 }, 1589 },
1590}; 1590};
1591const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards); 1591const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 5f37952c75cf..72802291e812 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -28,6 +28,7 @@
28#include <linux/moduleparam.h> 28#include <linux/moduleparam.h>
29#include <linux/mutex.h> 29#include <linux/mutex.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/sched.h>
31#include <linux/time.h> 32#include <linux/time.h>
32#include <linux/version.h> 33#include <linux/version.h>
33#include <linux/videodev2.h> 34#include <linux/videodev2.h>
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index dff2e5e2d8c6..7db82bdf6f31 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -17,6 +17,7 @@
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/sched.h>
20 21
21#include <media/v4l2-common.h> 22#include <media/v4l2-common.h>
22#include <media/v4l2-dev.h> 23#include <media/v4l2-dev.h>
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 2f78b4f263f5..9c8b7c7b89ee 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -31,6 +31,7 @@
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/videodev2.h> 32#include <linux/videodev2.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/sched.h>
34 35
35#include <media/v4l2-common.h> 36#include <media/v4l2-common.h>
36#include <media/v4l2-dev.h> 37#include <media/v4l2-dev.h>
@@ -1723,11 +1724,12 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
1723 1724
1724 err = soc_camera_host_register(&pcdev->ici); 1725 err = soc_camera_host_register(&pcdev->ici);
1725 if (err) 1726 if (err)
1726 goto exit_free_irq; 1727 goto exit_free_clk;
1727 1728
1728 return 0; 1729 return 0;
1729 1730
1730exit_free_irq: 1731exit_free_clk:
1732 pm_runtime_disable(&pdev->dev);
1731 free_irq(pcdev->irq, pcdev); 1733 free_irq(pcdev->irq, pcdev);
1732exit_release_mem: 1734exit_release_mem:
1733 if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) 1735 if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
@@ -1747,6 +1749,7 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
1747 struct sh_mobile_ceu_dev, ici); 1749 struct sh_mobile_ceu_dev, ici);
1748 1750
1749 soc_camera_host_unregister(soc_host); 1751 soc_camera_host_unregister(soc_host);
1752 pm_runtime_disable(&pdev->dev);
1750 free_irq(pcdev->irq, pcdev); 1753 free_irq(pcdev->irq, pcdev);
1751 if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) 1754 if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
1752 dma_release_declared_memory(&pdev->dev); 1755 dma_release_declared_memory(&pdev->dev);
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 36e617bd13c7..95fdeb23c2c1 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1097,6 +1097,13 @@ static int default_s_crop(struct soc_camera_device *icd, struct v4l2_crop *a)
1097 return v4l2_subdev_call(sd, video, s_crop, a); 1097 return v4l2_subdev_call(sd, video, s_crop, a);
1098} 1098}
1099 1099
1100static void soc_camera_device_init(struct device *dev, void *pdata)
1101{
1102 dev->platform_data = pdata;
1103 dev->bus = &soc_camera_bus_type;
1104 dev->release = dummy_release;
1105}
1106
1100int soc_camera_host_register(struct soc_camera_host *ici) 1107int soc_camera_host_register(struct soc_camera_host *ici)
1101{ 1108{
1102 struct soc_camera_host *ix; 1109 struct soc_camera_host *ix;
@@ -1158,6 +1165,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
1158 1165
1159 list_for_each_entry(icd, &devices, list) { 1166 list_for_each_entry(icd, &devices, list) {
1160 if (icd->iface == ici->nr) { 1167 if (icd->iface == ici->nr) {
1168 void *pdata = icd->dev.platform_data;
1161 /* The bus->remove will be called */ 1169 /* The bus->remove will be called */
1162 device_unregister(&icd->dev); 1170 device_unregister(&icd->dev);
1163 /* 1171 /*
@@ -1169,6 +1177,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
1169 * device private data. 1177 * device private data.
1170 */ 1178 */
1171 memset(&icd->dev, 0, sizeof(icd->dev)); 1179 memset(&icd->dev, 0, sizeof(icd->dev));
1180 soc_camera_device_init(&icd->dev, pdata);
1172 } 1181 }
1173 } 1182 }
1174 1183
@@ -1200,10 +1209,7 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
1200 * man, stay reasonable... */ 1209 * man, stay reasonable... */
1201 return -ENOMEM; 1210 return -ENOMEM;
1202 1211
1203 icd->devnum = num; 1212 icd->devnum = num;
1204 icd->dev.bus = &soc_camera_bus_type;
1205
1206 icd->dev.release = dummy_release;
1207 icd->use_count = 0; 1213 icd->use_count = 0;
1208 icd->host_priv = NULL; 1214 icd->host_priv = NULL;
1209 mutex_init(&icd->video_lock); 1215 mutex_init(&icd->video_lock);
@@ -1311,12 +1317,13 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
1311 icd->iface = icl->bus_id; 1317 icd->iface = icl->bus_id;
1312 icd->pdev = &pdev->dev; 1318 icd->pdev = &pdev->dev;
1313 platform_set_drvdata(pdev, icd); 1319 platform_set_drvdata(pdev, icd);
1314 icd->dev.platform_data = icl;
1315 1320
1316 ret = soc_camera_device_register(icd); 1321 ret = soc_camera_device_register(icd);
1317 if (ret < 0) 1322 if (ret < 0)
1318 goto escdevreg; 1323 goto escdevreg;
1319 1324
1325 soc_camera_device_init(&icd->dev, icl);
1326
1320 icd->user_width = DEFAULT_WIDTH; 1327 icd->user_width = DEFAULT_WIDTH;
1321 icd->user_height = DEFAULT_HEIGHT; 1328 icd->user_height = DEFAULT_HEIGHT;
1322 1329
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index 635ffc7b0391..c3065c4bcba9 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -19,6 +19,7 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/pagemap.h> 20#include <linux/pagemap.h>
21#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
22#include <linux/sched.h>
22#include <media/videobuf-dma-contig.h> 23#include <media/videobuf-dma-contig.h>
23 24
24struct videobuf_dma_contig_memory { 25struct videobuf_dma_contig_memory {
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 49b7885c2702..7f27576ca046 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -29,7 +29,7 @@
29/* Current settings - values are 2*2^(reg_val/4) microamps. These are 29/* Current settings - values are 2*2^(reg_val/4) microamps. These are
30 * exported since they are used by multiple drivers. 30 * exported since they are used by multiple drivers.
31 */ 31 */
32int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL] = { 32int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1] = {
33 2, 33 2,
34 2, 34 2,
35 3, 35 3,
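
The wm831x change grows the table to WM831X_ISINK_MAX_ISEL + 1 entries: when a MAX constant names the largest valid index, a MAX-sized array is one slot short and indexing with MAX reads past the end. A sketch with a made-up MAX_ISEL and values:

	#include <stdio.h>

	#define MAX_ISEL 3                       /* highest selectable index    */

	static const int current_ua[MAX_ISEL + 1] = { 2, 2, 3, 3 };

	int main(void)
	{
		for (int isel = 0; isel <= MAX_ISEL; isel++)  /* inclusive bound */
			printf("isel %d -> %d uA\n", isel, current_ua[isel]);
		return 0;
	}
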
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index db39f4a52f53..2cb2736d65aa 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -158,6 +158,7 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
158 struct i2c_msg msg[2]; 158 struct i2c_msg msg[2];
159 u8 msgbuf[2]; 159 u8 msgbuf[2];
160 struct i2c_client *client; 160 struct i2c_client *client;
161 unsigned long timeout, read_time;
161 int status, i; 162 int status, i;
162 163
163 memset(msg, 0, sizeof(msg)); 164 memset(msg, 0, sizeof(msg));
@@ -183,47 +184,60 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
183 if (count > io_limit) 184 if (count > io_limit)
184 count = io_limit; 185 count = io_limit;
185 186
186 /* Smaller eeproms can work given some SMBus extension calls */
187 if (at24->use_smbus) { 187 if (at24->use_smbus) {
188 /* Smaller eeproms can work given some SMBus extension calls */
188 if (count > I2C_SMBUS_BLOCK_MAX) 189 if (count > I2C_SMBUS_BLOCK_MAX)
189 count = I2C_SMBUS_BLOCK_MAX; 190 count = I2C_SMBUS_BLOCK_MAX;
190 status = i2c_smbus_read_i2c_block_data(client, offset, 191 } else {
191 count, buf); 192 /*
192 dev_dbg(&client->dev, "smbus read %zu@%d --> %d\n", 193 * When we have a better choice than SMBus calls, use a
193 count, offset, status); 194 * combined I2C message. Write address; then read up to
194 return (status < 0) ? -EIO : status; 195 * io_limit data bytes. Note that read page rollover helps us
196 * here (unlike writes). msgbuf is u8 and will cast to our
197 * needs.
198 */
199 i = 0;
200 if (at24->chip.flags & AT24_FLAG_ADDR16)
201 msgbuf[i++] = offset >> 8;
202 msgbuf[i++] = offset;
203
204 msg[0].addr = client->addr;
205 msg[0].buf = msgbuf;
206 msg[0].len = i;
207
208 msg[1].addr = client->addr;
209 msg[1].flags = I2C_M_RD;
210 msg[1].buf = buf;
211 msg[1].len = count;
195 } 212 }
196 213
197 /* 214 /*
198 * When we have a better choice than SMBus calls, use a combined 215 * Reads fail if the previous write didn't complete yet. We may
199 * I2C message. Write address; then read up to io_limit data bytes. 216 * loop a few times until this one succeeds, waiting at least
200 * Note that read page rollover helps us here (unlike writes). 217 * long enough for one entire page write to work.
201 * msgbuf is u8 and will cast to our needs.
202 */ 218 */
203 i = 0; 219 timeout = jiffies + msecs_to_jiffies(write_timeout);
204 if (at24->chip.flags & AT24_FLAG_ADDR16) 220 do {
205 msgbuf[i++] = offset >> 8; 221 read_time = jiffies;
206 msgbuf[i++] = offset; 222 if (at24->use_smbus) {
207 223 status = i2c_smbus_read_i2c_block_data(client, offset,
208 msg[0].addr = client->addr; 224 count, buf);
209 msg[0].buf = msgbuf; 225 } else {
210 msg[0].len = i; 226 status = i2c_transfer(client->adapter, msg, 2);
227 if (status == 2)
228 status = count;
229 }
230 dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
231 count, offset, status, jiffies);
211 232
212 msg[1].addr = client->addr; 233 if (status == count)
213 msg[1].flags = I2C_M_RD; 234 return count;
214 msg[1].buf = buf;
215 msg[1].len = count;
216 235
217 status = i2c_transfer(client->adapter, msg, 2); 236 /* REVISIT: at HZ=100, this is sloooow */
218 dev_dbg(&client->dev, "i2c read %zu@%d --> %d\n", 237 msleep(1);
219 count, offset, status); 238 } while (time_before(read_time, timeout));
220 239
221 if (status == 2) 240 return -ETIMEDOUT;
222 return count;
223 else if (status >= 0)
224 return -EIO;
225 else
226 return status;
227} 241}
228 242
229static ssize_t at24_read(struct at24_data *at24, 243static ssize_t at24_read(struct at24_data *at24,
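
The rewritten at24_eeprom_read() wraps both the SMBus and i2c_transfer() paths in a loop bounded by write_timeout, because the EEPROM rejects reads while an internal write cycle is still in progress. The same retry-until-deadline pattern, sketched in portable C with try_read() as a stub standing in for the real transfer:

	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	static int attempts;

	static bool try_read(void)            /* stand-in for the real transfer */
	{
		return ++attempts >= 3;       /* pretend it succeeds on try 3   */
	}

	static double now_ms(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
	}

	int main(void)
	{
		const double timeout_ms = 25;             /* like write_timeout   */
		double deadline = now_ms() + timeout_ms;

		do {
			if (try_read()) {
				printf("read ok after %d attempts\n", attempts);
				return 0;
			}
			usleep(1000);                     /* like msleep(1)       */
		} while (now_ms() < deadline);

		fprintf(stderr, "read timed out\n");
		return 1;
	}
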
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index b00d67319058..9fb480bb0e0a 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -760,6 +760,8 @@ static int pxamci_remove(struct platform_device *pdev)
760 if (mmc) { 760 if (mmc) {
761 struct pxamci_host *host = mmc_priv(mmc); 761 struct pxamci_host *host = mmc_priv(mmc);
762 762
763 mmc_remove_host(mmc);
764
763 if (host->pdata) { 765 if (host->pdata) {
764 gpio_cd = host->pdata->gpio_card_detect; 766 gpio_cd = host->pdata->gpio_card_detect;
765 gpio_ro = host->pdata->gpio_card_ro; 767 gpio_ro = host->pdata->gpio_card_ro;
@@ -779,8 +781,6 @@ static int pxamci_remove(struct platform_device *pdev)
779 if (host->pdata && host->pdata->exit) 781 if (host->pdata && host->pdata->exit)
780 host->pdata->exit(&pdev->dev, mmc); 782 host->pdata->exit(&pdev->dev, mmc);
781 783
782 mmc_remove_host(mmc);
783
784 pxamci_stop_clock(host); 784 pxamci_stop_clock(host);
785 writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD| 785 writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
786 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, 786 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index d600c2deff73..689d6a79ffc0 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -118,11 +118,9 @@ static caddr_t remap_window(struct map_info *map, unsigned long to)
118 DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x", 118 DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x",
119 dev->offset, mrq.CardOffset); 119 dev->offset, mrq.CardOffset);
120 mrq.Page = 0; 120 mrq.Page = 0;
121 ret = pcmcia_map_mem_page(win, &mrq); 121 ret = pcmcia_map_mem_page(dev->p_dev, win, &mrq);
122 if (ret != 0) { 122 if (ret != 0)
123 cs_error(dev->p_dev, MapMemPage, ret);
124 return NULL; 123 return NULL;
125 }
126 dev->offset = mrq.CardOffset; 124 dev->offset = mrq.CardOffset;
127 } 125 }
128 return dev->win_base + (to & (dev->win_size-1)); 126 return dev->win_base + (to & (dev->win_size-1));
@@ -327,8 +325,6 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on)
327 325
328 DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp); 326 DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp);
329 ret = pcmcia_modify_configuration(link, &mod); 327 ret = pcmcia_modify_configuration(link, &mod);
330 if (ret != 0)
331 cs_error(link, ModifyConfiguration, ret);
332} 328}
333 329
334 330
@@ -348,107 +344,116 @@ static void pcmciamtd_release(struct pcmcia_device *link)
348 iounmap(dev->win_base); 344 iounmap(dev->win_base);
349 dev->win_base = NULL; 345 dev->win_base = NULL;
350 } 346 }
351 pcmcia_release_window(link->win); 347 pcmcia_release_window(link, link->win);
352 } 348 }
353 pcmcia_disable_device(link); 349 pcmcia_disable_device(link);
354} 350}
355 351
356 352
357static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name) 353#ifdef CONFIG_MTD_DEBUG
354static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
355 tuple_t *tuple,
356 void *priv_data)
358{ 357{
359 int rc;
360 tuple_t tuple;
361 cisparse_t parse; 358 cisparse_t parse;
362 u_char buf[64];
363
364 tuple.Attributes = 0;
365 tuple.TupleData = (cisdata_t *)buf;
366 tuple.TupleDataMax = sizeof(buf);
367 tuple.TupleOffset = 0;
368 tuple.DesiredTuple = RETURN_FIRST_TUPLE;
369
370 rc = pcmcia_get_first_tuple(link, &tuple);
371 while (rc == 0) {
372 rc = pcmcia_get_tuple_data(link, &tuple);
373 if (rc != 0) {
374 cs_error(link, GetTupleData, rc);
375 break;
376 }
377 rc = pcmcia_parse_tuple(&tuple, &parse);
378 if (rc != 0) {
379 cs_error(link, ParseTuple, rc);
380 break;
381 }
382 359
383 switch(tuple.TupleCode) { 360 if (!pcmcia_parse_tuple(tuple, &parse)) {
384 case CISTPL_FORMAT: { 361 cistpl_format_t *t = &parse.format;
385 cistpl_format_t *t = &parse.format; 362 (void)t; /* Shut up, gcc */
386 (void)t; /* Shut up, gcc */ 363 DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u",
387 DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u", 364 t->type, t->edc, t->offset, t->length);
388 t->type, t->edc, t->offset, t->length); 365 }
389 break; 366 return -ENOSPC;
367}
390 368
391 } 369static int pcmciamtd_cistpl_jedec(struct pcmcia_device *p_dev,
370 tuple_t *tuple,
371 void *priv_data)
372{
373 cisparse_t parse;
374 int i;
392 375
393 case CISTPL_DEVICE: { 376 if (!pcmcia_parse_tuple(tuple, &parse)) {
394 cistpl_device_t *t = &parse.device; 377 cistpl_jedec_t *t = &parse.jedec;
395 int i; 378 for (i = 0; i < t->nid; i++)
396 DEBUG(2, "Common memory:"); 379 DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info);
397 dev->pcmcia_map.size = t->dev[0].size; 380 }
398 for(i = 0; i < t->ndev; i++) { 381 return -ENOSPC;
399 DEBUG(2, "Region %d, type = %u", i, t->dev[i].type); 382}
400 DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp); 383#endif
401 DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
402 DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
403 }
404 break;
405 }
406 384
407 case CISTPL_VERS_1: { 385static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
408 cistpl_vers_1_t *t = &parse.version_1; 386 tuple_t *tuple,
409 int i; 387 void *priv_data)
410 if(t->ns) { 388{
411 dev->mtd_name[0] = '\0'; 389 struct pcmciamtd_dev *dev = priv_data;
412 for(i = 0; i < t->ns; i++) { 390 cisparse_t parse;
413 if(i) 391 cistpl_device_t *t = &parse.device;
414 strcat(dev->mtd_name, " "); 392 int i;
415 strcat(dev->mtd_name, t->str+t->ofs[i]);
416 }
417 }
418 DEBUG(2, "Found name: %s", dev->mtd_name);
419 break;
420 }
421 393
422 case CISTPL_JEDEC_C: { 394 if (pcmcia_parse_tuple(tuple, &parse))
423 cistpl_jedec_t *t = &parse.jedec; 395 return -EINVAL;
424 int i; 396
425 for(i = 0; i < t->nid; i++) { 397 DEBUG(2, "Common memory:");
426 DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info); 398 dev->pcmcia_map.size = t->dev[0].size;
427 } 399 /* from here on: DEBUG only */
428 break; 400 for (i = 0; i < t->ndev; i++) {
429 } 401 DEBUG(2, "Region %d, type = %u", i, t->dev[i].type);
402 DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp);
403 DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
404 DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
405 }
406 return 0;
407}
430 408
431 case CISTPL_DEVICE_GEO: { 409static int pcmciamtd_cistpl_geo(struct pcmcia_device *p_dev,
432 cistpl_device_geo_t *t = &parse.device_geo; 410 tuple_t *tuple,
433 int i; 411 void *priv_data)
434 dev->pcmcia_map.bankwidth = t->geo[0].buswidth; 412{
435 for(i = 0; i < t->ngeo; i++) { 413 struct pcmciamtd_dev *dev = priv_data;
436 DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth); 414 cisparse_t parse;
437 DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block); 415 cistpl_device_geo_t *t = &parse.device_geo;
438 DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block); 416 int i;
439 DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
440 DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
441 DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
442 }
443 break;
444 }
445 417
446 default: 418 if (pcmcia_parse_tuple(tuple, &parse))
447 DEBUG(2, "Unknown tuple code %d", tuple.TupleCode); 419 return -EINVAL;
448 } 420
421 dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
422 /* from here on: DEBUG only */
423 for (i = 0; i < t->ngeo; i++) {
424 DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth);
425 DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
426 DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
427 DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
428 DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
429 DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
430 }
431 return 0;
432}
433
434
435static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link, int *new_name)
436{
437 int i;
449 438
450 rc = pcmcia_get_next_tuple(link, &tuple); 439 if (p_dev->prod_id[0]) {
440 dev->mtd_name[0] = '\0';
441 for (i = 0; i < 4; i++) {
442 if (i)
443 strcat(dev->mtd_name, " ");
444 if (p_dev->prod_id[i])
445 strcat(dev->mtd_name, p_dev->prod_id[i]);
446 }
447 DEBUG(2, "Found name: %s", dev->mtd_name);
451 } 448 }
449
450#ifdef CONFIG_MTD_DEBUG
451 pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL);
452 pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL);
453#endif
454 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev);
455 pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev);
456
452 if(!dev->pcmcia_map.size) 457 if(!dev->pcmcia_map.size)
453 dev->pcmcia_map.size = MAX_PCMCIA_ADDR; 458 dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
454 459
@@ -481,16 +486,12 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *link,
481 * MTD device available to the system. 486 * MTD device available to the system.
482 */ 487 */
483 488
484#define CS_CHECK(fn, ret) \
485do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
486
487static int pcmciamtd_config(struct pcmcia_device *link) 489static int pcmciamtd_config(struct pcmcia_device *link)
488{ 490{
489 struct pcmciamtd_dev *dev = link->priv; 491 struct pcmciamtd_dev *dev = link->priv;
490 struct mtd_info *mtd = NULL; 492 struct mtd_info *mtd = NULL;
491 cs_status_t status; 493 cs_status_t status;
492 win_req_t req; 494 win_req_t req;
493 int last_ret = 0, last_fn = 0;
494 int ret; 495 int ret;
495 int i; 496 int i;
496 static char *probes[] = { "jedec_probe", "cfi_probe" }; 497 static char *probes[] = { "jedec_probe", "cfi_probe" };
@@ -529,7 +530,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
529 int ret; 530 int ret;
530 DEBUG(2, "requesting window with size = %dKiB memspeed = %d", 531 DEBUG(2, "requesting window with size = %dKiB memspeed = %d",
531 req.Size >> 10, req.AccessSpeed); 532 req.Size >> 10, req.AccessSpeed);
532 ret = pcmcia_request_window(&link, &req, &link->win); 533 ret = pcmcia_request_window(link, &req, &link->win);
533 DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size); 534 DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
534 if(ret) { 535 if(ret) {
535 req.Size >>= 1; 536 req.Size >>= 1;
@@ -577,7 +578,6 @@ static int pcmciamtd_config(struct pcmcia_device *link)
577 DEBUG(2, "Setting Configuration"); 578 DEBUG(2, "Setting Configuration");
578 ret = pcmcia_request_configuration(link, &link->conf); 579 ret = pcmcia_request_configuration(link, &link->conf);
579 if (ret != 0) { 580 if (ret != 0) {
580 cs_error(link, RequestConfiguration, ret);
581 if (dev->win_base) { 581 if (dev->win_base) {
582 iounmap(dev->win_base); 582 iounmap(dev->win_base);
583 dev->win_base = NULL; 583 dev->win_base = NULL;
@@ -652,8 +652,7 @@ static int pcmciamtd_config(struct pcmcia_device *link)
652 link->dev_node = &dev->node; 652 link->dev_node = &dev->node;
653 return 0; 653 return 0;
654 654
655 cs_failed: 655 failed:
656 cs_error(link, last_fn, last_ret);
657 err("CS Error, exiting"); 656 err("CS Error, exiting");
658 pcmciamtd_release(link); 657 pcmciamtd_release(link);
659 return -ENODEV; 658 return -ENODEV;
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index fdb97f3d30e9..d7a47574d21e 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -209,8 +209,8 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
209 } 209 }
210 subdev->mtd->owner = THIS_MODULE; 210 subdev->mtd->owner = THIS_MODULE;
211 211
212 printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %dMiB, " 212 printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %uMiB, %d-bit\n",
213 "%d-bit\n", phys, subdev->mtd->size >> 20, 213 phys, (unsigned)(subdev->mtd->size >> 20),
214 subdev->map.bankwidth * 8); 214 subdev->map.bankwidth * 8);
215 215
216 return 0; 216 return 0;
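
The sa1100-flash printk fix prints the size in MiB with %u and an explicit cast, since mtd->size is wider than int. An equivalent standalone illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t size = 64ULL << 20;                      /* 64 MiB device   */

		printf("flash: %uMiB\n", (unsigned)(size >> 20)); /* cast matches %u */
		return 0;
	}
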
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e19ca4bb7510..b2f71f79baaf 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -975,7 +975,7 @@ config ENC28J60_WRITEVERIFY
975 975
976config ETHOC 976config ETHOC
977 tristate "OpenCores 10/100 Mbps Ethernet MAC support" 977 tristate "OpenCores 10/100 Mbps Ethernet MAC support"
978 depends on NET_ETHERNET && HAS_IOMEM 978 depends on NET_ETHERNET && HAS_IOMEM && HAS_DMA
979 select MII 979 select MII
980 select PHYLIB 980 select PHYLIB
981 select CRC32 981 select CRC32
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 2be49c817995..b25467ac895c 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -628,15 +628,6 @@ static int ep93xx_open(struct net_device *dev)
628 if (ep93xx_alloc_buffers(ep)) 628 if (ep93xx_alloc_buffers(ep))
629 return -ENOMEM; 629 return -ENOMEM;
630 630
631 if (is_zero_ether_addr(dev->dev_addr)) {
632 random_ether_addr(dev->dev_addr);
633 printk(KERN_INFO "%s: generated random MAC address "
634 "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
635 dev->dev_addr[0], dev->dev_addr[1],
636 dev->dev_addr[2], dev->dev_addr[3],
637 dev->dev_addr[4], dev->dev_addr[5]);
638 }
639
640 napi_enable(&ep->napi); 631 napi_enable(&ep->napi);
641 632
642 if (ep93xx_start_hw(dev)) { 633 if (ep93xx_start_hw(dev)) {
@@ -877,6 +868,9 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
877 ep->mii.mdio_write = ep93xx_mdio_write; 868 ep->mii.mdio_write = ep93xx_mdio_write;
878 ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */ 869 ep->mdc_divisor = 40; /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz. */
879 870
871 if (is_zero_ether_addr(dev->dev_addr))
872 random_ether_addr(dev->dev_addr);
873
880 err = register_netdev(dev); 874 err = register_netdev(dev);
881 if (err) { 875 if (err) {
882 dev_err(&pdev->dev, "Failed to register netdev\n"); 876 dev_err(&pdev->dev, "Failed to register netdev\n");
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index ce6f1ac25df8..3f4b4300f533 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1088,7 +1088,14 @@ static struct net_device * au1000_probe(int port_num)
1088 return NULL; 1088 return NULL;
1089 } 1089 }
1090 1090
1091 if ((err = register_netdev(dev)) != 0) { 1091 dev->base_addr = base;
1092 dev->irq = irq;
1093 dev->netdev_ops = &au1000_netdev_ops;
1094 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1095 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1096
1097 err = register_netdev(dev);
1098 if (err != 0) {
1092 printk(KERN_ERR "%s: Cannot register net device, error %d\n", 1099 printk(KERN_ERR "%s: Cannot register net device, error %d\n",
1093 DRV_NAME, err); 1100 DRV_NAME, err);
1094 free_netdev(dev); 1101 free_netdev(dev);
@@ -1209,12 +1216,6 @@ static struct net_device * au1000_probe(int port_num)
1209 aup->tx_db_inuse[i] = pDB; 1216 aup->tx_db_inuse[i] = pDB;
1210 } 1217 }
1211 1218
1212 dev->base_addr = base;
1213 dev->irq = irq;
1214 dev->netdev_ops = &au1000_netdev_ops;
1215 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1216 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1217
1218 /* 1219 /*
1219 * The boot code uses the ethernet controller, so reset it to start 1220 * The boot code uses the ethernet controller, so reset it to start
1220 * fresh. au1000_init() expects that the device is in reset state. 1221 * fresh. au1000_init() expects that the device is in reset state.
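
The au1000 rework fills in base_addr, irq, netdev_ops, the ethtool ops and the watchdog timeout before calling register_netdev(), because the interface can be opened the moment registration returns. A generic sketch of that finish-initialisation-then-publish ordering; the types and register_device() here are invented for illustration:

	#include <stdio.h>

	struct device {
		void (*open)(void);
	};

	static struct device *published;        /* what "registration" exposes   */

	static void register_device(struct device *d)
	{
		published = d;
		published->open();              /* may be used immediately       */
	}

	static void dev_open(void)
	{
		puts("device opened with valid ops");
	}

	int main(void)
	{
		struct device d = { 0 };

		d.open = dev_open;              /* set ops first ...             */
		register_device(&d);            /* ... then make the device live */
		return 0;
	}
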
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index e046943ef29d..2a9132343b66 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -912,9 +912,6 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
912 bp->istat = istat; 912 bp->istat = istat;
913 __b44_disable_ints(bp); 913 __b44_disable_ints(bp);
914 __napi_schedule(&bp->napi); 914 __napi_schedule(&bp->napi);
915 } else {
916 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
917 dev->name);
918 } 915 }
919 916
920irq_ack: 917irq_ack:
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index df32c109b7ac..772f6d2489ce 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -35,66 +35,16 @@ config CAN_CALC_BITTIMING
35 arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw". 35 arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
36 If unsure, say Y. 36 If unsure, say Y.
37 37
38config CAN_SJA1000
39 depends on CAN_DEV && HAS_IOMEM
40 tristate "Philips SJA1000"
41 ---help---
42 Driver for the SJA1000 CAN controllers from Philips or NXP
43
44config CAN_SJA1000_ISA
45 depends on CAN_SJA1000 && ISA
46 tristate "ISA Bus based legacy SJA1000 driver"
47 ---help---
48 This driver adds legacy support for SJA1000 chips connected to
49 the ISA bus using I/O port, memory mapped or indirect access.
50
51config CAN_SJA1000_PLATFORM
52 depends on CAN_SJA1000
53 tristate "Generic Platform Bus based SJA1000 driver"
54 ---help---
55 This driver adds support for the SJA1000 chips connected to
 56 the "platform bus" (the Linux abstraction for devices attached
 57 directly to the processor), which can be found on various
58 boards from Phytec (http://www.phytec.de) like the PCM027,
59 PCM038.
60
61config CAN_SJA1000_OF_PLATFORM
62 depends on CAN_SJA1000 && PPC_OF
63 tristate "Generic OF Platform Bus based SJA1000 driver"
64 ---help---
65 This driver adds support for the SJA1000 chips connected to
66 the OpenFirmware "platform bus" found on embedded systems with
67 OpenFirmware bindings, e.g. if you have a PowerPC based system
68 you may want to enable this option.
69
70config CAN_EMS_PCI
71 tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
72 depends on PCI && CAN_SJA1000
73 ---help---
74 This driver is for the one, two or four channel CPC-PCI,
75 CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
76 (http://www.ems-wuensche.de).
77
78config CAN_EMS_USB
79 tristate "EMS CPC-USB/ARM7 CAN/USB interface"
80 depends on USB && CAN_DEV
81 ---help---
82 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
 83 from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
84
85config CAN_KVASER_PCI
86 tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
87 depends on PCI && CAN_SJA1000
88 ---help---
 89 This driver is for the PCIcanx and PCIcan cards (1, 2 or
90 4 channel) from Kvaser (http://www.kvaser.com).
91
92config CAN_AT91 38config CAN_AT91
93 tristate "Atmel AT91 onchip CAN controller" 39 tristate "Atmel AT91 onchip CAN controller"
94 depends on CAN && CAN_DEV && ARCH_AT91SAM9263 40 depends on CAN_DEV && ARCH_AT91SAM9263
95 ---help--- 41 ---help---
96 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263. 42 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
97 43
44source "drivers/net/can/sja1000/Kconfig"
45
46source "drivers/net/can/usb/Kconfig"
47
98config CAN_DEBUG_DEVICES 48config CAN_DEBUG_DEVICES
99 bool "CAN devices debugging messages" 49 bool "CAN devices debugging messages"
100 depends on CAN 50 depends on CAN
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 564e31c9fee4..2868fe842a41 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -629,6 +629,11 @@ nla_put_failure:
629 return -EMSGSIZE; 629 return -EMSGSIZE;
630} 630}
631 631
632static size_t can_get_xstats_size(const struct net_device *dev)
633{
634 return sizeof(struct can_device_stats);
635}
636
632static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) 637static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
633{ 638{
634 struct can_priv *priv = netdev_priv(dev); 639 struct can_priv *priv = netdev_priv(dev);
@@ -657,6 +662,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
657 .changelink = can_changelink, 662 .changelink = can_changelink,
658 .get_size = can_get_size, 663 .get_size = can_get_size,
659 .fill_info = can_fill_info, 664 .fill_info = can_fill_info,
665 .get_xstats_size = can_get_xstats_size,
660 .fill_xstats = can_fill_xstats, 666 .fill_xstats = can_fill_xstats,
661}; 667};
662 668
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
new file mode 100644
index 000000000000..4c674927f247
--- /dev/null
+++ b/drivers/net/can/sja1000/Kconfig
@@ -0,0 +1,47 @@
1menuconfig CAN_SJA1000
2 tristate "Philips/NXP SJA1000 devices"
3 depends on CAN_DEV && HAS_IOMEM
4
5if CAN_SJA1000
6
7config CAN_SJA1000_ISA
8 tristate "ISA Bus based legacy SJA1000 driver"
9 depends on ISA
10 ---help---
11 This driver adds legacy support for SJA1000 chips connected to
12 the ISA bus using I/O port, memory mapped or indirect access.
13
14config CAN_SJA1000_PLATFORM
15 tristate "Generic Platform Bus based SJA1000 driver"
16 ---help---
17 This driver adds support for the SJA1000 chips connected to
 18	  the "platform bus" (the Linux abstraction for devices attached
 19	  directly to the processor), which can be found on various
20 boards from Phytec (http://www.phytec.de) like the PCM027,
21 PCM038.
22
23config CAN_SJA1000_OF_PLATFORM
24 tristate "Generic OF Platform Bus based SJA1000 driver"
25 depends on PPC_OF
26 ---help---
27 This driver adds support for the SJA1000 chips connected to
28 the OpenFirmware "platform bus" found on embedded systems with
29 OpenFirmware bindings, e.g. if you have a PowerPC based system
30 you may want to enable this option.
31
32config CAN_EMS_PCI
33 tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
34 depends on PCI
35 ---help---
36 This driver is for the one, two or four channel CPC-PCI,
37 CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
38 (http://www.ems-wuensche.de).
39
40config CAN_KVASER_PCI
41 tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
42 depends on PCI
43 ---help---
 44	  This driver is for the PCIcanx and PCIcan cards (1, 2 or
45 4 channel) from Kvaser (http://www.kvaser.com).
46
47endif
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
new file mode 100644
index 000000000000..bbc78e0b8a15
--- /dev/null
+++ b/drivers/net/can/usb/Kconfig
@@ -0,0 +1,10 @@
1menu "CAN USB interfaces"
2 depends on USB && CAN_DEV
3
4config CAN_EMS_USB
5 tristate "EMS CPC-USB/ARM7 CAN/USB interface"
6 ---help---
7 This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
 8	  from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
9
10endmenu
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index c3f75ba701b1..0afd51d4c7a5 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -3,3 +3,5 @@
3# 3#
4 4
5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o 5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
6
7ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index f86612857a73..6366061712f4 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -879,7 +879,7 @@ recycle:
879 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, 879 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
880 PCI_DMA_FROMDEVICE); 880 PCI_DMA_FROMDEVICE);
881 (*sd->pg_chunk.p_cnt)--; 881 (*sd->pg_chunk.p_cnt)--;
882 if (!*sd->pg_chunk.p_cnt) 882 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
883 pci_unmap_page(adap->pdev, 883 pci_unmap_page(adap->pdev,
884 sd->pg_chunk.mapping, 884 sd->pg_chunk.mapping,
885 fl->alloc_size, 885 fl->alloc_size,
@@ -2088,7 +2088,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2088 PCI_DMA_FROMDEVICE); 2088 PCI_DMA_FROMDEVICE);
2089 2089
2090 (*sd->pg_chunk.p_cnt)--; 2090 (*sd->pg_chunk.p_cnt)--;
2091 if (!*sd->pg_chunk.p_cnt) 2091 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2092 pci_unmap_page(adap->pdev, 2092 pci_unmap_page(adap->pdev,
2093 sd->pg_chunk.mapping, 2093 sd->pg_chunk.mapping,
2094 fl->alloc_size, 2094 fl->alloc_size,
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 3179521aee90..e3478314c002 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -164,16 +164,14 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
164# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7) 164# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7)
165 165
166/* EMAC mac_control register */ 166/* EMAC mac_control register */
167#define EMAC_MACCONTROL_TXPTYPE (0x200) 167#define EMAC_MACCONTROL_TXPTYPE BIT(9)
168#define EMAC_MACCONTROL_TXPACEEN (0x40) 168#define EMAC_MACCONTROL_TXPACEEN BIT(6)
169#define EMAC_MACCONTROL_MIIEN (0x20) 169#define EMAC_MACCONTROL_GMIIEN BIT(5)
170#define EMAC_MACCONTROL_GIGABITEN (0x80) 170#define EMAC_MACCONTROL_GIGABITEN BIT(7)
171#define EMAC_MACCONTROL_GIGABITEN_SHIFT (7) 171#define EMAC_MACCONTROL_FULLDUPLEXEN BIT(0)
172#define EMAC_MACCONTROL_FULLDUPLEXEN (0x1)
173#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15) 172#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15)
174 173
175/* GIGABIT MODE related bits */ 174/* GIGABIT MODE related bits */
176#define EMAC_DM646X_MACCONTORL_GMIIEN BIT(5)
177#define EMAC_DM646X_MACCONTORL_GIG BIT(7) 175#define EMAC_DM646X_MACCONTORL_GIG BIT(7)
178#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17) 176#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17)
179 177
@@ -192,10 +190,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
192#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF) 190#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF)
193 191
194/* MAC_IN_VECTOR (0x180) register bit fields */ 192/* MAC_IN_VECTOR (0x180) register bit fields */
195#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT (0x20000) 193#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT BIT(17)
196#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT (0x10000) 194#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT BIT(16)
197#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC (0x0100) 195#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC BIT(8)
198#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC (0x01) 196#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC BIT(0)
199 197
200/** NOTE:: For DM646x the IN_VECTOR has changed */ 198/** NOTE:: For DM646x the IN_VECTOR has changed */
201#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH) 199#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
@@ -203,7 +201,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
203#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26) 201#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
204#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27) 202#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
205 203
206
207/* CPPI bit positions */ 204/* CPPI bit positions */
208#define EMAC_CPPI_SOP_BIT BIT(31) 205#define EMAC_CPPI_SOP_BIT BIT(31)
209#define EMAC_CPPI_EOP_BIT BIT(30) 206#define EMAC_CPPI_EOP_BIT BIT(30)
@@ -750,8 +747,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
750 747
751 if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) { 748 if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
752 mac_control = emac_read(EMAC_MACCONTROL); 749 mac_control = emac_read(EMAC_MACCONTROL);
753 mac_control |= (EMAC_DM646X_MACCONTORL_GMIIEN | 750 mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
754 EMAC_DM646X_MACCONTORL_GIG |
755 EMAC_DM646X_MACCONTORL_GIGFORCE); 751 EMAC_DM646X_MACCONTORL_GIGFORCE);
756 } else { 752 } else {
757 /* Clear the GIG bit and GIGFORCE bit */ 753 /* Clear the GIG bit and GIGFORCE bit */
@@ -2108,7 +2104,7 @@ static int emac_hw_enable(struct emac_priv *priv)
2108 2104
2109 /* Enable MII */ 2105 /* Enable MII */
2110 val = emac_read(EMAC_MACCONTROL); 2106 val = emac_read(EMAC_MACCONTROL);
2111 val |= (EMAC_MACCONTROL_MIIEN); 2107 val |= (EMAC_MACCONTROL_GMIIEN);
2112 emac_write(EMAC_MACCONTROL, val); 2108 emac_write(EMAC_MACCONTROL, val);
2113 2109
2114 /* Enable NAPI and interrupts */ 2110 /* Enable NAPI and interrupts */
@@ -2140,9 +2136,6 @@ static int emac_poll(struct napi_struct *napi, int budget)
2140 u32 status = 0; 2136 u32 status = 0;
2141 u32 num_pkts = 0; 2137 u32 num_pkts = 0;
2142 2138
2143 if (!netif_running(ndev))
2144 return 0;
2145
2146 /* Check interrupt vectors and call packet processing */ 2139 /* Check interrupt vectors and call packet processing */
2147 status = emac_read(EMAC_MACINVECTOR); 2140 status = emac_read(EMAC_MACINVECTOR);
2148 2141
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3c29a20b751e..d269a68ce354 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -157,6 +157,7 @@
157#include <linux/init.h> 157#include <linux/init.h>
158#include <linux/pci.h> 158#include <linux/pci.h>
159#include <linux/dma-mapping.h> 159#include <linux/dma-mapping.h>
160#include <linux/dmapool.h>
160#include <linux/netdevice.h> 161#include <linux/netdevice.h>
161#include <linux/etherdevice.h> 162#include <linux/etherdevice.h>
162#include <linux/mii.h> 163#include <linux/mii.h>
@@ -602,6 +603,7 @@ struct nic {
602 struct mem *mem; 603 struct mem *mem;
603 dma_addr_t dma_addr; 604 dma_addr_t dma_addr;
604 605
606 struct pci_pool *cbs_pool;
605 dma_addr_t cbs_dma_addr; 607 dma_addr_t cbs_dma_addr;
606 u8 adaptive_ifs; 608 u8 adaptive_ifs;
607 u8 tx_threshold; 609 u8 tx_threshold;
@@ -1793,9 +1795,7 @@ static void e100_clean_cbs(struct nic *nic)
1793 nic->cb_to_clean = nic->cb_to_clean->next; 1795 nic->cb_to_clean = nic->cb_to_clean->next;
1794 nic->cbs_avail++; 1796 nic->cbs_avail++;
1795 } 1797 }
1796 pci_free_consistent(nic->pdev, 1798 pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1797 sizeof(struct cb) * nic->params.cbs.count,
1798 nic->cbs, nic->cbs_dma_addr);
1799 nic->cbs = NULL; 1799 nic->cbs = NULL;
1800 nic->cbs_avail = 0; 1800 nic->cbs_avail = 0;
1801 } 1801 }
@@ -1813,8 +1813,8 @@ static int e100_alloc_cbs(struct nic *nic)
1813 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; 1813 nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
1814 nic->cbs_avail = 0; 1814 nic->cbs_avail = 0;
1815 1815
1816 nic->cbs = pci_alloc_consistent(nic->pdev, 1816 nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
1817 sizeof(struct cb) * count, &nic->cbs_dma_addr); 1817 &nic->cbs_dma_addr);
1818 if (!nic->cbs) 1818 if (!nic->cbs)
1819 return -ENOMEM; 1819 return -ENOMEM;
1820 1820
@@ -2841,7 +2841,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2841 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n"); 2841 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
2842 goto err_out_free; 2842 goto err_out_free;
2843 } 2843 }
2844 2844 nic->cbs_pool = pci_pool_create(netdev->name,
2845 nic->pdev,
2846 nic->params.cbs.count * sizeof(struct cb),
2847 sizeof(u32),
2848 0);
2845 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n", 2849 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
2846 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), 2850 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2847 pdev->irq, netdev->dev_addr); 2851 pdev->irq, netdev->dev_addr);
@@ -2871,6 +2875,7 @@ static void __devexit e100_remove(struct pci_dev *pdev)
2871 unregister_netdev(netdev); 2875 unregister_netdev(netdev);
2872 e100_free(nic); 2876 e100_free(nic);
2873 pci_iounmap(pdev, nic->csr); 2877 pci_iounmap(pdev, nic->csr);
2878 pci_pool_destroy(nic->cbs_pool);
2874 free_netdev(netdev); 2879 free_netdev(netdev);
2875 pci_release_regions(pdev); 2880 pci_release_regions(pdev);
2876 pci_disable_device(pdev); 2881 pci_disable_device(pdev);
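The e100 change above replaces a one-off pci_alloc_consistent() buffer for the command blocks with a per-device DMA pool. Below is a minimal sketch of that pci_pool lifecycle, assuming the API of this kernel era; the structure and function names other than the pci_pool_* calls are hypothetical placeholders, not the driver's actual code.

	#include <linux/pci.h>
	#include <linux/dmapool.h>

	/* Hypothetical ring element; stands in for e100's struct cb. */
	struct ring_entry {
		u32 status;
		u32 command;
	};

	static int example_ring_setup(struct pci_dev *pdev, unsigned int count)
	{
		struct pci_pool *pool;
		void *ring;
		dma_addr_t ring_dma;

		/* Created once, e.g. at probe time: one pool sized for the
		 * whole ring, aligned to 32 bits. */
		pool = pci_pool_create("example-ring", pdev,
				       count * sizeof(struct ring_entry),
				       sizeof(u32), 0);
		if (!pool)
			return -ENOMEM;

		/* Allocated from the pool instead of pci_alloc_consistent(). */
		ring = pci_pool_alloc(pool, GFP_KERNEL, &ring_dma);
		if (!ring) {
			pci_pool_destroy(pool);
			return -ENOMEM;
		}

		/* ... use ring / ring_dma for DMA descriptors ... */

		pci_pool_free(pool, ring, ring_dma);	/* cf. e100_clean_cbs() */
		pci_pool_destroy(pool);			/* cf. e100_remove() */
		return 0;
	}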
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 189dfa2d6c76..3e187b0e4203 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -141,6 +141,8 @@ struct e1000_info;
141#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */ 141#define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */
142#define HV_TNCRS_LOWER PHY_REG(778, 30) 142#define HV_TNCRS_LOWER PHY_REG(778, 30)
143 143
144#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
145
144/* BM PHY Copper Specific Status */ 146/* BM PHY Copper Specific Status */
145#define BM_CS_STATUS 17 147#define BM_CS_STATUS 17
146#define BM_CS_STATUS_LINK_UP 0x0400 148#define BM_CS_STATUS_LINK_UP 0x0400
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 1bf4d2a5d34f..e82638ecae88 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -327,10 +327,18 @@ static int e1000_set_pauseparam(struct net_device *netdev,
327 327
328 hw->fc.current_mode = hw->fc.requested_mode; 328 hw->fc.current_mode = hw->fc.requested_mode;
329 329
330 retval = ((hw->phy.media_type == e1000_media_type_fiber) ? 330 if (hw->phy.media_type == e1000_media_type_fiber) {
331 hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); 331 retval = hw->mac.ops.setup_link(hw);
332 /* implicit goto out */
333 } else {
334 retval = e1000e_force_mac_fc(hw);
335 if (retval)
336 goto out;
337 e1000e_set_fc_watermarks(hw);
338 }
332 } 339 }
333 340
341out:
334 clear_bit(__E1000_RESETTING, &adapter->state); 342 clear_bit(__E1000_RESETTING, &adapter->state);
335 return retval; 343 return retval;
336} 344}
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 51ddb04ab195..eff3f4783655 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1118,7 +1118,8 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1118 oem_reg |= HV_OEM_BITS_LPLU; 1118 oem_reg |= HV_OEM_BITS_LPLU;
1119 } 1119 }
1120 /* Restart auto-neg to activate the bits */ 1120 /* Restart auto-neg to activate the bits */
1121 oem_reg |= HV_OEM_BITS_RESTART_AN; 1121 if (!e1000_check_reset_block(hw))
1122 oem_reg |= HV_OEM_BITS_RESTART_AN;
1122 ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg); 1123 ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg);
1123 1124
1124out: 1125out:
@@ -3558,6 +3559,7 @@ struct e1000_info e1000_pch_info = {
3558 | FLAG_HAS_AMT 3559 | FLAG_HAS_AMT
3559 | FLAG_HAS_FLASH 3560 | FLAG_HAS_FLASH
3560 | FLAG_HAS_JUMBO_FRAMES 3561 | FLAG_HAS_JUMBO_FRAMES
3562 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
3561 | FLAG_APME_IN_WUC, 3563 | FLAG_APME_IN_WUC,
3562 .pba = 26, 3564 .pba = 26,
3563 .max_hw_frame_size = 4096, 3565 .max_hw_frame_size = 4096,
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 0687c6aa4e46..fad8f9ea0043 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2769,25 +2769,38 @@ void e1000e_reset(struct e1000_adapter *adapter)
2769 /* 2769 /*
2770 * flow control settings 2770 * flow control settings
2771 * 2771 *
2772 * The high water mark must be low enough to fit two full frame 2772 * The high water mark must be low enough to fit one full frame
2773 * (or the size used for early receive) above it in the Rx FIFO. 2773 * (or the size used for early receive) above it in the Rx FIFO.
2774 * Set it to the lower of: 2774 * Set it to the lower of:
2775 * - 90% of the Rx FIFO size, and 2775 * - 90% of the Rx FIFO size, and
2776 * - the full Rx FIFO size minus the early receive size (for parts 2776 * - the full Rx FIFO size minus the early receive size (for parts
2777 * with ERT support assuming ERT set to E1000_ERT_2048), or 2777 * with ERT support assuming ERT set to E1000_ERT_2048), or
2778 * - the full Rx FIFO size minus two full frames 2778 * - the full Rx FIFO size minus one full frame
2779 */ 2779 */
2780 if ((adapter->flags & FLAG_HAS_ERT) && 2780 if (hw->mac.type == e1000_pchlan) {
2781 (adapter->netdev->mtu > ETH_DATA_LEN)) 2781 /*
2782 hwm = min(((pba << 10) * 9 / 10), 2782 * Workaround PCH LOM adapter hangs with certain network
2783 ((pba << 10) - (E1000_ERT_2048 << 3))); 2783 * loads. If hangs persist, try disabling Tx flow control.
2784 else 2784 */
2785 hwm = min(((pba << 10) * 9 / 10), 2785 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2786 ((pba << 10) - (2 * adapter->max_frame_size))); 2786 fc->high_water = 0x3500;
2787 fc->low_water = 0x1500;
2788 } else {
2789 fc->high_water = 0x5000;
2790 fc->low_water = 0x3000;
2791 }
2792 } else {
2793 if ((adapter->flags & FLAG_HAS_ERT) &&
2794 (adapter->netdev->mtu > ETH_DATA_LEN))
2795 hwm = min(((pba << 10) * 9 / 10),
2796 ((pba << 10) - (E1000_ERT_2048 << 3)));
2797 else
2798 hwm = min(((pba << 10) * 9 / 10),
2799 ((pba << 10) - adapter->max_frame_size));
2787 2800
2788 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ 2801 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
2789 fc->low_water = (fc->high_water - (2 * adapter->max_frame_size)); 2802 fc->low_water = fc->high_water - 8;
2790 fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */ 2803 }
2791 2804
2792 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) 2805 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
2793 fc->pause_time = 0xFFFF; 2806 fc->pause_time = 0xFFFF;
@@ -2813,6 +2826,10 @@ void e1000e_reset(struct e1000_adapter *adapter)
2813 if (mac->ops.init_hw(hw)) 2826 if (mac->ops.init_hw(hw))
2814 e_err("Hardware Error\n"); 2827 e_err("Hardware Error\n");
2815 2828
2829 /* additional part of the flow-control workaround above */
2830 if (hw->mac.type == e1000_pchlan)
2831 ew32(FCRTV_PCH, 0x1000);
2832
2816 e1000_update_mng_vlan(adapter); 2833 e1000_update_mng_vlan(adapter);
2817 2834
2818 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 2835 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -3610,7 +3627,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3610 case SPEED_100: 3627 case SPEED_100:
3611 txb2b = 0; 3628 txb2b = 0;
3612 netdev->tx_queue_len = 100; 3629 netdev->tx_queue_len = 100;
3613 /* maybe add some timeout factor ? */ 3630 adapter->tx_timeout_factor = 10;
3614 break; 3631 break;
3615 } 3632 }
3616 3633
@@ -4288,8 +4305,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4288 4305
4289 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 4306 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4290 msleep(1); 4307 msleep(1);
4291 /* e1000e_down has a dependency on max_frame_size */ 4308 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
4292 adapter->max_frame_size = max_frame; 4309 adapter->max_frame_size = max_frame;
4310 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4311 netdev->mtu = new_mtu;
4293 if (netif_running(netdev)) 4312 if (netif_running(netdev))
4294 e1000e_down(adapter); 4313 e1000e_down(adapter);
4295 4314
@@ -4319,9 +4338,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4319 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 4338 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
4320 + ETH_FCS_LEN; 4339 + ETH_FCS_LEN;
4321 4340
4322 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4323 netdev->mtu = new_mtu;
4324
4325 if (netif_running(netdev)) 4341 if (netif_running(netdev))
4326 e1000e_up(adapter); 4342 e1000e_up(adapter);
4327 else 4343 else
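The non-PCH branch of the flow-control change above sets the high watermark to the smaller of 90% of the Rx FIFO and the FIFO size minus one full frame, then rounds it down to the register's 8-byte granularity. A small illustrative helper, using hypothetical example values rather than driver code:

	#include <linux/kernel.h>	/* min() */

	/* Illustrative only: watermark arithmetic from the non-PCH branch.
	 * pba is the Rx packet buffer size in KB; max_frame is the largest
	 * expected frame in bytes (e.g. 26 and 1522 would be typical inputs). */
	static void example_fc_watermarks(u32 pba, u32 max_frame,
					  u32 *high_water, u32 *low_water)
	{
		u32 fifo = pba << 10;			/* KB -> bytes */
		u32 hwm = min(fifo * 9 / 10,		/* 90% of the Rx FIFO */
			      fifo - max_frame);	/* FIFO minus one frame */

		*high_water = hwm & 0xFFF8;	/* E1000_FCRTH_RTH: 8-byte steps */
		*low_water  = *high_water - 8;	/* one granule below the high mark */
	}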
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 03175b3a2c9e..85f955f70417 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -71,7 +71,6 @@ static const u16 e1000_igp_2_cable_length_table[] =
71#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) 71#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
72#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ 72#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
73#define I82577_CTRL_REG 23 73#define I82577_CTRL_REG 23
74#define I82577_CTRL_DOWNSHIFT_MASK (7 << 10)
75 74
76/* 82577 specific PHY registers */ 75/* 82577 specific PHY registers */
77#define I82577_PHY_CTRL_2 18 76#define I82577_PHY_CTRL_2 18
@@ -660,15 +659,6 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
660 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; 659 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
661 660
662 ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data); 661 ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data);
663 if (ret_val)
664 goto out;
665
666 /* Set number of link attempts before downshift */
667 ret_val = phy->ops.read_phy_reg(hw, I82577_CTRL_REG, &phy_data);
668 if (ret_val)
669 goto out;
670 phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK;
671 ret_val = phy->ops.write_phy_reg(hw, I82577_CTRL_REG, phy_data);
672 662
673out: 663out:
674 return ret_val; 664 return ret_val;
@@ -2658,19 +2648,18 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2658 page = 0; 2648 page = 0;
2659 2649
2660 if (reg > MAX_PHY_MULTI_PAGE_REG) { 2650 if (reg > MAX_PHY_MULTI_PAGE_REG) {
2661 if ((hw->phy.type != e1000_phy_82578) || 2651 u32 phy_addr = hw->phy.addr;
2662 ((reg != I82578_ADDR_REG) &&
2663 (reg != I82578_ADDR_REG + 1))) {
2664 u32 phy_addr = hw->phy.addr;
2665 2652
2666 hw->phy.addr = 1; 2653 hw->phy.addr = 1;
2667 2654
2668 /* Page is shifted left, PHY expects (page x 32) */ 2655 /* Page is shifted left, PHY expects (page x 32) */
2669 ret_val = e1000e_write_phy_reg_mdic(hw, 2656 ret_val = e1000e_write_phy_reg_mdic(hw,
2670 IGP01E1000_PHY_PAGE_SELECT, 2657 IGP01E1000_PHY_PAGE_SELECT,
2671 (page << IGP_PAGE_SHIFT)); 2658 (page << IGP_PAGE_SHIFT));
2672 hw->phy.addr = phy_addr; 2659 hw->phy.addr = phy_addr;
2673 } 2660
2661 if (ret_val)
2662 goto out;
2674 } 2663 }
2675 2664
2676 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2665 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
@@ -2678,7 +2667,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2678out: 2667out:
2679 /* Revert to MDIO fast mode, if applicable */ 2668 /* Revert to MDIO fast mode, if applicable */
2680 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2669 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2681 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2670 ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
2682 2671
2683 if (!locked) 2672 if (!locked)
2684 hw->phy.ops.release_phy(hw); 2673 hw->phy.ops.release_phy(hw);
@@ -2784,19 +2773,18 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2784 } 2773 }
2785 2774
2786 if (reg > MAX_PHY_MULTI_PAGE_REG) { 2775 if (reg > MAX_PHY_MULTI_PAGE_REG) {
2787 if ((hw->phy.type != e1000_phy_82578) || 2776 u32 phy_addr = hw->phy.addr;
2788 ((reg != I82578_ADDR_REG) &&
2789 (reg != I82578_ADDR_REG + 1))) {
2790 u32 phy_addr = hw->phy.addr;
2791 2777
2792 hw->phy.addr = 1; 2778 hw->phy.addr = 1;
2793 2779
2794 /* Page is shifted left, PHY expects (page x 32) */ 2780 /* Page is shifted left, PHY expects (page x 32) */
2795 ret_val = e1000e_write_phy_reg_mdic(hw, 2781 ret_val = e1000e_write_phy_reg_mdic(hw,
2796 IGP01E1000_PHY_PAGE_SELECT, 2782 IGP01E1000_PHY_PAGE_SELECT,
2797 (page << IGP_PAGE_SHIFT)); 2783 (page << IGP_PAGE_SHIFT));
2798 hw->phy.addr = phy_addr; 2784 hw->phy.addr = phy_addr;
2799 } 2785
2786 if (ret_val)
2787 goto out;
2800 } 2788 }
2801 2789
2802 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2790 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
@@ -2805,7 +2793,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2805out: 2793out:
2806 /* Revert to MDIO fast mode, if applicable */ 2794 /* Revert to MDIO fast mode, if applicable */
2807 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) 2795 if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
2808 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 2796 ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
2809 2797
2810 if (!locked) 2798 if (!locked)
2811 hw->phy.ops.release_phy(hw); 2799 hw->phy.ops.release_phy(hw);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index e1da4666f204..3116601dbfea 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5821,10 +5821,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5821 dev->dev_addr); 5821 dev->dev_addr);
5822 dev_printk(KERN_ERR, &pci_dev->dev, 5822 dev_printk(KERN_ERR, &pci_dev->dev,
5823 "Please complain to your hardware vendor. Switching to a random MAC.\n"); 5823 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5824 dev->dev_addr[0] = 0x00; 5824 random_ether_addr(dev->dev_addr);
5825 dev->dev_addr[1] = 0x00;
5826 dev->dev_addr[2] = 0x6c;
5827 get_random_bytes(&dev->dev_addr[3], 3);
5828 } 5825 }
5829 5826
5830 dprintk(KERN_DEBUG "%s: MAC Address %pM\n", 5827 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index d34adf99fc6a..8a61b597a169 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -263,8 +263,8 @@ struct emac_regs {
263 263
264 264
265/* EMACx_TRTR */ 265/* EMACx_TRTR */
266#define EMAC_TRTR_SHIFT_EMAC4 27 266#define EMAC_TRTR_SHIFT_EMAC4 24
267#define EMAC_TRTR_SHIFT 24 267#define EMAC_TRTR_SHIFT 27
268 268
269/* EMAC specific TX descriptor control fields (write access) */ 269/* EMAC specific TX descriptor control fields (write access) */
270#define EMAC_TX_CTRL_GFCS 0x0200 270#define EMAC_TX_CTRL_GFCS 0x0200
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 5bd9e6bf6f2f..a456578b8578 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -240,11 +240,11 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
240static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter, 240static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
241 struct ixgbe_ring *tx_ring) 241 struct ixgbe_ring *tx_ring)
242{ 242{
243 int tc;
244 u32 txoff = IXGBE_TFCS_TXOFF; 243 u32 txoff = IXGBE_TFCS_TXOFF;
245 244
246#ifdef CONFIG_IXGBE_DCB 245#ifdef CONFIG_IXGBE_DCB
247 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 246 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
247 int tc;
248 int reg_idx = tx_ring->reg_idx; 248 int reg_idx = tx_ring->reg_idx;
249 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 249 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
250 250
@@ -5994,6 +5994,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
5994 } else { 5994 } else {
5995 pci_set_master(pdev); 5995 pci_set_master(pdev);
5996 pci_restore_state(pdev); 5996 pci_restore_state(pdev);
5997 pci_save_state(pdev);
5997 5998
5998 pci_wake_from_d3(pdev, false); 5999 pci_wake_from_d3(pdev, false);
5999 6000
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 0be14d702beb..c146304d8d6c 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -568,6 +568,16 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
568 iowrite16(*wptr++, ks->hw_addr); 568 iowrite16(*wptr++, ks->hw_addr);
569} 569}
570 570
571static void ks_disable_int(struct ks_net *ks)
572{
573 ks_wrreg16(ks, KS_IER, 0x0000);
574} /* ks_disable_int */
575
576static void ks_enable_int(struct ks_net *ks)
577{
578 ks_wrreg16(ks, KS_IER, ks->rc_ier);
579} /* ks_enable_int */
580
571/** 581/**
572 * ks_tx_fifo_space - return the available hardware buffer size. 582 * ks_tx_fifo_space - return the available hardware buffer size.
573 * @ks: The chip information 583 * @ks: The chip information
@@ -681,6 +691,47 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op)
681} 691}
682 692
683 693
694void ks_enable_qmu(struct ks_net *ks)
695{
696 u16 w;
697
698 w = ks_rdreg16(ks, KS_TXCR);
699 /* Enables QMU Transmit (TXCR). */
700 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
701
702 /*
703 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
704 * Enable
705 */
706
707 w = ks_rdreg16(ks, KS_RXQCR);
708 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
709
710 /* Enables QMU Receive (RXCR1). */
711 w = ks_rdreg16(ks, KS_RXCR1);
712 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
713 ks->enabled = true;
714} /* ks_enable_qmu */
715
716static void ks_disable_qmu(struct ks_net *ks)
717{
718 u16 w;
719
720 w = ks_rdreg16(ks, KS_TXCR);
721
722 /* Disables QMU Transmit (TXCR). */
723 w &= ~TXCR_TXE;
724 ks_wrreg16(ks, KS_TXCR, w);
725
726 /* Disables QMU Receive (RXCR1). */
727 w = ks_rdreg16(ks, KS_RXCR1);
728 w &= ~RXCR1_RXE ;
729 ks_wrreg16(ks, KS_RXCR1, w);
730
731 ks->enabled = false;
732
733} /* ks_disable_qmu */
734
684/** 735/**
685 * ks_read_qmu - read 1 pkt data from the QMU. 736 * ks_read_qmu - read 1 pkt data from the QMU.
686 * @ks: The chip information 737 * @ks: The chip information
@@ -752,7 +803,7 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
752 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) { 803 (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
753 skb_reserve(skb, 2); 804 skb_reserve(skb, 2);
754 /* read data block including CRC 4 bytes */ 805 /* read data block including CRC 4 bytes */
755 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4); 806 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
756 skb_put(skb, frame_hdr->len); 807 skb_put(skb, frame_hdr->len);
757 skb->dev = netdev; 808 skb->dev = netdev;
758 skb->protocol = eth_type_trans(skb, netdev); 809 skb->protocol = eth_type_trans(skb, netdev);
@@ -861,7 +912,7 @@ static int ks_net_open(struct net_device *netdev)
861 ks_dbg(ks, "%s - entry\n", __func__); 912 ks_dbg(ks, "%s - entry\n", __func__);
862 913
863 /* reset the HW */ 914 /* reset the HW */
864 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks); 915 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
865 916
866 if (err) { 917 if (err) {
867 printk(KERN_ERR "Failed to request IRQ: %d: %d\n", 918 printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
@@ -869,6 +920,15 @@ static int ks_net_open(struct net_device *netdev)
869 return err; 920 return err;
870 } 921 }
871 922
923 /* wake up powermode to normal mode */
924 ks_set_powermode(ks, PMECR_PM_NORMAL);
925 mdelay(1); /* wait for normal mode to take effect */
926
927 ks_wrreg16(ks, KS_ISR, 0xffff);
928 ks_enable_int(ks);
929 ks_enable_qmu(ks);
930 netif_start_queue(ks->netdev);
931
872 if (netif_msg_ifup(ks)) 932 if (netif_msg_ifup(ks))
873 ks_dbg(ks, "network device %s up\n", netdev->name); 933 ks_dbg(ks, "network device %s up\n", netdev->name);
874 934
@@ -892,19 +952,14 @@ static int ks_net_stop(struct net_device *netdev)
892 952
893 netif_stop_queue(netdev); 953 netif_stop_queue(netdev);
894 954
895 kfree(ks->frame_head_info);
896
897 mutex_lock(&ks->lock); 955 mutex_lock(&ks->lock);
898 956
899 /* turn off the IRQs and ack any outstanding */ 957 /* turn off the IRQs and ack any outstanding */
900 ks_wrreg16(ks, KS_IER, 0x0000); 958 ks_wrreg16(ks, KS_IER, 0x0000);
901 ks_wrreg16(ks, KS_ISR, 0xffff); 959 ks_wrreg16(ks, KS_ISR, 0xffff);
902 960
903 /* shutdown RX process */ 961 /* shutdown RX/TX QMU */
904 ks_wrreg16(ks, KS_RXCR1, 0x0000); 962 ks_disable_qmu(ks);
905
906 /* shutdown TX process */
907 ks_wrreg16(ks, KS_TXCR, 0x0000);
908 963
909 /* set powermode to soft power down to save power */ 964 /* set powermode to soft power down to save power */
910 ks_set_powermode(ks, PMECR_PM_SOFTDOWN); 965 ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
@@ -929,17 +984,8 @@ static int ks_net_stop(struct net_device *netdev)
929 */ 984 */
930static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len) 985static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
931{ 986{
932 unsigned fid = ks->fid;
933
934 fid = ks->fid;
935 ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
936
937 /* reduce the tx interrupt occurrances. */
938 if (!fid)
939 fid |= TXFR_TXIC; /* irq on completion */
940
941 /* start header at txb[0] to align txw entries */ 987 /* start header at txb[0] to align txw entries */
942 ks->txh.txw[0] = cpu_to_le16(fid); 988 ks->txh.txw[0] = 0;
943 ks->txh.txw[1] = cpu_to_le16(len); 989 ks->txh.txw[1] = cpu_to_le16(len);
944 990
945 /* 1. set sudo-DMA mode */ 991 /* 1. set sudo-DMA mode */
@@ -957,16 +1003,6 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
957 ; 1003 ;
958} 1004}
959 1005
960static void ks_disable_int(struct ks_net *ks)
961{
962 ks_wrreg16(ks, KS_IER, 0x0000);
963} /* ks_disable_int */
964
965static void ks_enable_int(struct ks_net *ks)
966{
967 ks_wrreg16(ks, KS_IER, ks->rc_ier);
968} /* ks_enable_int */
969
970/** 1006/**
971 * ks_start_xmit - transmit packet 1007 * ks_start_xmit - transmit packet
972 * @skb : The buffer to transmit 1008 * @skb : The buffer to transmit
@@ -1410,25 +1446,6 @@ static int ks_read_selftest(struct ks_net *ks)
1410 return ret; 1446 return ret;
1411} 1447}
1412 1448
1413static void ks_disable(struct ks_net *ks)
1414{
1415 u16 w;
1416
1417 w = ks_rdreg16(ks, KS_TXCR);
1418
1419 /* Disables QMU Transmit (TXCR). */
1420 w &= ~TXCR_TXE;
1421 ks_wrreg16(ks, KS_TXCR, w);
1422
1423 /* Disables QMU Receive (RXCR1). */
1424 w = ks_rdreg16(ks, KS_RXCR1);
1425 w &= ~RXCR1_RXE ;
1426 ks_wrreg16(ks, KS_RXCR1, w);
1427
1428 ks->enabled = false;
1429
1430} /* ks_disable */
1431
1432static void ks_setup(struct ks_net *ks) 1449static void ks_setup(struct ks_net *ks)
1433{ 1450{
1434 u16 w; 1451 u16 w;
@@ -1463,7 +1480,7 @@ static void ks_setup(struct ks_net *ks)
1463 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; 1480 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
1464 ks_wrreg16(ks, KS_TXCR, w); 1481 ks_wrreg16(ks, KS_TXCR, w);
1465 1482
1466 w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE; 1483 w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
1467 1484
1468 if (ks->promiscuous) /* bPromiscuous */ 1485 if (ks->promiscuous) /* bPromiscuous */
1469 w |= (RXCR1_RXAE | RXCR1_RXINVF); 1486 w |= (RXCR1_RXAE | RXCR1_RXINVF);
@@ -1486,28 +1503,6 @@ static void ks_setup_int(struct ks_net *ks)
1486 ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI); 1503 ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
1487} /* ks_setup_int */ 1504} /* ks_setup_int */
1488 1505
1489void ks_enable(struct ks_net *ks)
1490{
1491 u16 w;
1492
1493 w = ks_rdreg16(ks, KS_TXCR);
1494 /* Enables QMU Transmit (TXCR). */
1495 ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
1496
1497 /*
1498 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
1499 * Enable
1500 */
1501
1502 w = ks_rdreg16(ks, KS_RXQCR);
1503 ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
1504
1505 /* Enables QMU Receive (RXCR1). */
1506 w = ks_rdreg16(ks, KS_RXCR1);
1507 ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
1508 ks->enabled = true;
1509} /* ks_enable */
1510
1511static int ks_hw_init(struct ks_net *ks) 1506static int ks_hw_init(struct ks_net *ks)
1512{ 1507{
1513#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES) 1508#define MHEADER_SIZE (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
@@ -1612,11 +1607,9 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1612 1607
1613 ks_soft_reset(ks, GRR_GSR); 1608 ks_soft_reset(ks, GRR_GSR);
1614 ks_hw_init(ks); 1609 ks_hw_init(ks);
1615 ks_disable(ks); 1610 ks_disable_qmu(ks);
1616 ks_setup(ks); 1611 ks_setup(ks);
1617 ks_setup_int(ks); 1612 ks_setup_int(ks);
1618 ks_enable_int(ks);
1619 ks_enable(ks);
1620 memcpy(netdev->dev_addr, ks->mac_addr, 6); 1613 memcpy(netdev->dev_addr, ks->mac_addr, 6);
1621 1614
1622 data = ks_rdreg16(ks, KS_OBCR); 1615 data = ks_rdreg16(ks, KS_OBCR);
@@ -1658,6 +1651,7 @@ static int __devexit ks8851_remove(struct platform_device *pdev)
1658 struct ks_net *ks = netdev_priv(netdev); 1651 struct ks_net *ks = netdev_priv(netdev);
1659 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1652 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1660 1653
1654 kfree(ks->frame_head_info);
1661 unregister_netdev(netdev); 1655 unregister_netdev(netdev);
1662 iounmap(ks->hw_addr); 1656 iounmap(ks->hw_addr);
1663 free_netdev(netdev); 1657 free_netdev(netdev);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 3aabfd9dd212..2490aa39804c 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -360,6 +360,7 @@ static int macvlan_init(struct net_device *dev)
360 dev->state = (dev->state & ~MACVLAN_STATE_MASK) | 360 dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
361 (lowerdev->state & MACVLAN_STATE_MASK); 361 (lowerdev->state & MACVLAN_STATE_MASK);
362 dev->features = lowerdev->features & MACVLAN_FEATURES; 362 dev->features = lowerdev->features & MACVLAN_FEATURES;
363 dev->gso_max_size = lowerdev->gso_max_size;
363 dev->iflink = lowerdev->ifindex; 364 dev->iflink = lowerdev->ifindex;
364 dev->hard_header_len = lowerdev->hard_header_len; 365 dev->hard_header_len = lowerdev->hard_header_len;
365 366
@@ -596,6 +597,7 @@ static int macvlan_device_event(struct notifier_block *unused,
596 case NETDEV_FEAT_CHANGE: 597 case NETDEV_FEAT_CHANGE:
597 list_for_each_entry(vlan, &port->vlans, list) { 598 list_for_each_entry(vlan, &port->vlans, list) {
598 vlan->dev->features = dev->features & MACVLAN_FEATURES; 599 vlan->dev->features = dev->features & MACVLAN_FEATURES;
600 vlan->dev->gso_max_size = dev->gso_max_size;
599 netdev_features_change(vlan->dev); 601 netdev_features_change(vlan->dev);
600 } 602 }
601 break; 603 break;
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 7384f59df615..e1237b802872 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1163,6 +1163,8 @@ struct netxen_adapter {
1163 u32 int_vec_bit; 1163 u32 int_vec_bit;
1164 u32 heartbit; 1164 u32 heartbit;
1165 1165
1166 u8 mac_addr[ETH_ALEN];
1167
1166 struct netxen_adapter_stats stats; 1168 struct netxen_adapter_stats stats;
1167 1169
1168 struct netxen_recv_context recv_ctx; 1170 struct netxen_recv_context recv_ctx;
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 1c46da632125..17bb3818d84e 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -545,6 +545,8 @@ enum {
545#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094) 545#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094)
546#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098) 546#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098)
547#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc) 547#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc)
548#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac)
549#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0)
548#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128) 550#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128)
549#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c) 551#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c)
550 552
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 3185a98b0917..52a3798d8d94 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -383,24 +383,51 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
383 383
384int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) 384int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
385{ 385{
386 __u32 reg; 386 u32 mac_cfg;
387 u32 cnt = 0;
388 __u32 reg = 0x0200;
387 u32 port = adapter->physical_port; 389 u32 port = adapter->physical_port;
390 u16 board_type = adapter->ahw.board_type;
388 391
389 if (port > NETXEN_NIU_MAX_XG_PORTS) 392 if (port > NETXEN_NIU_MAX_XG_PORTS)
390 return -EINVAL; 393 return -EINVAL;
391 394
392 reg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port)); 395 mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
393 if (mode == NETXEN_NIU_PROMISC_MODE) 396 mac_cfg &= ~0x4;
394 reg = (reg | 0x2000UL); 397 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
395 else
396 reg = (reg & ~0x2000UL);
397 398
398 if (mode == NETXEN_NIU_ALLMULTI_MODE) 399 if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) ||
399 reg = (reg | 0x1000UL); 400 (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ))
400 else 401 reg = (0x20 << port);
401 reg = (reg & ~0x1000UL); 402
403 NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg);
404
405 mdelay(10);
406
407 while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20)
408 mdelay(10);
409
410 if (cnt < 20) {
411
412 reg = NXRD32(adapter,
413 NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
414
415 if (mode == NETXEN_NIU_PROMISC_MODE)
416 reg = (reg | 0x2000UL);
417 else
418 reg = (reg & ~0x2000UL);
419
420 if (mode == NETXEN_NIU_ALLMULTI_MODE)
421 reg = (reg | 0x1000UL);
422 else
423 reg = (reg & ~0x1000UL);
424
425 NXWR32(adapter,
426 NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
427 }
402 428
403 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg); 429 mac_cfg |= 0x4;
430 NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
404 431
405 return 0; 432 return 0;
406} 433}
@@ -436,7 +463,7 @@ netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
436{ 463{
437 u32 val = 0; 464 u32 val = 0;
438 u16 port = adapter->physical_port; 465 u16 port = adapter->physical_port;
439 u8 *addr = adapter->netdev->dev_addr; 466 u8 *addr = adapter->mac_addr;
440 467
441 if (adapter->mc_enabled) 468 if (adapter->mc_enabled)
442 return 0; 469 return 0;
@@ -465,7 +492,7 @@ netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
465{ 492{
466 u32 val = 0; 493 u32 val = 0;
467 u16 port = adapter->physical_port; 494 u16 port = adapter->physical_port;
468 u8 *addr = adapter->netdev->dev_addr; 495 u8 *addr = adapter->mac_addr;
469 496
470 if (!adapter->mc_enabled) 497 if (!adapter->mc_enabled)
471 return 0; 498 return 0;
@@ -660,7 +687,7 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
660 687
661 list_splice_tail_init(&adapter->mac_list, &del_list); 688 list_splice_tail_init(&adapter->mac_list, &del_list);
662 689
663 nx_p3_nic_add_mac(adapter, netdev->dev_addr, &del_list); 690 nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
664 nx_p3_nic_add_mac(adapter, bcast_addr, &del_list); 691 nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);
665 692
666 if (netdev->flags & IFF_PROMISC) { 693 if (netdev->flags & IFF_PROMISC) {
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index e40b914d6faf..8a0904368e08 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -544,6 +544,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
544 continue; 544 continue;
545 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ 545 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
546 continue; 546 continue;
547 if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
548 continue;
547 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18)) 549 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
548 buf[i].data = 0x1020; 550 buf[i].data = 0x1020;
549 /* skip the function enable register */ 551 /* skip the function enable register */
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 0b4a56a8c8d5..3bf78dbfbf0f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -437,6 +437,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
437 netdev->dev_addr[i] = *(p + 5 - i); 437 netdev->dev_addr[i] = *(p + 5 - i);
438 438
439 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); 439 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
440 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
440 441
441 /* set station address */ 442 /* set station address */
442 443
@@ -459,6 +460,7 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p)
459 netxen_napi_disable(adapter); 460 netxen_napi_disable(adapter);
460 } 461 }
461 462
463 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
462 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 464 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
463 adapter->macaddr_set(adapter, addr->sa_data); 465 adapter->macaddr_set(adapter, addr->sa_data);
464 466
@@ -956,7 +958,7 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
956 return err; 958 return err;
957 } 959 }
958 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) 960 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
959 adapter->macaddr_set(adapter, netdev->dev_addr); 961 adapter->macaddr_set(adapter, adapter->mac_addr);
960 962
961 adapter->set_multi(netdev); 963 adapter->set_multi(netdev);
962 adapter->set_mtu(adapter, netdev->mtu); 964 adapter->set_mtu(adapter, netdev->mtu);
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index b58965a2b3ae..17a27225cc98 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -118,14 +118,6 @@ INT_MODULE_PARM(full_duplex, 0);
118/* Autodetect link polarity reversal? */ 118/* Autodetect link polarity reversal? */
119INT_MODULE_PARM(auto_polarity, 1); 119INT_MODULE_PARM(auto_polarity, 1);
120 120
121#ifdef PCMCIA_DEBUG
122INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
123#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
124static char *version =
125"3c574_cs.c 1.65ac1 2003/04/07 Donald Becker/David Hinds, becker@scyld.com.\n";
126#else
127#define DEBUG(n, args...)
128#endif
129 121
130/*====================================================================*/ 122/*====================================================================*/
131 123
@@ -278,7 +270,7 @@ static int tc574_probe(struct pcmcia_device *link)
278 struct el3_private *lp; 270 struct el3_private *lp;
279 struct net_device *dev; 271 struct net_device *dev;
280 272
281 DEBUG(0, "3c574_attach()\n"); 273 dev_dbg(&link->dev, "3c574_attach()\n");
282 274
283 /* Create the PC card device object. */ 275 /* Create the PC card device object. */
284 dev = alloc_etherdev(sizeof(struct el3_private)); 276 dev = alloc_etherdev(sizeof(struct el3_private));
@@ -291,10 +283,8 @@ static int tc574_probe(struct pcmcia_device *link)
291 spin_lock_init(&lp->window_lock); 283 spin_lock_init(&lp->window_lock);
292 link->io.NumPorts1 = 32; 284 link->io.NumPorts1 = 32;
293 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 285 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
294 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 286 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
295 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
296 link->irq.Handler = &el3_interrupt; 287 link->irq.Handler = &el3_interrupt;
297 link->irq.Instance = dev;
298 link->conf.Attributes = CONF_ENABLE_IRQ; 288 link->conf.Attributes = CONF_ENABLE_IRQ;
299 link->conf.IntType = INT_MEMORY_AND_IO; 289 link->conf.IntType = INT_MEMORY_AND_IO;
300 link->conf.ConfigIndex = 1; 290 link->conf.ConfigIndex = 1;
@@ -319,7 +309,7 @@ static void tc574_detach(struct pcmcia_device *link)
319{ 309{
320 struct net_device *dev = link->priv; 310 struct net_device *dev = link->priv;
321 311
322 DEBUG(0, "3c574_detach(0x%p)\n", link); 312 dev_dbg(&link->dev, "3c574_detach()\n");
323 313
324 if (link->dev_node) 314 if (link->dev_node)
325 unregister_netdev(dev); 315 unregister_netdev(dev);
@@ -335,26 +325,23 @@ static void tc574_detach(struct pcmcia_device *link)
335 ethernet device available to the system. 325 ethernet device available to the system.
336*/ 326*/
337 327
338#define CS_CHECK(fn, ret) \
339 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
340
341static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 328static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
342 329
343static int tc574_config(struct pcmcia_device *link) 330static int tc574_config(struct pcmcia_device *link)
344{ 331{
345 struct net_device *dev = link->priv; 332 struct net_device *dev = link->priv;
346 struct el3_private *lp = netdev_priv(dev); 333 struct el3_private *lp = netdev_priv(dev);
347 tuple_t tuple; 334 int ret, i, j;
348 __le16 buf[32];
349 int last_fn, last_ret, i, j;
350 unsigned int ioaddr; 335 unsigned int ioaddr;
351 __be16 *phys_addr; 336 __be16 *phys_addr;
352 char *cardname; 337 char *cardname;
353 __u32 config; 338 __u32 config;
339 u8 *buf;
340 size_t len;
354 341
355 phys_addr = (__be16 *)dev->dev_addr; 342 phys_addr = (__be16 *)dev->dev_addr;
356 343
357 DEBUG(0, "3c574_config(0x%p)\n", link); 344 dev_dbg(&link->dev, "3c574_config()\n");
358 345
359 link->io.IOAddrLines = 16; 346 link->io.IOAddrLines = 16;
360 for (i = j = 0; j < 0x400; j += 0x20) { 347 for (i = j = 0; j < 0x400; j += 0x20) {
@@ -363,12 +350,16 @@ static int tc574_config(struct pcmcia_device *link)
363 if (i == 0) 350 if (i == 0)
364 break; 351 break;
365 } 352 }
366 if (i != 0) { 353 if (i != 0)
367 cs_error(link, RequestIO, i); 354 goto failed;
355
356 ret = pcmcia_request_irq(link, &link->irq);
357 if (ret)
358 goto failed;
359
360 ret = pcmcia_request_configuration(link, &link->conf);
361 if (ret)
368 goto failed; 362 goto failed;
369 }
370 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
371 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
372 363
373 dev->irq = link->irq.AssignedIRQ; 364 dev->irq = link->irq.AssignedIRQ;
374 dev->base_addr = link->io.BasePort1; 365 dev->base_addr = link->io.BasePort1;
@@ -378,16 +369,14 @@ static int tc574_config(struct pcmcia_device *link)
378 /* The 3c574 normally uses an EEPROM for configuration info, including 369 /* The 3c574 normally uses an EEPROM for configuration info, including
379 the hardware address. The future products may include a modem chip 370 the hardware address. The future products may include a modem chip
380 and put the address in the CIS. */ 371 and put the address in the CIS. */
381 tuple.Attributes = 0; 372
382 tuple.TupleData = (cisdata_t *)buf; 373 len = pcmcia_get_tuple(link, 0x88, &buf);
383 tuple.TupleDataMax = 64; 374 if (buf && len >= 6) {
384 tuple.TupleOffset = 0;
385 tuple.DesiredTuple = 0x88;
386 if (pcmcia_get_first_tuple(link, &tuple) == 0) {
387 pcmcia_get_tuple_data(link, &tuple);
388 for (i = 0; i < 3; i++) 375 for (i = 0; i < 3; i++)
389 phys_addr[i] = htons(le16_to_cpu(buf[i])); 376 phys_addr[i] = htons(le16_to_cpu(buf[i * 2]));
377 kfree(buf);
390 } else { 378 } else {
379 kfree(buf); /* 0 < len < 6 */
391 EL3WINDOW(0); 380 EL3WINDOW(0);
392 for (i = 0; i < 3; i++) 381 for (i = 0; i < 3; i++)
393 phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); 382 phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
@@ -435,7 +424,8 @@ static int tc574_config(struct pcmcia_device *link)
435 mii_status = mdio_read(ioaddr, phy & 0x1f, 1); 424 mii_status = mdio_read(ioaddr, phy & 0x1f, 1);
436 if (mii_status != 0xffff) { 425 if (mii_status != 0xffff) {
437 lp->phys = phy & 0x1f; 426 lp->phys = phy & 0x1f;
438 DEBUG(0, " MII transceiver at index %d, status %x.\n", 427 dev_dbg(&link->dev, " MII transceiver at "
428 "index %d, status %x.\n",
439 phy, mii_status); 429 phy, mii_status);
440 if ((mii_status & 0x0040) == 0) 430 if ((mii_status & 0x0040) == 0)
441 mii_preamble_required = 1; 431 mii_preamble_required = 1;
@@ -457,7 +447,7 @@ static int tc574_config(struct pcmcia_device *link)
457 } 447 }
458 448
459 link->dev_node = &lp->node; 449 link->dev_node = &lp->node;
460 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 450 SET_NETDEV_DEV(dev, &link->dev);
461 451
462 if (register_netdev(dev) != 0) { 452 if (register_netdev(dev) != 0) {
463 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n"); 453 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
@@ -478,8 +468,6 @@ static int tc574_config(struct pcmcia_device *link)
478 468
479 return 0; 469 return 0;
480 470
481cs_failed:
482 cs_error(link, last_fn, last_ret);
483failed: 471failed:
484 tc574_release(link); 472 tc574_release(link);
485 return -ENODEV; 473 return -ENODEV;
@@ -738,7 +726,7 @@ static int el3_open(struct net_device *dev)
738 lp->media.expires = jiffies + HZ; 726 lp->media.expires = jiffies + HZ;
739 add_timer(&lp->media); 727 add_timer(&lp->media);
740 728
741 DEBUG(2, "%s: opened, status %4.4x.\n", 729 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
742 dev->name, inw(dev->base_addr + EL3_STATUS)); 730 dev->name, inw(dev->base_addr + EL3_STATUS));
743 731
744 return 0; 732 return 0;
@@ -772,7 +760,7 @@ static void pop_tx_status(struct net_device *dev)
772 if (tx_status & 0x30) 760 if (tx_status & 0x30)
773 tc574_wait_for_completion(dev, TxReset); 761 tc574_wait_for_completion(dev, TxReset);
774 if (tx_status & 0x38) { 762 if (tx_status & 0x38) {
775 DEBUG(1, "%s: transmit error: status 0x%02x\n", 763 pr_debug("%s: transmit error: status 0x%02x\n",
776 dev->name, tx_status); 764 dev->name, tx_status);
777 outw(TxEnable, ioaddr + EL3_CMD); 765 outw(TxEnable, ioaddr + EL3_CMD);
778 dev->stats.tx_aborted_errors++; 766 dev->stats.tx_aborted_errors++;
@@ -788,7 +776,7 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
788 struct el3_private *lp = netdev_priv(dev); 776 struct el3_private *lp = netdev_priv(dev);
789 unsigned long flags; 777 unsigned long flags;
790 778
791 DEBUG(3, "%s: el3_start_xmit(length = %ld) called, " 779 pr_debug("%s: el3_start_xmit(length = %ld) called, "
792 "status %4.4x.\n", dev->name, (long)skb->len, 780 "status %4.4x.\n", dev->name, (long)skb->len,
793 inw(ioaddr + EL3_STATUS)); 781 inw(ioaddr + EL3_STATUS));
794 782
@@ -827,7 +815,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
827 return IRQ_NONE; 815 return IRQ_NONE;
828 ioaddr = dev->base_addr; 816 ioaddr = dev->base_addr;
829 817
830 DEBUG(3, "%s: interrupt, status %4.4x.\n", 818 pr_debug("%s: interrupt, status %4.4x.\n",
831 dev->name, inw(ioaddr + EL3_STATUS)); 819 dev->name, inw(ioaddr + EL3_STATUS));
832 820
833 spin_lock(&lp->window_lock); 821 spin_lock(&lp->window_lock);
@@ -836,7 +824,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
836 (IntLatch | RxComplete | RxEarly | StatsFull)) { 824 (IntLatch | RxComplete | RxEarly | StatsFull)) {
837 if (!netif_device_present(dev) || 825 if (!netif_device_present(dev) ||
838 ((status & 0xe000) != 0x2000)) { 826 ((status & 0xe000) != 0x2000)) {
839 DEBUG(1, "%s: Interrupt from dead card\n", dev->name); 827 pr_debug("%s: Interrupt from dead card\n", dev->name);
840 break; 828 break;
841 } 829 }
842 830
@@ -846,7 +834,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
846 work_budget = el3_rx(dev, work_budget); 834 work_budget = el3_rx(dev, work_budget);
847 835
848 if (status & TxAvailable) { 836 if (status & TxAvailable) {
849 DEBUG(3, " TX room bit was handled.\n"); 837 pr_debug(" TX room bit was handled.\n");
850 /* There's room in the FIFO for a full-sized packet. */ 838 /* There's room in the FIFO for a full-sized packet. */
851 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); 839 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
852 netif_wake_queue(dev); 840 netif_wake_queue(dev);
@@ -886,7 +874,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
886 } 874 }
887 875
888 if (--work_budget < 0) { 876 if (--work_budget < 0) {
889 DEBUG(0, "%s: Too much work in interrupt, " 877 pr_debug("%s: Too much work in interrupt, "
890 "status %4.4x.\n", dev->name, status); 878 "status %4.4x.\n", dev->name, status);
891 /* Clear all interrupts */ 879 /* Clear all interrupts */
892 outw(AckIntr | 0xFF, ioaddr + EL3_CMD); 880 outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
@@ -896,7 +884,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
896 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); 884 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
897 } 885 }
898 886
899 DEBUG(3, "%s: exiting interrupt, status %4.4x.\n", 887 pr_debug("%s: exiting interrupt, status %4.4x.\n",
900 dev->name, inw(ioaddr + EL3_STATUS)); 888 dev->name, inw(ioaddr + EL3_STATUS));
901 889
902 spin_unlock(&lp->window_lock); 890 spin_unlock(&lp->window_lock);
@@ -1003,7 +991,7 @@ static void update_stats(struct net_device *dev)
1003 unsigned int ioaddr = dev->base_addr; 991 unsigned int ioaddr = dev->base_addr;
1004 u8 rx, tx, up; 992 u8 rx, tx, up;
1005 993
1006 DEBUG(2, "%s: updating the statistics.\n", dev->name); 994 pr_debug("%s: updating the statistics.\n", dev->name);
1007 995
1008 if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */ 996 if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */
1009 return; 997 return;
@@ -1039,7 +1027,7 @@ static int el3_rx(struct net_device *dev, int worklimit)
1039 unsigned int ioaddr = dev->base_addr; 1027 unsigned int ioaddr = dev->base_addr;
1040 short rx_status; 1028 short rx_status;
1041 1029
1042 DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", 1030 pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
1043 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus)); 1031 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
1044 while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) && 1032 while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) &&
1045 worklimit > 0) { 1033 worklimit > 0) {
@@ -1061,7 +1049,7 @@ static int el3_rx(struct net_device *dev, int worklimit)
1061 1049
1062 skb = dev_alloc_skb(pkt_len+5); 1050 skb = dev_alloc_skb(pkt_len+5);
1063 1051
1064 DEBUG(3, " Receiving packet size %d status %4.4x.\n", 1052 pr_debug(" Receiving packet size %d status %4.4x.\n",
1065 pkt_len, rx_status); 1053 pkt_len, rx_status);
1066 if (skb != NULL) { 1054 if (skb != NULL) {
1067 skb_reserve(skb, 2); 1055 skb_reserve(skb, 2);
@@ -1072,7 +1060,7 @@ static int el3_rx(struct net_device *dev, int worklimit)
1072 dev->stats.rx_packets++; 1060 dev->stats.rx_packets++;
1073 dev->stats.rx_bytes += pkt_len; 1061 dev->stats.rx_bytes += pkt_len;
1074 } else { 1062 } else {
1075 DEBUG(1, "%s: couldn't allocate a sk_buff of" 1063 pr_debug("%s: couldn't allocate a sk_buff of"
1076 " size %d.\n", dev->name, pkt_len); 1064 " size %d.\n", dev->name, pkt_len);
1077 dev->stats.rx_dropped++; 1065 dev->stats.rx_dropped++;
1078 } 1066 }
@@ -1101,7 +1089,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1101 struct mii_ioctl_data *data = if_mii(rq); 1089 struct mii_ioctl_data *data = if_mii(rq);
1102 int phy = lp->phys & 0x1f; 1090 int phy = lp->phys & 0x1f;
1103 1091
1104 DEBUG(2, "%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n", 1092 pr_debug("%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n",
1105 dev->name, rq->ifr_ifrn.ifrn_name, cmd, 1093 dev->name, rq->ifr_ifrn.ifrn_name, cmd,
1106 data->phy_id, data->reg_num, data->val_in, data->val_out); 1094 data->phy_id, data->reg_num, data->val_in, data->val_out);
1107 1095
@@ -1178,7 +1166,7 @@ static int el3_close(struct net_device *dev)
1178 struct el3_private *lp = netdev_priv(dev); 1166 struct el3_private *lp = netdev_priv(dev);
1179 struct pcmcia_device *link = lp->p_dev; 1167 struct pcmcia_device *link = lp->p_dev;
1180 1168
1181 DEBUG(2, "%s: shutting down ethercard.\n", dev->name); 1169 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
1182 1170
1183 if (pcmcia_dev_present(link)) { 1171 if (pcmcia_dev_present(link)) {
1184 unsigned long flags; 1172 unsigned long flags;
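The 3c574_cs hunks above replace the driver-private DEBUG(n, ...) macro, compiled in only under PCMCIA_DEBUG and gated at runtime by a pc_debug module parameter, with the kernel's generic pr_debug()/dev_dbg() helpers. A minimal before/after sketch of the pattern, using only names that appear in the hunks (illustrative, not part of the patch):

    /* before: per-driver debug macro, enabled by PCMCIA_DEBUG + pc_debug */
    #ifdef PCMCIA_DEBUG
    static int pc_debug = PCMCIA_DEBUG;
    module_param(pc_debug, int, 0);
    #define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args)
    #else
    #define DEBUG(n, args...)
    #endif

    DEBUG(2, "%s: shutting down ethercard.\n", dev->name);

    /* after: standard helpers, no per-driver level variable;
     * pr_debug() where only the net_device is at hand,
     * dev_dbg(&link->dev, ...) where the pcmcia_device is available */
    pr_debug("%s: shutting down ethercard.\n", dev->name);
    dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);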
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 569fb06793cf..6f8d7e2e5922 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -130,14 +130,6 @@ MODULE_LICENSE("GPL");
130/* Special hook for setting if_port when module is loaded */ 130/* Special hook for setting if_port when module is loaded */
131INT_MODULE_PARM(if_port, 0); 131INT_MODULE_PARM(if_port, 0);
132 132
133#ifdef PCMCIA_DEBUG
134INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
135#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
136static char *version =
137DRV_NAME ".c " DRV_VERSION " 2001/10/13 00:08:50 (David Hinds)";
138#else
139#define DEBUG(n, args...)
140#endif
141 133
142/*====================================================================*/ 134/*====================================================================*/
143 135
@@ -189,7 +181,7 @@ static int tc589_probe(struct pcmcia_device *link)
189 struct el3_private *lp; 181 struct el3_private *lp;
190 struct net_device *dev; 182 struct net_device *dev;
191 183
192 DEBUG(0, "3c589_attach()\n"); 184 dev_dbg(&link->dev, "3c589_attach()\n");
193 185
194 /* Create new ethernet device */ 186 /* Create new ethernet device */
195 dev = alloc_etherdev(sizeof(struct el3_private)); 187 dev = alloc_etherdev(sizeof(struct el3_private));
@@ -202,10 +194,8 @@ static int tc589_probe(struct pcmcia_device *link)
202 spin_lock_init(&lp->lock); 194 spin_lock_init(&lp->lock);
203 link->io.NumPorts1 = 16; 195 link->io.NumPorts1 = 16;
204 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 196 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
205 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 197 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
206 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
207 link->irq.Handler = &el3_interrupt; 198 link->irq.Handler = &el3_interrupt;
208 link->irq.Instance = dev;
209 link->conf.Attributes = CONF_ENABLE_IRQ; 199 link->conf.Attributes = CONF_ENABLE_IRQ;
210 link->conf.IntType = INT_MEMORY_AND_IO; 200 link->conf.IntType = INT_MEMORY_AND_IO;
211 link->conf.ConfigIndex = 1; 201 link->conf.ConfigIndex = 1;
@@ -231,7 +221,7 @@ static void tc589_detach(struct pcmcia_device *link)
231{ 221{
232 struct net_device *dev = link->priv; 222 struct net_device *dev = link->priv;
233 223
234 DEBUG(0, "3c589_detach(0x%p)\n", link); 224 dev_dbg(&link->dev, "3c589_detach\n");
235 225
236 if (link->dev_node) 226 if (link->dev_node)
237 unregister_netdev(dev); 227 unregister_netdev(dev);
@@ -249,29 +239,20 @@ static void tc589_detach(struct pcmcia_device *link)
249 239
250======================================================================*/ 240======================================================================*/
251 241
252#define CS_CHECK(fn, ret) \
253do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
254
255static int tc589_config(struct pcmcia_device *link) 242static int tc589_config(struct pcmcia_device *link)
256{ 243{
257 struct net_device *dev = link->priv; 244 struct net_device *dev = link->priv;
258 struct el3_private *lp = netdev_priv(dev); 245 struct el3_private *lp = netdev_priv(dev);
259 tuple_t tuple;
260 __le16 buf[32];
261 __be16 *phys_addr; 246 __be16 *phys_addr;
262 int last_fn, last_ret, i, j, multi = 0, fifo; 247 int ret, i, j, multi = 0, fifo;
263 unsigned int ioaddr; 248 unsigned int ioaddr;
264 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 249 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
250 u8 *buf;
251 size_t len;
265 252
266 DEBUG(0, "3c589_config(0x%p)\n", link); 253 dev_dbg(&link->dev, "3c589_config\n");
267 254
268 phys_addr = (__be16 *)dev->dev_addr; 255 phys_addr = (__be16 *)dev->dev_addr;
269 tuple.Attributes = 0;
270 tuple.TupleData = (cisdata_t *)buf;
271 tuple.TupleDataMax = sizeof(buf);
272 tuple.TupleOffset = 0;
273 tuple.Attributes = TUPLE_RETURN_COMMON;
274
275 /* Is this a 3c562? */ 256 /* Is this a 3c562? */
276 if (link->manf_id != MANFID_3COM) 257 if (link->manf_id != MANFID_3COM)
277 printk(KERN_INFO "3c589_cs: hmmm, is this really a " 258 printk(KERN_INFO "3c589_cs: hmmm, is this really a "
@@ -287,12 +268,16 @@ static int tc589_config(struct pcmcia_device *link)
287 if (i == 0) 268 if (i == 0)
288 break; 269 break;
289 } 270 }
290 if (i != 0) { 271 if (i != 0)
291 cs_error(link, RequestIO, i);
292 goto failed; 272 goto failed;
293 } 273
294 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 274 ret = pcmcia_request_irq(link, &link->irq);
295 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 275 if (ret)
276 goto failed;
277
278 ret = pcmcia_request_configuration(link, &link->conf);
279 if (ret)
280 goto failed;
296 281
297 dev->irq = link->irq.AssignedIRQ; 282 dev->irq = link->irq.AssignedIRQ;
298 dev->base_addr = link->io.BasePort1; 283 dev->base_addr = link->io.BasePort1;
@@ -301,12 +286,13 @@ static int tc589_config(struct pcmcia_device *link)
301 286
302 /* The 3c589 has an extra EEPROM for configuration info, including 287 /* The 3c589 has an extra EEPROM for configuration info, including
303 the hardware address. The 3c562 puts the address in the CIS. */ 288 the hardware address. The 3c562 puts the address in the CIS. */
304 tuple.DesiredTuple = 0x88; 289 len = pcmcia_get_tuple(link, 0x88, &buf);
305 if (pcmcia_get_first_tuple(link, &tuple) == 0) { 290 if (buf && len >= 6) {
306 pcmcia_get_tuple_data(link, &tuple); 291 for (i = 0; i < 3; i++)
307 for (i = 0; i < 3; i++) 292 phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
308 phys_addr[i] = htons(le16_to_cpu(buf[i])); 293 kfree(buf);
309 } else { 294 } else {
295 kfree(buf); /* 0 < len < 6 */
310 for (i = 0; i < 3; i++) 296 for (i = 0; i < 3; i++)
311 phys_addr[i] = htons(read_eeprom(ioaddr, i)); 297 phys_addr[i] = htons(read_eeprom(ioaddr, i));
312 if (phys_addr[0] == htons(0x6060)) { 298 if (phys_addr[0] == htons(0x6060)) {
@@ -328,7 +314,7 @@ static int tc589_config(struct pcmcia_device *link)
328 printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); 314 printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
329 315
330 link->dev_node = &lp->node; 316 link->dev_node = &lp->node;
331 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 317 SET_NETDEV_DEV(dev, &link->dev);
332 318
333 if (register_netdev(dev) != 0) { 319 if (register_netdev(dev) != 0) {
334 printk(KERN_ERR "3c589_cs: register_netdev() failed\n"); 320 printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
@@ -347,8 +333,6 @@ static int tc589_config(struct pcmcia_device *link)
347 if_names[dev->if_port]); 333 if_names[dev->if_port]);
348 return 0; 334 return 0;
349 335
350cs_failed:
351 cs_error(link, last_fn, last_ret);
352failed: 336failed:
353 tc589_release(link); 337 tc589_release(link);
354 return -ENODEV; 338 return -ENODEV;
@@ -511,24 +495,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
511 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); 495 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
512} 496}
513 497
514#ifdef PCMCIA_DEBUG
515static u32 netdev_get_msglevel(struct net_device *dev)
516{
517 return pc_debug;
518}
519
520static void netdev_set_msglevel(struct net_device *dev, u32 level)
521{
522 pc_debug = level;
523}
524#endif /* PCMCIA_DEBUG */
525
526static const struct ethtool_ops netdev_ethtool_ops = { 498static const struct ethtool_ops netdev_ethtool_ops = {
527 .get_drvinfo = netdev_get_drvinfo, 499 .get_drvinfo = netdev_get_drvinfo,
528#ifdef PCMCIA_DEBUG
529 .get_msglevel = netdev_get_msglevel,
530 .set_msglevel = netdev_set_msglevel,
531#endif /* PCMCIA_DEBUG */
532}; 500};
533 501
534static int el3_config(struct net_device *dev, struct ifmap *map) 502static int el3_config(struct net_device *dev, struct ifmap *map)
@@ -563,7 +531,7 @@ static int el3_open(struct net_device *dev)
563 lp->media.expires = jiffies + HZ; 531 lp->media.expires = jiffies + HZ;
564 add_timer(&lp->media); 532 add_timer(&lp->media);
565 533
566 DEBUG(1, "%s: opened, status %4.4x.\n", 534 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
567 dev->name, inw(dev->base_addr + EL3_STATUS)); 535 dev->name, inw(dev->base_addr + EL3_STATUS));
568 536
569 return 0; 537 return 0;
@@ -596,7 +564,7 @@ static void pop_tx_status(struct net_device *dev)
596 if (tx_status & 0x30) 564 if (tx_status & 0x30)
597 tc589_wait_for_completion(dev, TxReset); 565 tc589_wait_for_completion(dev, TxReset);
598 if (tx_status & 0x38) { 566 if (tx_status & 0x38) {
599 DEBUG(1, "%s: transmit error: status 0x%02x\n", 567 pr_debug("%s: transmit error: status 0x%02x\n",
600 dev->name, tx_status); 568 dev->name, tx_status);
601 outw(TxEnable, ioaddr + EL3_CMD); 569 outw(TxEnable, ioaddr + EL3_CMD);
602 dev->stats.tx_aborted_errors++; 570 dev->stats.tx_aborted_errors++;
@@ -612,7 +580,7 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
612 struct el3_private *priv = netdev_priv(dev); 580 struct el3_private *priv = netdev_priv(dev);
613 unsigned long flags; 581 unsigned long flags;
614 582
615 DEBUG(3, "%s: el3_start_xmit(length = %ld) called, " 583 pr_debug("%s: el3_start_xmit(length = %ld) called, "
616 "status %4.4x.\n", dev->name, (long)skb->len, 584 "status %4.4x.\n", dev->name, (long)skb->len,
617 inw(ioaddr + EL3_STATUS)); 585 inw(ioaddr + EL3_STATUS));
618 586
@@ -654,14 +622,14 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
654 622
655 ioaddr = dev->base_addr; 623 ioaddr = dev->base_addr;
656 624
657 DEBUG(3, "%s: interrupt, status %4.4x.\n", 625 pr_debug("%s: interrupt, status %4.4x.\n",
658 dev->name, inw(ioaddr + EL3_STATUS)); 626 dev->name, inw(ioaddr + EL3_STATUS));
659 627
660 spin_lock(&lp->lock); 628 spin_lock(&lp->lock);
661 while ((status = inw(ioaddr + EL3_STATUS)) & 629 while ((status = inw(ioaddr + EL3_STATUS)) &
662 (IntLatch | RxComplete | StatsFull)) { 630 (IntLatch | RxComplete | StatsFull)) {
663 if ((status & 0xe000) != 0x2000) { 631 if ((status & 0xe000) != 0x2000) {
664 DEBUG(1, "%s: interrupt from dead card\n", dev->name); 632 pr_debug("%s: interrupt from dead card\n", dev->name);
665 handled = 0; 633 handled = 0;
666 break; 634 break;
667 } 635 }
@@ -670,7 +638,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
670 el3_rx(dev); 638 el3_rx(dev);
671 639
672 if (status & TxAvailable) { 640 if (status & TxAvailable) {
673 DEBUG(3, " TX room bit was handled.\n"); 641 pr_debug(" TX room bit was handled.\n");
674 /* There's room in the FIFO for a full-sized packet. */ 642 /* There's room in the FIFO for a full-sized packet. */
675 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); 643 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
676 netif_wake_queue(dev); 644 netif_wake_queue(dev);
@@ -722,7 +690,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
722 690
723 lp->last_irq = jiffies; 691 lp->last_irq = jiffies;
724 spin_unlock(&lp->lock); 692 spin_unlock(&lp->lock);
725 DEBUG(3, "%s: exiting interrupt, status %4.4x.\n", 693 pr_debug("%s: exiting interrupt, status %4.4x.\n",
726 dev->name, inw(ioaddr + EL3_STATUS)); 694 dev->name, inw(ioaddr + EL3_STATUS));
727 return IRQ_RETVAL(handled); 695 return IRQ_RETVAL(handled);
728} 696}
@@ -833,7 +801,7 @@ static void update_stats(struct net_device *dev)
833{ 801{
834 unsigned int ioaddr = dev->base_addr; 802 unsigned int ioaddr = dev->base_addr;
835 803
836 DEBUG(2, "%s: updating the statistics.\n", dev->name); 804 pr_debug("%s: updating the statistics.\n", dev->name);
837 /* Turn off statistics updates while reading. */ 805 /* Turn off statistics updates while reading. */
838 outw(StatsDisable, ioaddr + EL3_CMD); 806 outw(StatsDisable, ioaddr + EL3_CMD);
839 /* Switch to the stats window, and read everything. */ 807 /* Switch to the stats window, and read everything. */
@@ -861,7 +829,7 @@ static int el3_rx(struct net_device *dev)
861 int worklimit = 32; 829 int worklimit = 32;
862 short rx_status; 830 short rx_status;
863 831
864 DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", 832 pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
865 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); 833 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
866 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && 834 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
867 worklimit > 0) { 835 worklimit > 0) {
@@ -883,7 +851,7 @@ static int el3_rx(struct net_device *dev)
883 851
884 skb = dev_alloc_skb(pkt_len+5); 852 skb = dev_alloc_skb(pkt_len+5);
885 853
886 DEBUG(3, " Receiving packet size %d status %4.4x.\n", 854 pr_debug(" Receiving packet size %d status %4.4x.\n",
887 pkt_len, rx_status); 855 pkt_len, rx_status);
888 if (skb != NULL) { 856 if (skb != NULL) {
889 skb_reserve(skb, 2); 857 skb_reserve(skb, 2);
@@ -894,7 +862,7 @@ static int el3_rx(struct net_device *dev)
894 dev->stats.rx_packets++; 862 dev->stats.rx_packets++;
895 dev->stats.rx_bytes += pkt_len; 863 dev->stats.rx_bytes += pkt_len;
896 } else { 864 } else {
897 DEBUG(1, "%s: couldn't allocate a sk_buff of" 865 pr_debug("%s: couldn't allocate a sk_buff of"
898 " size %d.\n", dev->name, pkt_len); 866 " size %d.\n", dev->name, pkt_len);
899 dev->stats.rx_dropped++; 867 dev->stats.rx_dropped++;
900 } 868 }
@@ -935,7 +903,7 @@ static int el3_close(struct net_device *dev)
935 struct pcmcia_device *link = lp->p_dev; 903 struct pcmcia_device *link = lp->p_dev;
936 unsigned int ioaddr = dev->base_addr; 904 unsigned int ioaddr = dev->base_addr;
937 905
938 DEBUG(1, "%s: shutting down ethercard.\n", dev->name); 906 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
939 907
940 if (pcmcia_dev_present(link)) { 908 if (pcmcia_dev_present(link)) {
941 /* Turn off statistics ASAP. We update dev->stats below. */ 909 /* Turn off statistics ASAP. We update dev->stats below. */
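The 3c589_cs changes also retire the CS_CHECK() wrapper, which stashed a function code, jumped to cs_failed and reported the failure through cs_error(). Configuration calls now use ordinary return-code checks and a single failed: label. A hedged sketch of the two styles, built only from calls visible in the hunks above:

    /* before: macro-driven error handling, reported via cs_error() */
    #define CS_CHECK(fn, ret) \
        do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

    CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
    CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));

    /* after: explicit checks; the cs_failed/cs_error() pair disappears,
     * presumably because the PCMCIA core now logs these failures itself */
    ret = pcmcia_request_irq(link, &link->irq);
    if (ret)
        goto failed;

    ret = pcmcia_request_configuration(link, &link->conf);
    if (ret)
        goto failed;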
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 3131a59a8d32..800597b82d18 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -75,16 +75,6 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
75MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver"); 75MODULE_DESCRIPTION("Asix AX88190 PCMCIA ethernet driver");
76MODULE_LICENSE("GPL"); 76MODULE_LICENSE("GPL");
77 77
78#ifdef PCMCIA_DEBUG
79#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
80
81INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
82#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
83static char *version =
84"axnet_cs.c 1.28 2002/06/29 06:27:37 (David Hinds)";
85#else
86#define DEBUG(n, args...)
87#endif
88 78
89/*====================================================================*/ 79/*====================================================================*/
90 80
@@ -167,7 +157,7 @@ static int axnet_probe(struct pcmcia_device *link)
167 struct net_device *dev; 157 struct net_device *dev;
168 struct ei_device *ei_local; 158 struct ei_device *ei_local;
169 159
170 DEBUG(0, "axnet_attach()\n"); 160 dev_dbg(&link->dev, "axnet_attach()\n");
171 161
172 dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(axnet_dev_t)); 162 dev = alloc_etherdev(sizeof(struct ei_device) + sizeof(axnet_dev_t));
173 if (!dev) 163 if (!dev)
@@ -180,7 +170,6 @@ static int axnet_probe(struct pcmcia_device *link)
180 info->p_dev = link; 170 info->p_dev = link;
181 link->priv = dev; 171 link->priv = dev;
182 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 172 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
183 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
184 link->conf.Attributes = CONF_ENABLE_IRQ; 173 link->conf.Attributes = CONF_ENABLE_IRQ;
185 link->conf.IntType = INT_MEMORY_AND_IO; 174 link->conf.IntType = INT_MEMORY_AND_IO;
186 175
@@ -205,7 +194,7 @@ static void axnet_detach(struct pcmcia_device *link)
205{ 194{
206 struct net_device *dev = link->priv; 195 struct net_device *dev = link->priv;
207 196
208 DEBUG(0, "axnet_detach(0x%p)\n", link); 197 dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link);
209 198
210 if (link->dev_node) 199 if (link->dev_node)
211 unregister_netdev(dev); 200 unregister_netdev(dev);
@@ -272,9 +261,6 @@ static int get_prom(struct pcmcia_device *link)
272 261
273======================================================================*/ 262======================================================================*/
274 263
275#define CS_CHECK(fn, ret) \
276do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
277
278static int try_io_port(struct pcmcia_device *link) 264static int try_io_port(struct pcmcia_device *link)
279{ 265{
280 int j, ret; 266 int j, ret;
@@ -341,26 +327,29 @@ static int axnet_config(struct pcmcia_device *link)
341{ 327{
342 struct net_device *dev = link->priv; 328 struct net_device *dev = link->priv;
343 axnet_dev_t *info = PRIV(dev); 329 axnet_dev_t *info = PRIV(dev);
344 int i, j, j2, last_ret, last_fn; 330 int i, j, j2, ret;
345 331
346 DEBUG(0, "axnet_config(0x%p)\n", link); 332 dev_dbg(&link->dev, "axnet_config(0x%p)\n", link);
347 333
348 /* don't trust the CIS on this; Linksys got it wrong */ 334 /* don't trust the CIS on this; Linksys got it wrong */
349 link->conf.Present = 0x63; 335 link->conf.Present = 0x63;
350 last_ret = pcmcia_loop_config(link, axnet_configcheck, NULL); 336 ret = pcmcia_loop_config(link, axnet_configcheck, NULL);
351 if (last_ret != 0) { 337 if (ret != 0)
352 cs_error(link, RequestIO, last_ret);
353 goto failed; 338 goto failed;
354 }
355 339
356 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 340 ret = pcmcia_request_irq(link, &link->irq);
341 if (ret)
342 goto failed;
357 343
358 if (link->io.NumPorts2 == 8) { 344 if (link->io.NumPorts2 == 8) {
359 link->conf.Attributes |= CONF_ENABLE_SPKR; 345 link->conf.Attributes |= CONF_ENABLE_SPKR;
360 link->conf.Status = CCSR_AUDIO_ENA; 346 link->conf.Status = CCSR_AUDIO_ENA;
361 } 347 }
362 348
363 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 349 ret = pcmcia_request_configuration(link, &link->conf);
350 if (ret)
351 goto failed;
352
364 dev->irq = link->irq.AssignedIRQ; 353 dev->irq = link->irq.AssignedIRQ;
365 dev->base_addr = link->io.BasePort1; 354 dev->base_addr = link->io.BasePort1;
366 355
@@ -410,7 +399,7 @@ static int axnet_config(struct pcmcia_device *link)
410 399
411 info->phy_id = (i < 32) ? i : -1; 400 info->phy_id = (i < 32) ? i : -1;
412 link->dev_node = &info->node; 401 link->dev_node = &info->node;
413 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 402 SET_NETDEV_DEV(dev, &link->dev);
414 403
415 if (register_netdev(dev) != 0) { 404 if (register_netdev(dev) != 0) {
416 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n"); 405 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
@@ -426,14 +415,12 @@ static int axnet_config(struct pcmcia_device *link)
426 dev->base_addr, dev->irq, 415 dev->base_addr, dev->irq,
427 dev->dev_addr); 416 dev->dev_addr);
428 if (info->phy_id != -1) { 417 if (info->phy_id != -1) {
429 DEBUG(0, " MII transceiver at index %d, status %x.\n", info->phy_id, j); 418 dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n", info->phy_id, j);
430 } else { 419 } else {
431 printk(KERN_NOTICE " No MII transceivers found!\n"); 420 printk(KERN_NOTICE " No MII transceivers found!\n");
432 } 421 }
433 return 0; 422 return 0;
434 423
435cs_failed:
436 cs_error(link, last_fn, last_ret);
437failed: 424failed:
438 axnet_release(link); 425 axnet_release(link);
439 return -ENODEV; 426 return -ENODEV;
@@ -543,7 +530,7 @@ static int axnet_open(struct net_device *dev)
543 struct pcmcia_device *link = info->p_dev; 530 struct pcmcia_device *link = info->p_dev;
544 unsigned int nic_base = dev->base_addr; 531 unsigned int nic_base = dev->base_addr;
545 532
546 DEBUG(2, "axnet_open('%s')\n", dev->name); 533 dev_dbg(&link->dev, "axnet_open('%s')\n", dev->name);
547 534
548 if (!pcmcia_dev_present(link)) 535 if (!pcmcia_dev_present(link))
549 return -ENODEV; 536 return -ENODEV;
@@ -572,7 +559,7 @@ static int axnet_close(struct net_device *dev)
572 axnet_dev_t *info = PRIV(dev); 559 axnet_dev_t *info = PRIV(dev);
573 struct pcmcia_device *link = info->p_dev; 560 struct pcmcia_device *link = info->p_dev;
574 561
575 DEBUG(2, "axnet_close('%s')\n", dev->name); 562 dev_dbg(&link->dev, "axnet_close('%s')\n", dev->name);
576 563
577 ax_close(dev); 564 ax_close(dev);
578 free_irq(dev->irq, dev); 565 free_irq(dev->irq, dev);
@@ -741,10 +728,8 @@ static void block_input(struct net_device *dev, int count,
741 int xfer_count = count; 728 int xfer_count = count;
742 char *buf = skb->data; 729 char *buf = skb->data;
743 730
744#ifdef PCMCIA_DEBUG
745 if ((ei_debug > 4) && (count != 4)) 731 if ((ei_debug > 4) && (count != 4))
746 printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4); 732 pr_debug("%s: [bi=%d]\n", dev->name, count+4);
747#endif
748 outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO); 733 outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
749 outb_p(ring_offset >> 8, nic_base + EN0_RSARHI); 734 outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
750 outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD); 735 outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
@@ -762,10 +747,7 @@ static void block_output(struct net_device *dev, int count,
762{ 747{
763 unsigned int nic_base = dev->base_addr; 748 unsigned int nic_base = dev->base_addr;
764 749
765#ifdef PCMCIA_DEBUG 750 pr_debug("%s: [bo=%d]\n", dev->name, count);
766 if (ei_debug > 4)
767 printk(KERN_DEBUG "%s: [bo=%d]\n", dev->name, count);
768#endif
769 751
770 /* Round the count up for word writes. Do we need to do this? 752 /* Round the count up for word writes. Do we need to do this?
771 What effect will an odd byte count have on the 8390? 753 What effect will an odd byte count have on the 8390?
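Across axnet_cs and the other drivers in this series, the IRQ setup drops the IRQ_HANDLE_PRESENT flag and the irq.IRQInfo1/irq.Instance fields. Judging from the ibmtr_cs hunk further down, the core now hands link->priv to the interrupt handler, so a driver whose handler expects something else (ibmtr's tok_interrupt wants the net_device) gains a small wrapper. A sketch of the simplified setup, with field names taken from the hunks (illustrative only):

    /* before: handler, its data and level-trigger info spelled out */
    link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
    link->irq.IRQInfo1   = IRQ_LEVEL_ID;
    link->irq.Handler    = &el3_interrupt;
    link->irq.Instance   = dev;

    /* after: only the sharing attribute and the handler remain;
     * the handler's dev_id now appears to come from link->priv
     * (see the ibmtr_interrupt wrapper below) */
    link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
    link->irq.Handler    = &el3_interrupt;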
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 7b5c77b7bd27..21d9c9d815d1 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -53,11 +53,7 @@
53 53
54#define VERSION "arcnet: COM20020 PCMCIA support loaded.\n" 54#define VERSION "arcnet: COM20020 PCMCIA support loaded.\n"
55 55
56#ifdef PCMCIA_DEBUG 56#ifdef DEBUG
57
58static int pc_debug = PCMCIA_DEBUG;
59module_param(pc_debug, int, 0);
60#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
61 57
62static void regdump(struct net_device *dev) 58static void regdump(struct net_device *dev)
63{ 59{
@@ -92,7 +88,6 @@ static void regdump(struct net_device *dev)
92 88
93#else 89#else
94 90
95#define DEBUG(n, args...) do { } while (0)
96static inline void regdump(struct net_device *dev) { } 91static inline void regdump(struct net_device *dev) { }
97 92
98#endif 93#endif
@@ -144,7 +139,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
144 struct net_device *dev; 139 struct net_device *dev;
145 struct arcnet_local *lp; 140 struct arcnet_local *lp;
146 141
147 DEBUG(0, "com20020_attach()\n"); 142 dev_dbg(&p_dev->dev, "com20020_attach()\n");
148 143
149 /* Create new network device */ 144 /* Create new network device */
150 info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL); 145 info = kzalloc(sizeof(struct com20020_dev_t), GFP_KERNEL);
@@ -169,11 +164,10 @@ static int com20020_probe(struct pcmcia_device *p_dev)
169 p_dev->io.NumPorts1 = 16; 164 p_dev->io.NumPorts1 = 16;
170 p_dev->io.IOAddrLines = 16; 165 p_dev->io.IOAddrLines = 16;
171 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 166 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
172 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
173 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 167 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
174 p_dev->conf.IntType = INT_MEMORY_AND_IO; 168 p_dev->conf.IntType = INT_MEMORY_AND_IO;
175 169
176 p_dev->irq.Instance = info->dev = dev; 170 info->dev = dev;
177 p_dev->priv = info; 171 p_dev->priv = info;
178 172
179 return com20020_config(p_dev); 173 return com20020_config(p_dev);
@@ -198,12 +192,12 @@ static void com20020_detach(struct pcmcia_device *link)
198 struct com20020_dev_t *info = link->priv; 192 struct com20020_dev_t *info = link->priv;
199 struct net_device *dev = info->dev; 193 struct net_device *dev = info->dev;
200 194
201 DEBUG(1,"detach...\n"); 195 dev_dbg(&link->dev, "detach...\n");
202 196
203 DEBUG(0, "com20020_detach(0x%p)\n", link); 197 dev_dbg(&link->dev, "com20020_detach\n");
204 198
205 if (link->dev_node) { 199 if (link->dev_node) {
206 DEBUG(1,"unregister...\n"); 200 dev_dbg(&link->dev, "unregister...\n");
207 201
208 unregister_netdev(dev); 202 unregister_netdev(dev);
209 203
@@ -218,16 +212,16 @@ static void com20020_detach(struct pcmcia_device *link)
218 com20020_release(link); 212 com20020_release(link);
219 213
220 /* Unlink device structure, free bits */ 214 /* Unlink device structure, free bits */
221 DEBUG(1,"unlinking...\n"); 215 dev_dbg(&link->dev, "unlinking...\n");
222 if (link->priv) 216 if (link->priv)
223 { 217 {
224 dev = info->dev; 218 dev = info->dev;
225 if (dev) 219 if (dev)
226 { 220 {
227 DEBUG(1,"kfree...\n"); 221 dev_dbg(&link->dev, "kfree...\n");
228 free_netdev(dev); 222 free_netdev(dev);
229 } 223 }
230 DEBUG(1,"kfree2...\n"); 224 dev_dbg(&link->dev, "kfree2...\n");
231 kfree(info); 225 kfree(info);
232 } 226 }
233 227
@@ -241,25 +235,22 @@ static void com20020_detach(struct pcmcia_device *link)
241 235
242======================================================================*/ 236======================================================================*/
243 237
244#define CS_CHECK(fn, ret) \
245do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
246
247static int com20020_config(struct pcmcia_device *link) 238static int com20020_config(struct pcmcia_device *link)
248{ 239{
249 struct arcnet_local *lp; 240 struct arcnet_local *lp;
250 com20020_dev_t *info; 241 com20020_dev_t *info;
251 struct net_device *dev; 242 struct net_device *dev;
252 int i, last_ret, last_fn; 243 int i, ret;
253 int ioaddr; 244 int ioaddr;
254 245
255 info = link->priv; 246 info = link->priv;
256 dev = info->dev; 247 dev = info->dev;
257 248
258 DEBUG(1,"config...\n"); 249 dev_dbg(&link->dev, "config...\n");
259 250
260 DEBUG(0, "com20020_config(0x%p)\n", link); 251 dev_dbg(&link->dev, "com20020_config\n");
261 252
262 DEBUG(1,"arcnet: baseport1 is %Xh\n", link->io.BasePort1); 253 dev_dbg(&link->dev, "baseport1 is %Xh\n", link->io.BasePort1);
263 i = -ENODEV; 254 i = -ENODEV;
264 if (!link->io.BasePort1) 255 if (!link->io.BasePort1)
265 { 256 {
@@ -276,26 +267,27 @@ static int com20020_config(struct pcmcia_device *link)
276 267
277 if (i != 0) 268 if (i != 0)
278 { 269 {
279 DEBUG(1,"arcnet: requestIO failed totally!\n"); 270 dev_dbg(&link->dev, "requestIO failed totally!\n");
280 goto failed; 271 goto failed;
281 } 272 }
282 273
283 ioaddr = dev->base_addr = link->io.BasePort1; 274 ioaddr = dev->base_addr = link->io.BasePort1;
284 DEBUG(1,"arcnet: got ioaddr %Xh\n", ioaddr); 275 dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
285 276
286 DEBUG(1,"arcnet: request IRQ %d (%Xh/%Xh)\n", 277 dev_dbg(&link->dev, "request IRQ %d\n",
287 link->irq.AssignedIRQ, 278 link->irq.AssignedIRQ);
288 link->irq.IRQInfo1, link->irq.IRQInfo2);
289 i = pcmcia_request_irq(link, &link->irq); 279 i = pcmcia_request_irq(link, &link->irq);
290 if (i != 0) 280 if (i != 0)
291 { 281 {
292 DEBUG(1,"arcnet: requestIRQ failed totally!\n"); 282 dev_dbg(&link->dev, "requestIRQ failed totally!\n");
293 goto failed; 283 goto failed;
294 } 284 }
295 285
296 dev->irq = link->irq.AssignedIRQ; 286 dev->irq = link->irq.AssignedIRQ;
297 287
298 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 288 ret = pcmcia_request_configuration(link, &link->conf);
289 if (ret)
290 goto failed;
299 291
300 if (com20020_check(dev)) 292 if (com20020_check(dev))
301 { 293 {
@@ -308,26 +300,25 @@ static int com20020_config(struct pcmcia_device *link)
308 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */ 300 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
309 301
310 link->dev_node = &info->node; 302 link->dev_node = &info->node;
311 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 303 SET_NETDEV_DEV(dev, &link->dev);
312 304
313 i = com20020_found(dev, 0); /* calls register_netdev */ 305 i = com20020_found(dev, 0); /* calls register_netdev */
314 306
315 if (i != 0) { 307 if (i != 0) {
316 DEBUG(1,KERN_NOTICE "com20020_cs: com20020_found() failed\n"); 308 dev_printk(KERN_NOTICE, &link->dev,
309 "com20020_cs: com20020_found() failed\n");
317 link->dev_node = NULL; 310 link->dev_node = NULL;
318 goto failed; 311 goto failed;
319 } 312 }
320 313
321 strcpy(info->node.dev_name, dev->name); 314 strcpy(info->node.dev_name, dev->name);
322 315
323 DEBUG(1,KERN_INFO "%s: port %#3lx, irq %d\n", 316 dev_dbg(&link->dev, "%s: port %#3lx, irq %d\n",
324 dev->name, dev->base_addr, dev->irq); 317 dev->name, dev->base_addr, dev->irq);
325 return 0; 318 return 0;
326 319
327cs_failed:
328 cs_error(link, last_fn, last_ret);
329failed: 320failed:
330 DEBUG(1,"com20020_config failed...\n"); 321 dev_dbg(&link->dev, "com20020_config failed...\n");
331 com20020_release(link); 322 com20020_release(link);
332 return -ENODEV; 323 return -ENODEV;
333} /* com20020_config */ 324} /* com20020_config */
@@ -342,7 +333,7 @@ failed:
342 333
343static void com20020_release(struct pcmcia_device *link) 334static void com20020_release(struct pcmcia_device *link)
344{ 335{
345 DEBUG(0, "com20020_release(0x%p)\n", link); 336 dev_dbg(&link->dev, "com20020_release\n");
346 pcmcia_disable_device(link); 337 pcmcia_disable_device(link);
347} 338}
348 339
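com20020_cs follows the same conversions and, like the other drivers here, switches SET_NETDEV_DEV() from the handle_to_dev() helper to the embedded device structure. handle_to_dev(link) appears to have been a thin wrapper around link->dev, so the change is mechanical; a two-line sketch:

    /* before */  SET_NETDEV_DEV(dev, &handle_to_dev(link));
    /* after  */  SET_NETDEV_DEV(dev, &link->dev);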
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 7e01fbdb87e0..6e3e1ced6db4 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -72,13 +72,6 @@ MODULE_LICENSE("GPL");
72/* 0:4KB*2 TX buffer else:8KB*2 TX buffer */ 72/* 0:4KB*2 TX buffer else:8KB*2 TX buffer */
73INT_MODULE_PARM(sram_config, 0); 73INT_MODULE_PARM(sram_config, 0);
74 74
75#ifdef PCMCIA_DEBUG
76INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
77#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
78static char *version = DRV_NAME ".c " DRV_VERSION " 2002/03/23";
79#else
80#define DEBUG(n, args...)
81#endif
82 75
83/*====================================================================*/ 76/*====================================================================*/
84/* 77/*
@@ -245,7 +238,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
245 local_info_t *lp; 238 local_info_t *lp;
246 struct net_device *dev; 239 struct net_device *dev;
247 240
248 DEBUG(0, "fmvj18x_attach()\n"); 241 dev_dbg(&link->dev, "fmvj18x_attach()\n");
249 242
250 /* Make up a FMVJ18x specific data structure */ 243 /* Make up a FMVJ18x specific data structure */
251 dev = alloc_etherdev(sizeof(local_info_t)); 244 dev = alloc_etherdev(sizeof(local_info_t));
@@ -262,10 +255,8 @@ static int fmvj18x_probe(struct pcmcia_device *link)
262 link->io.IOAddrLines = 5; 255 link->io.IOAddrLines = 5;
263 256
264 /* Interrupt setup */ 257 /* Interrupt setup */
265 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 258 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
266 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
267 link->irq.Handler = &fjn_interrupt; 259 link->irq.Handler = &fjn_interrupt;
268 link->irq.Instance = dev;
269 260
270 /* General socket configuration */ 261 /* General socket configuration */
271 link->conf.Attributes = CONF_ENABLE_IRQ; 262 link->conf.Attributes = CONF_ENABLE_IRQ;
@@ -285,7 +276,7 @@ static void fmvj18x_detach(struct pcmcia_device *link)
285{ 276{
286 struct net_device *dev = link->priv; 277 struct net_device *dev = link->priv;
287 278
288 DEBUG(0, "fmvj18x_detach(0x%p)\n", link); 279 dev_dbg(&link->dev, "fmvj18x_detach\n");
289 280
290 if (link->dev_node) 281 if (link->dev_node)
291 unregister_netdev(dev); 282 unregister_netdev(dev);
@@ -297,9 +288,6 @@ static void fmvj18x_detach(struct pcmcia_device *link)
297 288
298/*====================================================================*/ 289/*====================================================================*/
299 290
300#define CS_CHECK(fn, ret) \
301do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
302
303static int mfc_try_io_port(struct pcmcia_device *link) 291static int mfc_try_io_port(struct pcmcia_device *link)
304{ 292{
305 int i, ret; 293 int i, ret;
@@ -341,33 +329,38 @@ static int ungermann_try_io_port(struct pcmcia_device *link)
341 return ret; /* RequestIO failed */ 329 return ret; /* RequestIO failed */
342} 330}
343 331
332static int fmvj18x_ioprobe(struct pcmcia_device *p_dev,
333 cistpl_cftable_entry_t *cfg,
334 cistpl_cftable_entry_t *dflt,
335 unsigned int vcc,
336 void *priv_data)
337{
338 return 0; /* strange, but that's what the code did already before... */
339}
340
344static int fmvj18x_config(struct pcmcia_device *link) 341static int fmvj18x_config(struct pcmcia_device *link)
345{ 342{
346 struct net_device *dev = link->priv; 343 struct net_device *dev = link->priv;
347 local_info_t *lp = netdev_priv(dev); 344 local_info_t *lp = netdev_priv(dev);
348 tuple_t tuple; 345 int i, ret;
349 cisparse_t parse;
350 u_short buf[32];
351 int i, last_fn = 0, last_ret = 0, ret;
352 unsigned int ioaddr; 346 unsigned int ioaddr;
353 cardtype_t cardtype; 347 cardtype_t cardtype;
354 char *card_name = "unknown"; 348 char *card_name = "unknown";
355 u_char *node_id; 349 u8 *buf;
350 size_t len;
351 u_char buggybuf[32];
352
353 dev_dbg(&link->dev, "fmvj18x_config\n");
356 354
357 DEBUG(0, "fmvj18x_config(0x%p)\n", link); 355 len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf);
356 kfree(buf);
358 357
359 tuple.TupleData = (u_char *)buf; 358 if (len) {
360 tuple.TupleDataMax = 64;
361 tuple.TupleOffset = 0;
362 tuple.DesiredTuple = CISTPL_FUNCE;
363 tuple.TupleOffset = 0;
364 if (pcmcia_get_first_tuple(link, &tuple) == 0) {
365 /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */ 359 /* Yes, I have CISTPL_FUNCE. Let's check CISTPL_MANFID */
366 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 360 ret = pcmcia_loop_config(link, fmvj18x_ioprobe, NULL);
367 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 361 if (ret != 0)
368 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); 362 goto failed;
369 CS_CHECK(ParseTuple, pcmcia_parse_tuple(&tuple, &parse)); 363
370 link->conf.ConfigIndex = parse.cftable_entry.index;
371 switch (link->manf_id) { 364 switch (link->manf_id) {
372 case MANFID_TDK: 365 case MANFID_TDK:
373 cardtype = TDK; 366 cardtype = TDK;
@@ -433,17 +426,24 @@ static int fmvj18x_config(struct pcmcia_device *link)
433 426
434 if (link->io.NumPorts2 != 0) { 427 if (link->io.NumPorts2 != 0) {
435 link->irq.Attributes = 428 link->irq.Attributes =
436 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT; 429 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
437 ret = mfc_try_io_port(link); 430 ret = mfc_try_io_port(link);
438 if (ret != 0) goto cs_failed; 431 if (ret != 0) goto failed;
439 } else if (cardtype == UNGERMANN) { 432 } else if (cardtype == UNGERMANN) {
440 ret = ungermann_try_io_port(link); 433 ret = ungermann_try_io_port(link);
441 if (ret != 0) goto cs_failed; 434 if (ret != 0) goto failed;
442 } else { 435 } else {
443 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); 436 ret = pcmcia_request_io(link, &link->io);
437 if (ret)
438 goto failed;
444 } 439 }
445 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 440 ret = pcmcia_request_irq(link, &link->irq);
446 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 441 if (ret)
442 goto failed;
443 ret = pcmcia_request_configuration(link, &link->conf);
444 if (ret)
445 goto failed;
446
447 dev->irq = link->irq.AssignedIRQ; 447 dev->irq = link->irq.AssignedIRQ;
448 dev->base_addr = link->io.BasePort1; 448 dev->base_addr = link->io.BasePort1;
449 449
@@ -474,21 +474,21 @@ static int fmvj18x_config(struct pcmcia_device *link)
474 case CONTEC: 474 case CONTEC:
475 case NEC: 475 case NEC:
476 case KME: 476 case KME:
477 tuple.DesiredTuple = CISTPL_FUNCE;
478 tuple.TupleOffset = 0;
479 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
480 tuple.TupleOffset = 0;
481 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple));
482 if (cardtype == MBH10304) { 477 if (cardtype == MBH10304) {
483 /* MBH10304's CIS_FUNCE is corrupted */
484 node_id = &(tuple.TupleData[5]);
485 card_name = "FMV-J182"; 478 card_name = "FMV-J182";
486 } else { 479
487 while (tuple.TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID ) { 480 len = pcmcia_get_tuple(link, CISTPL_FUNCE, &buf);
488 CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(link, &tuple)); 481 if (len < 11) {
489 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); 482 kfree(buf);
483 goto failed;
490 } 484 }
491 node_id = &(tuple.TupleData[2]); 485 /* Read MACID from CIS */
486 for (i = 0; i < 6; i++)
487 dev->dev_addr[i] = buf[i + 5];
488 kfree(buf);
489 } else {
490 if (pcmcia_get_mac_from_cis(link, dev))
491 goto failed;
492 if( cardtype == TDK ) { 492 if( cardtype == TDK ) {
493 card_name = "TDK LAK-CD021"; 493 card_name = "TDK LAK-CD021";
494 } else if( cardtype == LA501 ) { 494 } else if( cardtype == LA501 ) {
@@ -501,9 +501,6 @@ static int fmvj18x_config(struct pcmcia_device *link)
501 card_name = "C-NET(PC)C"; 501 card_name = "C-NET(PC)C";
502 } 502 }
503 } 503 }
504 /* Read MACID from CIS */
505 for (i = 0; i < 6; i++)
506 dev->dev_addr[i] = node_id[i];
507 break; 504 break;
508 case UNGERMANN: 505 case UNGERMANN:
509 /* Read MACID from register */ 506 /* Read MACID from register */
@@ -513,12 +510,12 @@ static int fmvj18x_config(struct pcmcia_device *link)
513 break; 510 break;
514 case XXX10304: 511 case XXX10304:
515 /* Read MACID from Buggy CIS */ 512 /* Read MACID from Buggy CIS */
516 if (fmvj18x_get_hwinfo(link, tuple.TupleData) == -1) { 513 if (fmvj18x_get_hwinfo(link, buggybuf) == -1) {
517 printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n"); 514 printk(KERN_NOTICE "fmvj18x_cs: unable to read hardware net address.\n");
518 goto failed; 515 goto failed;
519 } 516 }
520 for (i = 0 ; i < 6; i++) { 517 for (i = 0 ; i < 6; i++) {
521 dev->dev_addr[i] = tuple.TupleData[i]; 518 dev->dev_addr[i] = buggybuf[i];
522 } 519 }
523 card_name = "FMV-J182"; 520 card_name = "FMV-J182";
524 break; 521 break;
@@ -533,7 +530,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
533 530
534 lp->cardtype = cardtype; 531 lp->cardtype = cardtype;
535 link->dev_node = &lp->node; 532 link->dev_node = &lp->node;
536 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 533 SET_NETDEV_DEV(dev, &link->dev);
537 534
538 if (register_netdev(dev) != 0) { 535 if (register_netdev(dev) != 0) {
539 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n"); 536 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
@@ -551,9 +548,6 @@ static int fmvj18x_config(struct pcmcia_device *link)
551 548
552 return 0; 549 return 0;
553 550
554cs_failed:
555 /* All Card Services errors end up here */
556 cs_error(link, last_fn, last_ret);
557failed: 551failed:
558 fmvj18x_release(link); 552 fmvj18x_release(link);
559 return -ENODEV; 553 return -ENODEV;
@@ -571,16 +565,14 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
571 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 565 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
572 req.Base = 0; req.Size = 0; 566 req.Base = 0; req.Size = 0;
573 req.AccessSpeed = 0; 567 req.AccessSpeed = 0;
574 i = pcmcia_request_window(&link, &req, &link->win); 568 i = pcmcia_request_window(link, &req, &link->win);
575 if (i != 0) { 569 if (i != 0)
576 cs_error(link, RequestWindow, i);
577 return -1; 570 return -1;
578 }
579 571
580 base = ioremap(req.Base, req.Size); 572 base = ioremap(req.Base, req.Size);
581 mem.Page = 0; 573 mem.Page = 0;
582 mem.CardOffset = 0; 574 mem.CardOffset = 0;
583 pcmcia_map_mem_page(link->win, &mem); 575 pcmcia_map_mem_page(link, link->win, &mem);
584 576
585 /* 577 /*
586 * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format 578 * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format
@@ -605,9 +597,7 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
605 } 597 }
606 598
607 iounmap(base); 599 iounmap(base);
608 j = pcmcia_release_window(link->win); 600 j = pcmcia_release_window(link, link->win);
609 if (j != 0)
610 cs_error(link, ReleaseWindow, j);
611 return (i != 0x200) ? 0 : -1; 601 return (i != 0x200) ? 0 : -1;
612 602
613} /* fmvj18x_get_hwinfo */ 603} /* fmvj18x_get_hwinfo */
@@ -626,11 +616,9 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
626 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 616 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
627 req.Base = 0; req.Size = 0; 617 req.Base = 0; req.Size = 0;
628 req.AccessSpeed = 0; 618 req.AccessSpeed = 0;
629 i = pcmcia_request_window(&link, &req, &link->win); 619 i = pcmcia_request_window(link, &req, &link->win);
630 if (i != 0) { 620 if (i != 0)
631 cs_error(link, RequestWindow, i);
632 return -1; 621 return -1;
633 }
634 622
635 lp->base = ioremap(req.Base, req.Size); 623 lp->base = ioremap(req.Base, req.Size);
636 if (lp->base == NULL) { 624 if (lp->base == NULL) {
@@ -640,11 +628,10 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
640 628
641 mem.Page = 0; 629 mem.Page = 0;
642 mem.CardOffset = 0; 630 mem.CardOffset = 0;
643 i = pcmcia_map_mem_page(link->win, &mem); 631 i = pcmcia_map_mem_page(link, link->win, &mem);
644 if (i != 0) { 632 if (i != 0) {
645 iounmap(lp->base); 633 iounmap(lp->base);
646 lp->base = NULL; 634 lp->base = NULL;
647 cs_error(link, MapMemPage, i);
648 return -1; 635 return -1;
649 } 636 }
650 637
@@ -671,15 +658,13 @@ static void fmvj18x_release(struct pcmcia_device *link)
671 u_char __iomem *tmp; 658 u_char __iomem *tmp;
672 int j; 659 int j;
673 660
674 DEBUG(0, "fmvj18x_release(0x%p)\n", link); 661 dev_dbg(&link->dev, "fmvj18x_release\n");
675 662
676 if (lp->base != NULL) { 663 if (lp->base != NULL) {
677 tmp = lp->base; 664 tmp = lp->base;
678 lp->base = NULL; /* set NULL before iounmap */ 665 lp->base = NULL; /* set NULL before iounmap */
679 iounmap(tmp); 666 iounmap(tmp);
680 j = pcmcia_release_window(link->win); 667 j = pcmcia_release_window(link, link->win);
681 if (j != 0)
682 cs_error(link, ReleaseWindow, j);
683 } 668 }
684 669
685 pcmcia_disable_device(link); 670 pcmcia_disable_device(link);
@@ -788,8 +773,8 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
788 outb(tx_stat, ioaddr + TX_STATUS); 773 outb(tx_stat, ioaddr + TX_STATUS);
789 outb(rx_stat, ioaddr + RX_STATUS); 774 outb(rx_stat, ioaddr + RX_STATUS);
790 775
791 DEBUG(4, "%s: interrupt, rx_status %02x.\n", dev->name, rx_stat); 776 pr_debug("%s: interrupt, rx_status %02x.\n", dev->name, rx_stat);
792 DEBUG(4, " tx_status %02x.\n", tx_stat); 777 pr_debug(" tx_status %02x.\n", tx_stat);
793 778
794 if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { 779 if (rx_stat || (inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
795 /* there is packet(s) in rx buffer */ 780 /* there is packet(s) in rx buffer */
@@ -809,8 +794,8 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
809 } 794 }
810 netif_wake_queue(dev); 795 netif_wake_queue(dev);
811 } 796 }
812 DEBUG(4, "%s: exiting interrupt,\n", dev->name); 797 pr_debug("%s: exiting interrupt,\n", dev->name);
813 DEBUG(4, " tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat); 798 pr_debug(" tx_status %02x, rx_status %02x.\n", tx_stat, rx_stat);
814 799
815 outb(D_TX_INTR, ioaddr + TX_INTR); 800 outb(D_TX_INTR, ioaddr + TX_INTR);
816 outb(D_RX_INTR, ioaddr + RX_INTR); 801 outb(D_RX_INTR, ioaddr + RX_INTR);
@@ -882,7 +867,7 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
882 return NETDEV_TX_BUSY; 867 return NETDEV_TX_BUSY;
883 } 868 }
884 869
885 DEBUG(4, "%s: Transmitting a packet of length %lu.\n", 870 pr_debug("%s: Transmitting a packet of length %lu.\n",
886 dev->name, (unsigned long)skb->len); 871 dev->name, (unsigned long)skb->len);
887 dev->stats.tx_bytes += skb->len; 872 dev->stats.tx_bytes += skb->len;
888 873
@@ -937,7 +922,7 @@ static void fjn_reset(struct net_device *dev)
937 unsigned int ioaddr = dev->base_addr; 922 unsigned int ioaddr = dev->base_addr;
938 int i; 923 int i;
939 924
940 DEBUG(4, "fjn_reset(%s) called.\n",dev->name); 925 pr_debug("fjn_reset(%s) called.\n",dev->name);
941 926
942 /* Reset controller */ 927 /* Reset controller */
943 if( sram_config == 0 ) 928 if( sram_config == 0 )
@@ -1015,13 +1000,13 @@ static void fjn_rx(struct net_device *dev)
1015 unsigned int ioaddr = dev->base_addr; 1000 unsigned int ioaddr = dev->base_addr;
1016 int boguscount = 10; /* 5 -> 10: by agy 19940922 */ 1001 int boguscount = 10; /* 5 -> 10: by agy 19940922 */
1017 1002
1018 DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n", 1003 pr_debug("%s: in rx_packet(), rx_status %02x.\n",
1019 dev->name, inb(ioaddr + RX_STATUS)); 1004 dev->name, inb(ioaddr + RX_STATUS));
1020 1005
1021 while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) { 1006 while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
1022 u_short status = inw(ioaddr + DATAPORT); 1007 u_short status = inw(ioaddr + DATAPORT);
1023 1008
1024 DEBUG(4, "%s: Rxing packet mode %02x status %04x.\n", 1009 pr_debug("%s: Rxing packet mode %02x status %04x.\n",
1025 dev->name, inb(ioaddr + RX_MODE), status); 1010 dev->name, inb(ioaddr + RX_MODE), status);
1026#ifndef final_version 1011#ifndef final_version
1027 if (status == 0) { 1012 if (status == 0) {
@@ -1061,16 +1046,14 @@ static void fjn_rx(struct net_device *dev)
1061 (pkt_len + 1) >> 1); 1046 (pkt_len + 1) >> 1);
1062 skb->protocol = eth_type_trans(skb, dev); 1047 skb->protocol = eth_type_trans(skb, dev);
1063 1048
1064#ifdef PCMCIA_DEBUG 1049 {
1065 if (pc_debug > 5) {
1066 int i; 1050 int i;
1067 printk(KERN_DEBUG "%s: Rxed packet of length %d: ", 1051 pr_debug("%s: Rxed packet of length %d: ",
1068 dev->name, pkt_len); 1052 dev->name, pkt_len);
1069 for (i = 0; i < 14; i++) 1053 for (i = 0; i < 14; i++)
1070 printk(" %02x", skb->data[i]); 1054 pr_debug(" %02x", skb->data[i]);
1071 printk(".\n"); 1055 pr_debug(".\n");
1072 } 1056 }
1073#endif
1074 1057
1075 netif_rx(skb); 1058 netif_rx(skb);
1076 dev->stats.rx_packets++; 1059 dev->stats.rx_packets++;
@@ -1094,7 +1077,7 @@ static void fjn_rx(struct net_device *dev)
1094 } 1077 }
1095 1078
1096 if (i > 0) 1079 if (i > 0)
1097 DEBUG(5, "%s: Exint Rx packet with mode %02x after " 1080 pr_debug("%s: Exint Rx packet with mode %02x after "
1098 "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i); 1081 "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
1099 } 1082 }
1100*/ 1083*/
@@ -1112,24 +1095,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
1112 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); 1095 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
1113} 1096}
1114 1097
1115#ifdef PCMCIA_DEBUG
1116static u32 netdev_get_msglevel(struct net_device *dev)
1117{
1118 return pc_debug;
1119}
1120
1121static void netdev_set_msglevel(struct net_device *dev, u32 level)
1122{
1123 pc_debug = level;
1124}
1125#endif /* PCMCIA_DEBUG */
1126
1127static const struct ethtool_ops netdev_ethtool_ops = { 1098static const struct ethtool_ops netdev_ethtool_ops = {
1128 .get_drvinfo = netdev_get_drvinfo, 1099 .get_drvinfo = netdev_get_drvinfo,
1129#ifdef PCMCIA_DEBUG
1130 .get_msglevel = netdev_get_msglevel,
1131 .set_msglevel = netdev_set_msglevel,
1132#endif /* PCMCIA_DEBUG */
1133}; 1100};
1134 1101
1135static int fjn_config(struct net_device *dev, struct ifmap *map){ 1102static int fjn_config(struct net_device *dev, struct ifmap *map){
@@ -1141,7 +1108,7 @@ static int fjn_open(struct net_device *dev)
1141 struct local_info_t *lp = netdev_priv(dev); 1108 struct local_info_t *lp = netdev_priv(dev);
1142 struct pcmcia_device *link = lp->p_dev; 1109 struct pcmcia_device *link = lp->p_dev;
1143 1110
1144 DEBUG(4, "fjn_open('%s').\n", dev->name); 1111 pr_debug("fjn_open('%s').\n", dev->name);
1145 1112
1146 if (!pcmcia_dev_present(link)) 1113 if (!pcmcia_dev_present(link))
1147 return -ENODEV; 1114 return -ENODEV;
@@ -1167,7 +1134,7 @@ static int fjn_close(struct net_device *dev)
1167 struct pcmcia_device *link = lp->p_dev; 1134 struct pcmcia_device *link = lp->p_dev;
1168 unsigned int ioaddr = dev->base_addr; 1135 unsigned int ioaddr = dev->base_addr;
1169 1136
1170 DEBUG(4, "fjn_close('%s').\n", dev->name); 1137 pr_debug("fjn_close('%s').\n", dev->name);
1171 1138
1172 lp->open_time = 0; 1139 lp->open_time = 0;
1173 netif_stop_queue(dev); 1140 netif_stop_queue(dev);
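fmvj18x_cs shows the new CIS helpers replacing hand-rolled tuple walking: pcmcia_loop_config() iterates the config table, pcmcia_get_mac_from_cis() pulls the LAN node ID, and pcmcia_get_tuple() returns a kmalloc'd copy of a raw tuple that the caller must kfree(). A sketch of the tuple-reading change, using the 0x88 EEPROM tuple from the 3c589 hunk and the ownership convention the hunks imply (not part of the patch):

    /* before: tuple_t walked by hand, data parsed in place */
    tuple.DesiredTuple = 0x88;
    if (pcmcia_get_first_tuple(link, &tuple) == 0) {
        pcmcia_get_tuple_data(link, &tuple);
        /* ... parse tuple.TupleData ... */
    }

    /* after: one call, caller owns (and frees) the returned buffer */
    u8 *buf;
    size_t len = pcmcia_get_tuple(link, 0x88, &buf);
    if (buf && len >= 6) {
        /* ... parse buf ... */
    }
    kfree(buf);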
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 06618af1a468..37f4a6fdc3ef 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -69,17 +69,6 @@
69#define PCMCIA 69#define PCMCIA
70#include "../tokenring/ibmtr.c" 70#include "../tokenring/ibmtr.c"
71 71
72#ifdef PCMCIA_DEBUG
73static int pc_debug = PCMCIA_DEBUG;
74module_param(pc_debug, int, 0);
75#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
76static char *version =
77"ibmtr_cs.c 1.10 1996/01/06 05:19:00 (Steve Kipisz)\n"
78" 2.2.7 1999/05/03 12:00:00 (Mike Phillips)\n"
79" 2.4.2 2001/30/28 Midnight (Burt Silverman)\n";
80#else
81#define DEBUG(n, args...)
82#endif
83 72
84/*====================================================================*/ 73/*====================================================================*/
85 74
@@ -130,6 +119,12 @@ static const struct ethtool_ops netdev_ethtool_ops = {
130 .get_drvinfo = netdev_get_drvinfo, 119 .get_drvinfo = netdev_get_drvinfo,
131}; 120};
132 121
122static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) {
123 ibmtr_dev_t *info = dev_id;
124 struct net_device *dev = info->dev;
125 return tok_interrupt(irq, dev);
126};
127
133/*====================================================================== 128/*======================================================================
134 129
135 ibmtr_attach() creates an "instance" of the driver, allocating 130 ibmtr_attach() creates an "instance" of the driver, allocating
@@ -143,7 +138,7 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
143 ibmtr_dev_t *info; 138 ibmtr_dev_t *info;
144 struct net_device *dev; 139 struct net_device *dev;
145 140
146 DEBUG(0, "ibmtr_attach()\n"); 141 dev_dbg(&link->dev, "ibmtr_attach()\n");
147 142
148 /* Create new token-ring device */ 143 /* Create new token-ring device */
149 info = kzalloc(sizeof(*info), GFP_KERNEL); 144 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -161,14 +156,13 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
161 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 156 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
162 link->io.NumPorts1 = 4; 157 link->io.NumPorts1 = 4;
163 link->io.IOAddrLines = 16; 158 link->io.IOAddrLines = 16;
164 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 159 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
165 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 160 link->irq.Handler = ibmtr_interrupt;
166 link->irq.Handler = &tok_interrupt;
167 link->conf.Attributes = CONF_ENABLE_IRQ; 161 link->conf.Attributes = CONF_ENABLE_IRQ;
168 link->conf.IntType = INT_MEMORY_AND_IO; 162 link->conf.IntType = INT_MEMORY_AND_IO;
169 link->conf.Present = PRESENT_OPTION; 163 link->conf.Present = PRESENT_OPTION;
170 164
171 link->irq.Instance = info->dev = dev; 165 info->dev = dev;
172 166
173 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 167 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
174 168
@@ -190,7 +184,7 @@ static void ibmtr_detach(struct pcmcia_device *link)
190 struct net_device *dev = info->dev; 184 struct net_device *dev = info->dev;
191 struct tok_info *ti = netdev_priv(dev); 185 struct tok_info *ti = netdev_priv(dev);
192 186
193 DEBUG(0, "ibmtr_detach(0x%p)\n", link); 187 dev_dbg(&link->dev, "ibmtr_detach\n");
194 188
195 /* 189 /*
196 * When the card removal interrupt hits tok_interrupt(), 190 * When the card removal interrupt hits tok_interrupt(),
@@ -217,9 +211,6 @@ static void ibmtr_detach(struct pcmcia_device *link)
217 211
218======================================================================*/ 212======================================================================*/
219 213
220#define CS_CHECK(fn, ret) \
221do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
222
223static int __devinit ibmtr_config(struct pcmcia_device *link) 214static int __devinit ibmtr_config(struct pcmcia_device *link)
224{ 215{
225 ibmtr_dev_t *info = link->priv; 216 ibmtr_dev_t *info = link->priv;
@@ -227,9 +218,9 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
227 struct tok_info *ti = netdev_priv(dev); 218 struct tok_info *ti = netdev_priv(dev);
228 win_req_t req; 219 win_req_t req;
229 memreq_t mem; 220 memreq_t mem;
230 int i, last_ret, last_fn; 221 int i, ret;
231 222
232 DEBUG(0, "ibmtr_config(0x%p)\n", link); 223 dev_dbg(&link->dev, "ibmtr_config\n");
233 224
234 link->conf.ConfigIndex = 0x61; 225 link->conf.ConfigIndex = 0x61;
235 226
@@ -241,11 +232,15 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
241 if (i != 0) { 232 if (i != 0) {
242 /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */ 233 /* Couldn't get 0xA20-0xA23. Try ALTERNATE at 0xA24-0xA27. */
243 link->io.BasePort1 = 0xA24; 234 link->io.BasePort1 = 0xA24;
244 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io)); 235 ret = pcmcia_request_io(link, &link->io);
236 if (ret)
237 goto failed;
245 } 238 }
246 dev->base_addr = link->io.BasePort1; 239 dev->base_addr = link->io.BasePort1;
247 240
248 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 241 ret = pcmcia_request_irq(link, &link->irq);
242 if (ret)
243 goto failed;
249 dev->irq = link->irq.AssignedIRQ; 244 dev->irq = link->irq.AssignedIRQ;
250 ti->irq = link->irq.AssignedIRQ; 245 ti->irq = link->irq.AssignedIRQ;
251 ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq); 246 ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
@@ -256,11 +251,15 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
256 req.Base = 0; 251 req.Base = 0;
257 req.Size = 0x2000; 252 req.Size = 0x2000;
258 req.AccessSpeed = 250; 253 req.AccessSpeed = 250;
259 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); 254 ret = pcmcia_request_window(link, &req, &link->win);
255 if (ret)
256 goto failed;
260 257
261 mem.CardOffset = mmiobase; 258 mem.CardOffset = mmiobase;
262 mem.Page = 0; 259 mem.Page = 0;
263 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); 260 ret = pcmcia_map_mem_page(link, link->win, &mem);
261 if (ret)
262 goto failed;
264 ti->mmio = ioremap(req.Base, req.Size); 263 ti->mmio = ioremap(req.Base, req.Size);
265 264
266 /* Allocate the SRAM memory window */ 265 /* Allocate the SRAM memory window */
@@ -269,17 +268,23 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
269 req.Base = 0; 268 req.Base = 0;
270 req.Size = sramsize * 1024; 269 req.Size = sramsize * 1024;
271 req.AccessSpeed = 250; 270 req.AccessSpeed = 250;
272 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &info->sram_win_handle)); 271 ret = pcmcia_request_window(link, &req, &info->sram_win_handle);
272 if (ret)
273 goto failed;
273 274
274 mem.CardOffset = srambase; 275 mem.CardOffset = srambase;
275 mem.Page = 0; 276 mem.Page = 0;
276 CS_CHECK(MapMemPage, pcmcia_map_mem_page(info->sram_win_handle, &mem)); 277 ret = pcmcia_map_mem_page(link, info->sram_win_handle, &mem);
278 if (ret)
279 goto failed;
277 280
278 ti->sram_base = mem.CardOffset >> 12; 281 ti->sram_base = mem.CardOffset >> 12;
279 ti->sram_virt = ioremap(req.Base, req.Size); 282 ti->sram_virt = ioremap(req.Base, req.Size);
280 ti->sram_phys = req.Base; 283 ti->sram_phys = req.Base;
281 284
282 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 285 ret = pcmcia_request_configuration(link, &link->conf);
286 if (ret)
287 goto failed;
283 288
284 /* Set up the Token-Ring Controller Configuration Register and 289 /* Set up the Token-Ring Controller Configuration Register and
285 turn on the card. Check the "Local Area Network Credit Card 290 turn on the card. Check the "Local Area Network Credit Card
@@ -287,7 +292,7 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
287 ibmtr_hw_setup(dev, mmiobase); 292 ibmtr_hw_setup(dev, mmiobase);
288 293
289 link->dev_node = &info->node; 294 link->dev_node = &info->node;
290 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 295 SET_NETDEV_DEV(dev, &link->dev);
291 296
292 i = ibmtr_probe_card(dev); 297 i = ibmtr_probe_card(dev);
293 if (i != 0) { 298 if (i != 0) {
@@ -305,8 +310,6 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
305 dev->dev_addr); 310 dev->dev_addr);
306 return 0; 311 return 0;
307 312
308cs_failed:
309 cs_error(link, last_fn, last_ret);
310failed: 313failed:
311 ibmtr_release(link); 314 ibmtr_release(link);
312 return -ENODEV; 315 return -ENODEV;
@@ -325,12 +328,12 @@ static void ibmtr_release(struct pcmcia_device *link)
325 ibmtr_dev_t *info = link->priv; 328 ibmtr_dev_t *info = link->priv;
326 struct net_device *dev = info->dev; 329 struct net_device *dev = info->dev;
327 330
328 DEBUG(0, "ibmtr_release(0x%p)\n", link); 331 dev_dbg(&link->dev, "ibmtr_release\n");
329 332
330 if (link->win) { 333 if (link->win) {
331 struct tok_info *ti = netdev_priv(dev); 334 struct tok_info *ti = netdev_priv(dev);
332 iounmap(ti->mmio); 335 iounmap(ti->mmio);
333 pcmcia_release_window(info->sram_win_handle); 336 pcmcia_release_window(link, info->sram_win_handle);
334 } 337 }
335 pcmcia_disable_device(link); 338 pcmcia_disable_device(link);
336} 339}
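
The ibmtr_cs.c hunks above drop the CS_CHECK()/cs_error() idiom in favour of plain return-code checks. A minimal before/after sketch of that conversion (illustrative only, reusing the names from the driver above):

	/* old: the macro records the failing service and jumps to cs_failed,
	 * where cs_error() reported it */
	#define CS_CHECK(fn, ret) \
	do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

		CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));

	/* new: test the return value directly and reuse the driver's own
	 * failed: label for cleanup */
		ret = pcmcia_request_irq(link, &link->irq);
		if (ret)
			goto failed;
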
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 5ed6339c52bc..dae5ef6b2609 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -381,13 +381,6 @@ typedef struct _mace_private {
381Private Global Variables 381Private Global Variables
382---------------------------------------------------------------------------- */ 382---------------------------------------------------------------------------- */
383 383
384#ifdef PCMCIA_DEBUG
385static char rcsid[] =
386"nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao";
387static char *version =
388DRV_NAME " " DRV_VERSION " (Roger C. Pao)";
389#endif
390
391static const char *if_names[]={ 384static const char *if_names[]={
392 "Auto", "10baseT", "BNC", 385 "Auto", "10baseT", "BNC",
393}; 386};
@@ -406,12 +399,6 @@ MODULE_LICENSE("GPL");
406/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */ 399/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */
407INT_MODULE_PARM(if_port, 0); 400INT_MODULE_PARM(if_port, 0);
408 401
409#ifdef PCMCIA_DEBUG
410INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
411#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
412#else
413#define DEBUG(n, args...)
414#endif
415 402
416/* ---------------------------------------------------------------------------- 403/* ----------------------------------------------------------------------------
417Function Prototypes 404Function Prototypes
@@ -462,8 +449,7 @@ static int nmclan_probe(struct pcmcia_device *link)
462 mace_private *lp; 449 mace_private *lp;
463 struct net_device *dev; 450 struct net_device *dev;
464 451
465 DEBUG(0, "nmclan_attach()\n"); 452 dev_dbg(&link->dev, "nmclan_attach()\n");
466 DEBUG(1, "%s\n", rcsid);
467 453
468 /* Create new ethernet device */ 454 /* Create new ethernet device */
469 dev = alloc_etherdev(sizeof(mace_private)); 455 dev = alloc_etherdev(sizeof(mace_private));
@@ -477,10 +463,8 @@ static int nmclan_probe(struct pcmcia_device *link)
477 link->io.NumPorts1 = 32; 463 link->io.NumPorts1 = 32;
478 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 464 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
479 link->io.IOAddrLines = 5; 465 link->io.IOAddrLines = 5;
480 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 466 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
481 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
482 link->irq.Handler = &mace_interrupt; 467 link->irq.Handler = &mace_interrupt;
483 link->irq.Instance = dev;
484 link->conf.Attributes = CONF_ENABLE_IRQ; 468 link->conf.Attributes = CONF_ENABLE_IRQ;
485 link->conf.IntType = INT_MEMORY_AND_IO; 469 link->conf.IntType = INT_MEMORY_AND_IO;
486 link->conf.ConfigIndex = 1; 470 link->conf.ConfigIndex = 1;
@@ -507,7 +491,7 @@ static void nmclan_detach(struct pcmcia_device *link)
507{ 491{
508 struct net_device *dev = link->priv; 492 struct net_device *dev = link->priv;
509 493
510 DEBUG(0, "nmclan_detach(0x%p)\n", link); 494 dev_dbg(&link->dev, "nmclan_detach\n");
511 495
512 if (link->dev_node) 496 if (link->dev_node)
513 unregister_netdev(dev); 497 unregister_netdev(dev);
@@ -654,37 +638,40 @@ nmclan_config
654 ethernet device available to the system. 638 ethernet device available to the system.
655---------------------------------------------------------------------------- */ 639---------------------------------------------------------------------------- */
656 640
657#define CS_CHECK(fn, ret) \
658 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
659
660static int nmclan_config(struct pcmcia_device *link) 641static int nmclan_config(struct pcmcia_device *link)
661{ 642{
662 struct net_device *dev = link->priv; 643 struct net_device *dev = link->priv;
663 mace_private *lp = netdev_priv(dev); 644 mace_private *lp = netdev_priv(dev);
664 tuple_t tuple; 645 u8 *buf;
665 u_char buf[64]; 646 size_t len;
666 int i, last_ret, last_fn; 647 int i, ret;
667 unsigned int ioaddr; 648 unsigned int ioaddr;
668 649
669 DEBUG(0, "nmclan_config(0x%p)\n", link); 650 dev_dbg(&link->dev, "nmclan_config\n");
651
652 ret = pcmcia_request_io(link, &link->io);
653 if (ret)
654 goto failed;
655 ret = pcmcia_request_irq(link, &link->irq);
656 if (ret)
657 goto failed;
658 ret = pcmcia_request_configuration(link, &link->conf);
659 if (ret)
660 goto failed;
670 661
671 CS_CHECK(RequestIO, pcmcia_request_io(link, &link->io));
672 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
673 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
674 dev->irq = link->irq.AssignedIRQ; 662 dev->irq = link->irq.AssignedIRQ;
675 dev->base_addr = link->io.BasePort1; 663 dev->base_addr = link->io.BasePort1;
676 664
677 ioaddr = dev->base_addr; 665 ioaddr = dev->base_addr;
678 666
679 /* Read the ethernet address from the CIS. */ 667 /* Read the ethernet address from the CIS. */
680 tuple.DesiredTuple = 0x80 /* CISTPL_CFTABLE_ENTRY_MISC */; 668 len = pcmcia_get_tuple(link, 0x80, &buf);
681 tuple.TupleData = buf; 669 if (!buf || len < ETHER_ADDR_LEN) {
682 tuple.TupleDataMax = 64; 670 kfree(buf);
683 tuple.TupleOffset = 0; 671 goto failed;
684 tuple.Attributes = 0; 672 }
685 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 673 memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
686 CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link, &tuple)); 674 kfree(buf);
687 memcpy(dev->dev_addr, tuple.TupleData, ETHER_ADDR_LEN);
688 675
689 /* Verify configuration by reading the MACE ID. */ 676 /* Verify configuration by reading the MACE ID. */
690 { 677 {
@@ -693,7 +680,7 @@ static int nmclan_config(struct pcmcia_device *link)
693 sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL); 680 sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL);
694 sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH); 681 sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH);
695 if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) { 682 if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) {
696 DEBUG(0, "nmclan_cs configured: mace id=%x %x\n", 683 dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n",
697 sig[0], sig[1]); 684 sig[0], sig[1]);
698 } else { 685 } else {
699 printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should" 686 printk(KERN_NOTICE "nmclan_cs: mace id not found: %x %x should"
@@ -712,7 +699,7 @@ static int nmclan_config(struct pcmcia_device *link)
712 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n"); 699 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
713 700
714 link->dev_node = &lp->node; 701 link->dev_node = &lp->node;
715 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 702 SET_NETDEV_DEV(dev, &link->dev);
716 703
717 i = register_netdev(dev); 704 i = register_netdev(dev);
718 if (i != 0) { 705 if (i != 0) {
@@ -729,8 +716,6 @@ static int nmclan_config(struct pcmcia_device *link)
729 dev->dev_addr); 716 dev->dev_addr);
730 return 0; 717 return 0;
731 718
732cs_failed:
733 cs_error(link, last_fn, last_ret);
734failed: 719failed:
735 nmclan_release(link); 720 nmclan_release(link);
736 return -ENODEV; 721 return -ENODEV;
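
The nmclan_config() hunk above swaps the open-coded tuple walk (tuple_t plus pcmcia_get_first_tuple()/pcmcia_get_tuple_data()) for the pcmcia_get_tuple() helper. A condensed sketch of the new call sequence as used above; the helper allocates the buffer, so every exit path must kfree() it:

	u8 *buf;
	size_t len;

	/* fetch tuple 0x80 (CISTPL_CFTABLE_ENTRY_MISC) from the CIS;
	 * buf is allocated by the helper, len is the tuple length */
	len = pcmcia_get_tuple(link, 0x80, &buf);
	if (!buf || len < ETHER_ADDR_LEN) {
		kfree(buf);		/* kfree(NULL) is a no-op */
		goto failed;
	}
	memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
	kfree(buf);
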
@@ -744,7 +729,7 @@ nmclan_release
744---------------------------------------------------------------------------- */ 729---------------------------------------------------------------------------- */
745static void nmclan_release(struct pcmcia_device *link) 730static void nmclan_release(struct pcmcia_device *link)
746{ 731{
747 DEBUG(0, "nmclan_release(0x%p)\n", link); 732 dev_dbg(&link->dev, "nmclan_release\n");
748 pcmcia_disable_device(link); 733 pcmcia_disable_device(link);
749} 734}
750 735
@@ -795,7 +780,7 @@ static void nmclan_reset(struct net_device *dev)
795 /* Reset Xilinx */ 780 /* Reset Xilinx */
796 reg.Action = CS_WRITE; 781 reg.Action = CS_WRITE;
797 reg.Offset = CISREG_COR; 782 reg.Offset = CISREG_COR;
798 DEBUG(1, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n", 783 dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%lX, resetting...\n",
799 OrigCorValue); 784 OrigCorValue);
800 reg.Value = COR_SOFT_RESET; 785 reg.Value = COR_SOFT_RESET;
801 pcmcia_access_configuration_register(link, &reg); 786 pcmcia_access_configuration_register(link, &reg);
@@ -872,7 +857,7 @@ static int mace_close(struct net_device *dev)
872 mace_private *lp = netdev_priv(dev); 857 mace_private *lp = netdev_priv(dev);
873 struct pcmcia_device *link = lp->p_dev; 858 struct pcmcia_device *link = lp->p_dev;
874 859
875 DEBUG(2, "%s: shutting down ethercard.\n", dev->name); 860 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
876 861
877 /* Mask off all interrupts from the MACE chip. */ 862 /* Mask off all interrupts from the MACE chip. */
878 outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR); 863 outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR);
@@ -891,24 +876,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
891 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); 876 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
892} 877}
893 878
894#ifdef PCMCIA_DEBUG
895static u32 netdev_get_msglevel(struct net_device *dev)
896{
897 return pc_debug;
898}
899
900static void netdev_set_msglevel(struct net_device *dev, u32 level)
901{
902 pc_debug = level;
903}
904#endif /* PCMCIA_DEBUG */
905
906static const struct ethtool_ops netdev_ethtool_ops = { 879static const struct ethtool_ops netdev_ethtool_ops = {
907 .get_drvinfo = netdev_get_drvinfo, 880 .get_drvinfo = netdev_get_drvinfo,
908#ifdef PCMCIA_DEBUG
909 .get_msglevel = netdev_get_msglevel,
910 .set_msglevel = netdev_set_msglevel,
911#endif /* PCMCIA_DEBUG */
912}; 881};
913 882
914/* ---------------------------------------------------------------------------- 883/* ----------------------------------------------------------------------------
@@ -946,7 +915,7 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
946 915
947 netif_stop_queue(dev); 916 netif_stop_queue(dev);
948 917
949 DEBUG(3, "%s: mace_start_xmit(length = %ld) called.\n", 918 pr_debug("%s: mace_start_xmit(length = %ld) called.\n",
950 dev->name, (long)skb->len); 919 dev->name, (long)skb->len);
951 920
952#if (!TX_INTERRUPTABLE) 921#if (!TX_INTERRUPTABLE)
@@ -1008,7 +977,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
1008 int IntrCnt = MACE_MAX_IR_ITERATIONS; 977 int IntrCnt = MACE_MAX_IR_ITERATIONS;
1009 978
1010 if (dev == NULL) { 979 if (dev == NULL) {
1011 DEBUG(2, "mace_interrupt(): irq 0x%X for unknown device.\n", 980 pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
1012 irq); 981 irq);
1013 return IRQ_NONE; 982 return IRQ_NONE;
1014 } 983 }
@@ -1031,7 +1000,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
1031 } 1000 }
1032 1001
1033 if (!netif_device_present(dev)) { 1002 if (!netif_device_present(dev)) {
1034 DEBUG(2, "%s: interrupt from dead card\n", dev->name); 1003 pr_debug("%s: interrupt from dead card\n", dev->name);
1035 return IRQ_NONE; 1004 return IRQ_NONE;
1036 } 1005 }
1037 1006
@@ -1039,7 +1008,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
1039 /* WARNING: MACE_IR is a READ/CLEAR port! */ 1008 /* WARNING: MACE_IR is a READ/CLEAR port! */
1040 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR); 1009 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
1041 1010
1042 DEBUG(3, "mace_interrupt: irq 0x%X status 0x%X.\n", irq, status); 1011 pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
1043 1012
1044 if (status & MACE_IR_RCVINT) { 1013 if (status & MACE_IR_RCVINT) {
1045 mace_rx(dev, MACE_MAX_RX_ITERATIONS); 1014 mace_rx(dev, MACE_MAX_RX_ITERATIONS);
@@ -1158,7 +1127,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1158 ) { 1127 ) {
1159 rx_status = inw(ioaddr + AM2150_RCV); 1128 rx_status = inw(ioaddr + AM2150_RCV);
1160 1129
1161 DEBUG(3, "%s: in mace_rx(), framecnt 0x%X, rx_status" 1130 pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status"
1162 " 0x%X.\n", dev->name, rx_framecnt, rx_status); 1131 " 0x%X.\n", dev->name, rx_framecnt, rx_status);
1163 1132
1164 if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */ 1133 if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
@@ -1185,7 +1154,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1185 lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV); 1154 lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
1186 /* rcv collision count */ 1155 /* rcv collision count */
1187 1156
1188 DEBUG(3, " receiving packet size 0x%X rx_status" 1157 pr_debug(" receiving packet size 0x%X rx_status"
1189 " 0x%X.\n", pkt_len, rx_status); 1158 " 0x%X.\n", pkt_len, rx_status);
1190 1159
1191 skb = dev_alloc_skb(pkt_len+2); 1160 skb = dev_alloc_skb(pkt_len+2);
@@ -1204,7 +1173,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1204 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ 1173 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
1205 continue; 1174 continue;
1206 } else { 1175 } else {
1207 DEBUG(1, "%s: couldn't allocate a sk_buff of size" 1176 pr_debug("%s: couldn't allocate a sk_buff of size"
1208 " %d.\n", dev->name, pkt_len); 1177 " %d.\n", dev->name, pkt_len);
1209 lp->linux_stats.rx_dropped++; 1178 lp->linux_stats.rx_dropped++;
1210 } 1179 }
@@ -1220,28 +1189,28 @@ pr_linux_stats
1220---------------------------------------------------------------------------- */ 1189---------------------------------------------------------------------------- */
1221static void pr_linux_stats(struct net_device_stats *pstats) 1190static void pr_linux_stats(struct net_device_stats *pstats)
1222{ 1191{
1223 DEBUG(2, "pr_linux_stats\n"); 1192 pr_debug("pr_linux_stats\n");
1224 DEBUG(2, " rx_packets=%-7ld tx_packets=%ld\n", 1193 pr_debug(" rx_packets=%-7ld tx_packets=%ld\n",
1225 (long)pstats->rx_packets, (long)pstats->tx_packets); 1194 (long)pstats->rx_packets, (long)pstats->tx_packets);
1226 DEBUG(2, " rx_errors=%-7ld tx_errors=%ld\n", 1195 pr_debug(" rx_errors=%-7ld tx_errors=%ld\n",
1227 (long)pstats->rx_errors, (long)pstats->tx_errors); 1196 (long)pstats->rx_errors, (long)pstats->tx_errors);
1228 DEBUG(2, " rx_dropped=%-7ld tx_dropped=%ld\n", 1197 pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n",
1229 (long)pstats->rx_dropped, (long)pstats->tx_dropped); 1198 (long)pstats->rx_dropped, (long)pstats->tx_dropped);
1230 DEBUG(2, " multicast=%-7ld collisions=%ld\n", 1199 pr_debug(" multicast=%-7ld collisions=%ld\n",
1231 (long)pstats->multicast, (long)pstats->collisions); 1200 (long)pstats->multicast, (long)pstats->collisions);
1232 1201
1233 DEBUG(2, " rx_length_errors=%-7ld rx_over_errors=%ld\n", 1202 pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n",
1234 (long)pstats->rx_length_errors, (long)pstats->rx_over_errors); 1203 (long)pstats->rx_length_errors, (long)pstats->rx_over_errors);
1235 DEBUG(2, " rx_crc_errors=%-7ld rx_frame_errors=%ld\n", 1204 pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n",
1236 (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors); 1205 (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors);
1237 DEBUG(2, " rx_fifo_errors=%-7ld rx_missed_errors=%ld\n", 1206 pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n",
1238 (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors); 1207 (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors);
1239 1208
1240 DEBUG(2, " tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n", 1209 pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n",
1241 (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors); 1210 (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors);
1242 DEBUG(2, " tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n", 1211 pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n",
1243 (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors); 1212 (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors);
1244 DEBUG(2, " tx_window_errors=%ld\n", 1213 pr_debug(" tx_window_errors=%ld\n",
1245 (long)pstats->tx_window_errors); 1214 (long)pstats->tx_window_errors);
1246} /* pr_linux_stats */ 1215} /* pr_linux_stats */
1247 1216
@@ -1250,48 +1219,48 @@ pr_mace_stats
1250---------------------------------------------------------------------------- */ 1219---------------------------------------------------------------------------- */
1251static void pr_mace_stats(mace_statistics *pstats) 1220static void pr_mace_stats(mace_statistics *pstats)
1252{ 1221{
1253 DEBUG(2, "pr_mace_stats\n"); 1222 pr_debug("pr_mace_stats\n");
1254 1223
1255 DEBUG(2, " xmtsv=%-7d uflo=%d\n", 1224 pr_debug(" xmtsv=%-7d uflo=%d\n",
1256 pstats->xmtsv, pstats->uflo); 1225 pstats->xmtsv, pstats->uflo);
1257 DEBUG(2, " lcol=%-7d more=%d\n", 1226 pr_debug(" lcol=%-7d more=%d\n",
1258 pstats->lcol, pstats->more); 1227 pstats->lcol, pstats->more);
1259 DEBUG(2, " one=%-7d defer=%d\n", 1228 pr_debug(" one=%-7d defer=%d\n",
1260 pstats->one, pstats->defer); 1229 pstats->one, pstats->defer);
1261 DEBUG(2, " lcar=%-7d rtry=%d\n", 1230 pr_debug(" lcar=%-7d rtry=%d\n",
1262 pstats->lcar, pstats->rtry); 1231 pstats->lcar, pstats->rtry);
1263 1232
1264 /* MACE_XMTRC */ 1233 /* MACE_XMTRC */
1265 DEBUG(2, " exdef=%-7d xmtrc=%d\n", 1234 pr_debug(" exdef=%-7d xmtrc=%d\n",
1266 pstats->exdef, pstats->xmtrc); 1235 pstats->exdef, pstats->xmtrc);
1267 1236
1268 /* RFS1--Receive Status (RCVSTS) */ 1237 /* RFS1--Receive Status (RCVSTS) */
1269 DEBUG(2, " oflo=%-7d clsn=%d\n", 1238 pr_debug(" oflo=%-7d clsn=%d\n",
1270 pstats->oflo, pstats->clsn); 1239 pstats->oflo, pstats->clsn);
1271 DEBUG(2, " fram=%-7d fcs=%d\n", 1240 pr_debug(" fram=%-7d fcs=%d\n",
1272 pstats->fram, pstats->fcs); 1241 pstats->fram, pstats->fcs);
1273 1242
1274 /* RFS2--Runt Packet Count (RNTPC) */ 1243 /* RFS2--Runt Packet Count (RNTPC) */
1275 /* RFS3--Receive Collision Count (RCVCC) */ 1244 /* RFS3--Receive Collision Count (RCVCC) */
1276 DEBUG(2, " rfs_rntpc=%-7d rfs_rcvcc=%d\n", 1245 pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n",
1277 pstats->rfs_rntpc, pstats->rfs_rcvcc); 1246 pstats->rfs_rntpc, pstats->rfs_rcvcc);
1278 1247
1279 /* MACE_IR */ 1248 /* MACE_IR */
1280 DEBUG(2, " jab=%-7d babl=%d\n", 1249 pr_debug(" jab=%-7d babl=%d\n",
1281 pstats->jab, pstats->babl); 1250 pstats->jab, pstats->babl);
1282 DEBUG(2, " cerr=%-7d rcvcco=%d\n", 1251 pr_debug(" cerr=%-7d rcvcco=%d\n",
1283 pstats->cerr, pstats->rcvcco); 1252 pstats->cerr, pstats->rcvcco);
1284 DEBUG(2, " rntpco=%-7d mpco=%d\n", 1253 pr_debug(" rntpco=%-7d mpco=%d\n",
1285 pstats->rntpco, pstats->mpco); 1254 pstats->rntpco, pstats->mpco);
1286 1255
1287 /* MACE_MPC */ 1256 /* MACE_MPC */
1288 DEBUG(2, " mpc=%d\n", pstats->mpc); 1257 pr_debug(" mpc=%d\n", pstats->mpc);
1289 1258
1290 /* MACE_RNTPC */ 1259 /* MACE_RNTPC */
1291 DEBUG(2, " rntpc=%d\n", pstats->rntpc); 1260 pr_debug(" rntpc=%d\n", pstats->rntpc);
1292 1261
1293 /* MACE_RCVCC */ 1262 /* MACE_RCVCC */
1294 DEBUG(2, " rcvcc=%d\n", pstats->rcvcc); 1263 pr_debug(" rcvcc=%d\n", pstats->rcvcc);
1295 1264
1296} /* pr_mace_stats */ 1265} /* pr_mace_stats */
1297 1266
@@ -1360,7 +1329,7 @@ static struct net_device_stats *mace_get_stats(struct net_device *dev)
1360 1329
1361 update_stats(dev->base_addr, dev); 1330 update_stats(dev->base_addr, dev);
1362 1331
1363 DEBUG(1, "%s: updating the statistics.\n", dev->name); 1332 pr_debug("%s: updating the statistics.\n", dev->name);
1364 pr_linux_stats(&lp->linux_stats); 1333 pr_linux_stats(&lp->linux_stats);
1365 pr_mace_stats(&lp->mace_stats); 1334 pr_mace_stats(&lp->mace_stats);
1366 1335
@@ -1427,7 +1396,7 @@ static void BuildLAF(int *ladrf, int *adr)
1427 ladrf[byte] |= (1 << (hashcode & 7)); 1396 ladrf[byte] |= (1 << (hashcode & 7));
1428 1397
1429#ifdef PCMCIA_DEBUG 1398#ifdef PCMCIA_DEBUG
1430 if (pc_debug > 2) 1399 if (0)
1431 printk(KERN_DEBUG " adr =%pM\n", adr); 1400 printk(KERN_DEBUG " adr =%pM\n", adr);
1432 printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode); 1401 printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
1433 for (i = 0; i < 8; i++) 1402 for (i = 0; i < 8; i++)
@@ -1454,12 +1423,12 @@ static void restore_multicast_list(struct net_device *dev)
1454 unsigned int ioaddr = dev->base_addr; 1423 unsigned int ioaddr = dev->base_addr;
1455 int i; 1424 int i;
1456 1425
1457 DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", 1426 pr_debug("%s: restoring Rx mode to %d addresses.\n",
1458 dev->name, num_addrs); 1427 dev->name, num_addrs);
1459 1428
1460 if (num_addrs > 0) { 1429 if (num_addrs > 0) {
1461 1430
1462 DEBUG(1, "Attempt to restore multicast list detected.\n"); 1431 pr_debug("Attempt to restore multicast list detected.\n");
1463 1432
1464 mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR); 1433 mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR);
1465 /* Poll ADDRCHG bit */ 1434 /* Poll ADDRCHG bit */
@@ -1511,11 +1480,11 @@ static void set_multicast_list(struct net_device *dev)
1511 struct dev_mc_list *dmi = dev->mc_list; 1480 struct dev_mc_list *dmi = dev->mc_list;
1512 1481
1513#ifdef PCMCIA_DEBUG 1482#ifdef PCMCIA_DEBUG
1514 if (pc_debug > 1) { 1483 {
1515 static int old; 1484 static int old;
1516 if (dev->mc_count != old) { 1485 if (dev->mc_count != old) {
1517 old = dev->mc_count; 1486 old = dev->mc_count;
1518 DEBUG(0, "%s: setting Rx mode to %d addresses.\n", 1487 pr_debug("%s: setting Rx mode to %d addresses.\n",
1519 dev->name, old); 1488 dev->name, old);
1520 } 1489 }
1521 } 1490 }
@@ -1546,7 +1515,7 @@ static void restore_multicast_list(struct net_device *dev)
1546 unsigned int ioaddr = dev->base_addr; 1515 unsigned int ioaddr = dev->base_addr;
1547 mace_private *lp = netdev_priv(dev); 1516 mace_private *lp = netdev_priv(dev);
1548 1517
1549 DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", dev->name, 1518 pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name,
1550 lp->multicast_num_addrs); 1519 lp->multicast_num_addrs);
1551 1520
1552 if (dev->flags & IFF_PROMISC) { 1521 if (dev->flags & IFF_PROMISC) {
@@ -1567,11 +1536,11 @@ static void set_multicast_list(struct net_device *dev)
1567 mace_private *lp = netdev_priv(dev); 1536 mace_private *lp = netdev_priv(dev);
1568 1537
1569#ifdef PCMCIA_DEBUG 1538#ifdef PCMCIA_DEBUG
1570 if (pc_debug > 1) { 1539 {
1571 static int old; 1540 static int old;
1572 if (dev->mc_count != old) { 1541 if (dev->mc_count != old) {
1573 old = dev->mc_count; 1542 old = dev->mc_count;
1574 DEBUG(0, "%s: setting Rx mode to %d addresses.\n", 1543 pr_debug("%s: setting Rx mode to %d addresses.\n",
1575 dev->name, old); 1544 dev->name, old);
1576 } 1545 }
1577 } 1546 }
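
Throughout nmclan_cs.c the driver-private PCMCIA_DEBUG/pc_debug machinery is removed in favour of the generic dev_dbg()/pr_debug() helpers, so the per-driver debug level goes away. The shape of the conversion, sketched from the hunks above:

	/* removed: compile-time debug macro gated on a module parameter */
	#ifdef PCMCIA_DEBUG
	INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
	#define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args)
	#else
	#define DEBUG(n, args...)
	#endif

	/* replacement: dev_dbg() where a struct device is at hand,
	 * pr_debug() elsewhere */
	dev_dbg(&link->dev, "nmclan_config\n");
	pr_debug("%s: mace_start_xmit(length = %ld) called.\n",
		 dev->name, (long)skb->len);
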
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 94c9ad2746bc..cbe462ed221f 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -71,15 +71,6 @@
71 71
72static const char *if_names[] = { "auto", "10baseT", "10base2"}; 72static const char *if_names[] = { "auto", "10baseT", "10base2"};
73 73
74#ifdef PCMCIA_DEBUG
75static int pc_debug = PCMCIA_DEBUG;
76module_param(pc_debug, int, 0);
77#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
78static char *version =
79"pcnet_cs.c 1.153 2003/11/09 18:53:09 (David Hinds)";
80#else
81#define DEBUG(n, args...)
82#endif
83 74
84/*====================================================================*/ 75/*====================================================================*/
85 76
@@ -265,7 +256,7 @@ static int pcnet_probe(struct pcmcia_device *link)
265 pcnet_dev_t *info; 256 pcnet_dev_t *info;
266 struct net_device *dev; 257 struct net_device *dev;
267 258
268 DEBUG(0, "pcnet_attach()\n"); 259 dev_dbg(&link->dev, "pcnet_attach()\n");
269 260
270 /* Create new ethernet device */ 261 /* Create new ethernet device */
271 dev = __alloc_ei_netdev(sizeof(pcnet_dev_t)); 262 dev = __alloc_ei_netdev(sizeof(pcnet_dev_t));
@@ -275,7 +266,6 @@ static int pcnet_probe(struct pcmcia_device *link)
275 link->priv = dev; 266 link->priv = dev;
276 267
277 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 268 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
278 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
279 link->conf.Attributes = CONF_ENABLE_IRQ; 269 link->conf.Attributes = CONF_ENABLE_IRQ;
280 link->conf.IntType = INT_MEMORY_AND_IO; 270 link->conf.IntType = INT_MEMORY_AND_IO;
281 271
@@ -297,7 +287,7 @@ static void pcnet_detach(struct pcmcia_device *link)
297{ 287{
298 struct net_device *dev = link->priv; 288 struct net_device *dev = link->priv;
299 289
300 DEBUG(0, "pcnet_detach(0x%p)\n", link); 290 dev_dbg(&link->dev, "pcnet_detach\n");
301 291
302 if (link->dev_node) 292 if (link->dev_node)
303 unregister_netdev(dev); 293 unregister_netdev(dev);
@@ -326,17 +316,15 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
326 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 316 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
327 req.Base = 0; req.Size = 0; 317 req.Base = 0; req.Size = 0;
328 req.AccessSpeed = 0; 318 req.AccessSpeed = 0;
329 i = pcmcia_request_window(&link, &req, &link->win); 319 i = pcmcia_request_window(link, &req, &link->win);
330 if (i != 0) { 320 if (i != 0)
331 cs_error(link, RequestWindow, i);
332 return NULL; 321 return NULL;
333 }
334 322
335 virt = ioremap(req.Base, req.Size); 323 virt = ioremap(req.Base, req.Size);
336 mem.Page = 0; 324 mem.Page = 0;
337 for (i = 0; i < NR_INFO; i++) { 325 for (i = 0; i < NR_INFO; i++) {
338 mem.CardOffset = hw_info[i].offset & ~(req.Size-1); 326 mem.CardOffset = hw_info[i].offset & ~(req.Size-1);
339 pcmcia_map_mem_page(link->win, &mem); 327 pcmcia_map_mem_page(link, link->win, &mem);
340 base = &virt[hw_info[i].offset & (req.Size-1)]; 328 base = &virt[hw_info[i].offset & (req.Size-1)];
341 if ((readb(base+0) == hw_info[i].a0) && 329 if ((readb(base+0) == hw_info[i].a0) &&
342 (readb(base+2) == hw_info[i].a1) && 330 (readb(base+2) == hw_info[i].a1) &&
@@ -348,9 +336,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
348 } 336 }
349 337
350 iounmap(virt); 338 iounmap(virt);
351 j = pcmcia_release_window(link->win); 339 j = pcmcia_release_window(link, link->win);
352 if (j != 0)
353 cs_error(link, ReleaseWindow, j);
354 return (i < NR_INFO) ? hw_info+i : NULL; 340 return (i < NR_INFO) ? hw_info+i : NULL;
355} /* get_hwinfo */ 341} /* get_hwinfo */
356 342
@@ -495,9 +481,6 @@ static hw_info_t *get_hwired(struct pcmcia_device *link)
495 481
496======================================================================*/ 482======================================================================*/
497 483
498#define CS_CHECK(fn, ret) \
499do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
500
501static int try_io_port(struct pcmcia_device *link) 484static int try_io_port(struct pcmcia_device *link)
502{ 485{
503 int j, ret; 486 int j, ret;
@@ -567,19 +550,19 @@ static int pcnet_config(struct pcmcia_device *link)
567{ 550{
568 struct net_device *dev = link->priv; 551 struct net_device *dev = link->priv;
569 pcnet_dev_t *info = PRIV(dev); 552 pcnet_dev_t *info = PRIV(dev);
570 int last_ret, last_fn, start_pg, stop_pg, cm_offset; 553 int ret, start_pg, stop_pg, cm_offset;
571 int has_shmem = 0; 554 int has_shmem = 0;
572 hw_info_t *local_hw_info; 555 hw_info_t *local_hw_info;
573 556
574 DEBUG(0, "pcnet_config(0x%p)\n", link); 557 dev_dbg(&link->dev, "pcnet_config\n");
575 558
576 last_ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem); 559 ret = pcmcia_loop_config(link, pcnet_confcheck, &has_shmem);
577 if (last_ret) { 560 if (ret)
578 cs_error(link, RequestIO, last_ret);
579 goto failed; 561 goto failed;
580 }
581 562
582 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 563 ret = pcmcia_request_irq(link, &link->irq);
564 if (ret)
565 goto failed;
583 566
584 if (link->io.NumPorts2 == 8) { 567 if (link->io.NumPorts2 == 8) {
585 link->conf.Attributes |= CONF_ENABLE_SPKR; 568 link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -589,7 +572,9 @@ static int pcnet_config(struct pcmcia_device *link)
589 (link->card_id == PRODID_IBM_HOME_AND_AWAY)) 572 (link->card_id == PRODID_IBM_HOME_AND_AWAY))
590 link->conf.ConfigIndex |= 0x10; 573 link->conf.ConfigIndex |= 0x10;
591 574
592 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 575 ret = pcmcia_request_configuration(link, &link->conf);
576 if (ret)
577 goto failed;
593 dev->irq = link->irq.AssignedIRQ; 578 dev->irq = link->irq.AssignedIRQ;
594 dev->base_addr = link->io.BasePort1; 579 dev->base_addr = link->io.BasePort1;
595 if (info->flags & HAS_MISC_REG) { 580 if (info->flags & HAS_MISC_REG) {
@@ -660,7 +645,7 @@ static int pcnet_config(struct pcmcia_device *link)
660 mii_phy_probe(dev); 645 mii_phy_probe(dev);
661 646
662 link->dev_node = &info->node; 647 link->dev_node = &info->node;
663 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 648 SET_NETDEV_DEV(dev, &link->dev);
664 649
665 if (register_netdev(dev) != 0) { 650 if (register_netdev(dev) != 0) {
666 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n"); 651 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
@@ -687,8 +672,6 @@ static int pcnet_config(struct pcmcia_device *link)
687 printk(" hw_addr %pM\n", dev->dev_addr); 672 printk(" hw_addr %pM\n", dev->dev_addr);
688 return 0; 673 return 0;
689 674
690cs_failed:
691 cs_error(link, last_fn, last_ret);
692failed: 675failed:
693 pcnet_release(link); 676 pcnet_release(link);
694 return -ENODEV; 677 return -ENODEV;
@@ -706,7 +689,7 @@ static void pcnet_release(struct pcmcia_device *link)
706{ 689{
707 pcnet_dev_t *info = PRIV(link->priv); 690 pcnet_dev_t *info = PRIV(link->priv);
708 691
709 DEBUG(0, "pcnet_release(0x%p)\n", link); 692 dev_dbg(&link->dev, "pcnet_release\n");
710 693
711 if (info->flags & USE_SHMEM) 694 if (info->flags & USE_SHMEM)
712 iounmap(info->base); 695 iounmap(info->base);
@@ -960,7 +943,7 @@ static void mii_phy_probe(struct net_device *dev)
960 phyid = tmp << 16; 943 phyid = tmp << 16;
961 phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2); 944 phyid |= mdio_read(mii_addr, i, MII_PHYID_REG2);
962 phyid &= MII_PHYID_REV_MASK; 945 phyid &= MII_PHYID_REV_MASK;
963 DEBUG(0, "%s: MII at %d is 0x%08x\n", dev->name, i, phyid); 946 pr_debug("%s: MII at %d is 0x%08x\n", dev->name, i, phyid);
964 if (phyid == AM79C9XX_HOME_PHY) { 947 if (phyid == AM79C9XX_HOME_PHY) {
965 info->pna_phy = i; 948 info->pna_phy = i;
966 } else if (phyid != AM79C9XX_ETH_PHY) { 949 } else if (phyid != AM79C9XX_ETH_PHY) {
@@ -976,7 +959,7 @@ static int pcnet_open(struct net_device *dev)
976 struct pcmcia_device *link = info->p_dev; 959 struct pcmcia_device *link = info->p_dev;
977 unsigned int nic_base = dev->base_addr; 960 unsigned int nic_base = dev->base_addr;
978 961
979 DEBUG(2, "pcnet_open('%s')\n", dev->name); 962 dev_dbg(&link->dev, "pcnet_open('%s')\n", dev->name);
980 963
981 if (!pcmcia_dev_present(link)) 964 if (!pcmcia_dev_present(link))
982 return -ENODEV; 965 return -ENODEV;
@@ -1008,7 +991,7 @@ static int pcnet_close(struct net_device *dev)
1008 pcnet_dev_t *info = PRIV(dev); 991 pcnet_dev_t *info = PRIV(dev);
1009 struct pcmcia_device *link = info->p_dev; 992 struct pcmcia_device *link = info->p_dev;
1010 993
1011 DEBUG(2, "pcnet_close('%s')\n", dev->name); 994 dev_dbg(&link->dev, "pcnet_close('%s')\n", dev->name);
1012 995
1013 ei_close(dev); 996 ei_close(dev);
1014 free_irq(dev->irq, dev); 997 free_irq(dev->irq, dev);
@@ -1251,10 +1234,8 @@ static void dma_block_input(struct net_device *dev, int count,
1251 int xfer_count = count; 1234 int xfer_count = count;
1252 char *buf = skb->data; 1235 char *buf = skb->data;
1253 1236
1254#ifdef PCMCIA_DEBUG
1255 if ((ei_debug > 4) && (count != 4)) 1237 if ((ei_debug > 4) && (count != 4))
1256 printk(KERN_DEBUG "%s: [bi=%d]\n", dev->name, count+4); 1238 pr_debug("%s: [bi=%d]\n", dev->name, count+4);
1257#endif
1258 if (ei_status.dmaing) { 1239 if (ei_status.dmaing) {
1259 printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input." 1240 printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
1260 "[DMAstat:%1x][irqlock:%1x]\n", 1241 "[DMAstat:%1x][irqlock:%1x]\n",
@@ -1495,7 +1476,7 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
1495 pcnet_dev_t *info = PRIV(dev); 1476 pcnet_dev_t *info = PRIV(dev);
1496 win_req_t req; 1477 win_req_t req;
1497 memreq_t mem; 1478 memreq_t mem;
1498 int i, window_size, offset, last_ret, last_fn; 1479 int i, window_size, offset, ret;
1499 1480
1500 window_size = (stop_pg - start_pg) << 8; 1481 window_size = (stop_pg - start_pg) << 8;
1501 if (window_size > 32 * 1024) 1482 if (window_size > 32 * 1024)
@@ -1509,13 +1490,17 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
1509 req.Attributes |= WIN_USE_WAIT; 1490 req.Attributes |= WIN_USE_WAIT;
1510 req.Base = 0; req.Size = window_size; 1491 req.Base = 0; req.Size = window_size;
1511 req.AccessSpeed = mem_speed; 1492 req.AccessSpeed = mem_speed;
1512 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); 1493 ret = pcmcia_request_window(link, &req, &link->win);
1494 if (ret)
1495 goto failed;
1513 1496
1514 mem.CardOffset = (start_pg << 8) + cm_offset; 1497 mem.CardOffset = (start_pg << 8) + cm_offset;
1515 offset = mem.CardOffset % window_size; 1498 offset = mem.CardOffset % window_size;
1516 mem.CardOffset -= offset; 1499 mem.CardOffset -= offset;
1517 mem.Page = 0; 1500 mem.Page = 0;
1518 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); 1501 ret = pcmcia_map_mem_page(link, link->win, &mem);
1502 if (ret)
1503 goto failed;
1519 1504
1520 /* Try scribbling on the buffer */ 1505 /* Try scribbling on the buffer */
1521 info->base = ioremap(req.Base, window_size); 1506 info->base = ioremap(req.Base, window_size);
@@ -1527,8 +1512,8 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
1527 pcnet_reset_8390(dev); 1512 pcnet_reset_8390(dev);
1528 if (i != (TX_PAGES<<8)) { 1513 if (i != (TX_PAGES<<8)) {
1529 iounmap(info->base); 1514 iounmap(info->base);
1530 pcmcia_release_window(link->win); 1515 pcmcia_release_window(link, link->win);
1531 info->base = NULL; link->win = NULL; 1516 info->base = NULL; link->win = 0;
1532 goto failed; 1517 goto failed;
1533 } 1518 }
1534 1519
@@ -1549,8 +1534,6 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg,
1549 info->flags |= USE_SHMEM; 1534 info->flags |= USE_SHMEM;
1550 return 0; 1535 return 0;
1551 1536
1552cs_failed:
1553 cs_error(link, last_fn, last_ret);
1554failed: 1537failed:
1555 return 1; 1538 return 1;
1556} 1539}
@@ -1788,7 +1771,6 @@ static int __init init_pcnet_cs(void)
1788 1771
1789static void __exit exit_pcnet_cs(void) 1772static void __exit exit_pcnet_cs(void)
1790{ 1773{
1791 DEBUG(0, "pcnet_cs: unloading\n");
1792 pcmcia_unregister_driver(&pcnet_driver); 1774 pcmcia_unregister_driver(&pcnet_driver);
1793} 1775}
1794 1776
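
pcnet_cs.c also picks up the reworked memory-window API: pcmcia_request_window() now takes the pcmcia_device pointer directly rather than a pointer-to-pointer, and pcmcia_map_mem_page() and pcmcia_release_window() gain the device as an extra first argument. A sketch of the new sequence, following setup_shmem_window() above (req and mem are filled in exactly as before):

	win_req_t req;
	memreq_t mem;
	int ret;

	/* req.Attributes, Base, Size and AccessSpeed set up as before */
	ret = pcmcia_request_window(link, &req, &link->win);
	if (ret)
		goto failed;

	mem.CardOffset = (start_pg << 8) + cm_offset;
	mem.Page = 0;
	ret = pcmcia_map_mem_page(link, link->win, &mem);
	if (ret)
		goto failed;

	/* and on teardown */
	pcmcia_release_window(link, link->win);
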
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 7bde2cd34c7e..9e0da370912e 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -79,14 +79,6 @@ MODULE_FIRMWARE(FIRMWARE_NAME);
79*/ 79*/
80INT_MODULE_PARM(if_port, 0); 80INT_MODULE_PARM(if_port, 0);
81 81
82#ifdef PCMCIA_DEBUG
83INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
84static const char *version =
85"smc91c92_cs.c 1.123 2006/11/09 Donald Becker, becker@scyld.com.\n";
86#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
87#else
88#define DEBUG(n, args...)
89#endif
90 82
91#define DRV_NAME "smc91c92_cs" 83#define DRV_NAME "smc91c92_cs"
92#define DRV_VERSION "1.123" 84#define DRV_VERSION "1.123"
@@ -126,12 +118,6 @@ struct smc_private {
126 int rx_ovrn; 118 int rx_ovrn;
127}; 119};
128 120
129struct smc_cfg_mem {
130 tuple_t tuple;
131 cisparse_t parse;
132 u_char buf[255];
133};
134
135/* Special definitions for Megahertz multifunction cards */ 121/* Special definitions for Megahertz multifunction cards */
136#define MEGAHERTZ_ISR 0x0380 122#define MEGAHERTZ_ISR 0x0380
137 123
@@ -329,7 +315,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
329 struct smc_private *smc; 315 struct smc_private *smc;
330 struct net_device *dev; 316 struct net_device *dev;
331 317
332 DEBUG(0, "smc91c92_attach()\n"); 318 dev_dbg(&link->dev, "smc91c92_attach()\n");
333 319
334 /* Create new ethernet device */ 320 /* Create new ethernet device */
335 dev = alloc_etherdev(sizeof(struct smc_private)); 321 dev = alloc_etherdev(sizeof(struct smc_private));
@@ -343,10 +329,8 @@ static int smc91c92_probe(struct pcmcia_device *link)
343 link->io.NumPorts1 = 16; 329 link->io.NumPorts1 = 16;
344 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 330 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
345 link->io.IOAddrLines = 4; 331 link->io.IOAddrLines = 4;
346 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 332 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
347 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
348 link->irq.Handler = &smc_interrupt; 333 link->irq.Handler = &smc_interrupt;
349 link->irq.Instance = dev;
350 link->conf.Attributes = CONF_ENABLE_IRQ; 334 link->conf.Attributes = CONF_ENABLE_IRQ;
351 link->conf.IntType = INT_MEMORY_AND_IO; 335 link->conf.IntType = INT_MEMORY_AND_IO;
352 336
@@ -377,7 +361,7 @@ static void smc91c92_detach(struct pcmcia_device *link)
377{ 361{
378 struct net_device *dev = link->priv; 362 struct net_device *dev = link->priv;
379 363
380 DEBUG(0, "smc91c92_detach(0x%p)\n", link); 364 dev_dbg(&link->dev, "smc91c92_detach\n");
381 365
382 if (link->dev_node) 366 if (link->dev_node)
383 unregister_netdev(dev); 367 unregister_netdev(dev);
@@ -408,34 +392,7 @@ static int cvt_ascii_address(struct net_device *dev, char *s)
408 return 0; 392 return 0;
409} 393}
410 394
411/*====================================================================*/ 395/*====================================================================
412
413static int first_tuple(struct pcmcia_device *handle, tuple_t *tuple,
414 cisparse_t *parse)
415{
416 int i;
417
418 i = pcmcia_get_first_tuple(handle, tuple);
419 if (i != 0)
420 return i;
421 i = pcmcia_get_tuple_data(handle, tuple);
422 if (i != 0)
423 return i;
424 return pcmcia_parse_tuple(tuple, parse);
425}
426
427static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple,
428 cisparse_t *parse)
429{
430 int i;
431
432 if ((i = pcmcia_get_next_tuple(handle, tuple)) != 0 ||
433 (i = pcmcia_get_tuple_data(handle, tuple)) != 0)
434 return i;
435 return pcmcia_parse_tuple(tuple, parse);
436}
437
438/*======================================================================
439 396
440 Configuration stuff for Megahertz cards 397 Configuration stuff for Megahertz cards
441 398
@@ -490,19 +447,14 @@ static int mhz_mfc_config(struct pcmcia_device *link)
490{ 447{
491 struct net_device *dev = link->priv; 448 struct net_device *dev = link->priv;
492 struct smc_private *smc = netdev_priv(dev); 449 struct smc_private *smc = netdev_priv(dev);
493 struct smc_cfg_mem *cfg_mem;
494 win_req_t req; 450 win_req_t req;
495 memreq_t mem; 451 memreq_t mem;
496 int i; 452 int i;
497 453
498 cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL);
499 if (!cfg_mem)
500 return -ENOMEM;
501
502 link->conf.Attributes |= CONF_ENABLE_SPKR; 454 link->conf.Attributes |= CONF_ENABLE_SPKR;
503 link->conf.Status = CCSR_AUDIO_ENA; 455 link->conf.Status = CCSR_AUDIO_ENA;
504 link->irq.Attributes = 456 link->irq.Attributes =
505 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT; 457 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
506 link->io.IOAddrLines = 16; 458 link->io.IOAddrLines = 16;
507 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 459 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
508 link->io.NumPorts2 = 8; 460 link->io.NumPorts2 = 8;
@@ -510,91 +462,80 @@ static int mhz_mfc_config(struct pcmcia_device *link)
510 /* The Megahertz combo cards have modem-like CIS entries, so 462 /* The Megahertz combo cards have modem-like CIS entries, so
511 we have to explicitly try a bunch of port combinations. */ 463 we have to explicitly try a bunch of port combinations. */
512 if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL)) 464 if (pcmcia_loop_config(link, mhz_mfc_config_check, NULL))
513 goto free_cfg_mem; 465 return -ENODEV;
466
514 dev->base_addr = link->io.BasePort1; 467 dev->base_addr = link->io.BasePort1;
515 468
516 /* Allocate a memory window, for accessing the ISR */ 469 /* Allocate a memory window, for accessing the ISR */
517 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 470 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
518 req.Base = req.Size = 0; 471 req.Base = req.Size = 0;
519 req.AccessSpeed = 0; 472 req.AccessSpeed = 0;
520 i = pcmcia_request_window(&link, &req, &link->win); 473 i = pcmcia_request_window(link, &req, &link->win);
521 if (i != 0) 474 if (i != 0)
522 goto free_cfg_mem; 475 return -ENODEV;
476
523 smc->base = ioremap(req.Base, req.Size); 477 smc->base = ioremap(req.Base, req.Size);
524 mem.CardOffset = mem.Page = 0; 478 mem.CardOffset = mem.Page = 0;
525 if (smc->manfid == MANFID_MOTOROLA) 479 if (smc->manfid == MANFID_MOTOROLA)
526 mem.CardOffset = link->conf.ConfigBase; 480 mem.CardOffset = link->conf.ConfigBase;
527 i = pcmcia_map_mem_page(link->win, &mem); 481 i = pcmcia_map_mem_page(link, link->win, &mem);
528 482
529 if ((i == 0) 483 if ((i == 0)
530 && (smc->manfid == MANFID_MEGAHERTZ) 484 && (smc->manfid == MANFID_MEGAHERTZ)
531 && (smc->cardid == PRODID_MEGAHERTZ_EM3288)) 485 && (smc->cardid == PRODID_MEGAHERTZ_EM3288))
532 mhz_3288_power(link); 486 mhz_3288_power(link);
533 487
534free_cfg_mem: 488 return 0;
535 kfree(cfg_mem);
536 return -ENODEV;
537} 489}
538 490
539static int mhz_setup(struct pcmcia_device *link) 491static int pcmcia_get_versmac(struct pcmcia_device *p_dev,
492 tuple_t *tuple,
493 void *priv)
540{ 494{
541 struct net_device *dev = link->priv; 495 struct net_device *dev = priv;
542 struct smc_cfg_mem *cfg_mem; 496 cisparse_t parse;
543 tuple_t *tuple;
544 cisparse_t *parse;
545 u_char *buf, *station_addr;
546 int rc;
547 497
548 cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL); 498 if (pcmcia_parse_tuple(tuple, &parse))
549 if (!cfg_mem) 499 return -EINVAL;
550 return -1;
551 500
552 tuple = &cfg_mem->tuple; 501 if ((parse.version_1.ns > 3) &&
553 parse = &cfg_mem->parse; 502 (cvt_ascii_address(dev,
554 buf = cfg_mem->buf; 503 (parse.version_1.str + parse.version_1.ofs[3]))))
504 return 0;
555 505
556 tuple->Attributes = tuple->TupleOffset = 0; 506 return -EINVAL;
557 tuple->TupleData = (cisdata_t *)buf; 507};
558 tuple->TupleDataMax = 255; 508
509static int mhz_setup(struct pcmcia_device *link)
510{
511 struct net_device *dev = link->priv;
512 size_t len;
513 u8 *buf;
514 int rc;
559 515
560 /* Read the station address from the CIS. It is stored as the last 516 /* Read the station address from the CIS. It is stored as the last
561 (fourth) string in the Version 1 Version/ID tuple. */ 517 (fourth) string in the Version 1 Version/ID tuple. */
562 tuple->DesiredTuple = CISTPL_VERS_1; 518 if ((link->prod_id[3]) &&
563 if (first_tuple(link, tuple, parse) != 0) { 519 (cvt_ascii_address(dev, link->prod_id[3]) == 0))
564 rc = -1; 520 return 0;
565 goto free_cfg_mem; 521
566 } 522 /* Workarounds for broken cards start here. */
567 /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */ 523 /* Ugh -- the EM1144 card has two VERS_1 tuples!?! */
568 if (next_tuple(link, tuple, parse) != 0) 524 if (!pcmcia_loop_tuple(link, CISTPL_VERS_1, pcmcia_get_versmac, dev))
569 first_tuple(link, tuple, parse); 525 return 0;
570 if (parse->version_1.ns > 3) {
571 station_addr = parse->version_1.str + parse->version_1.ofs[3];
572 if (cvt_ascii_address(dev, station_addr) == 0) {
573 rc = 0;
574 goto free_cfg_mem;
575 }
576 }
577 526
578 /* Another possibility: for the EM3288, in a special tuple */ 527 /* Another possibility: for the EM3288, in a special tuple */
579 tuple->DesiredTuple = 0x81;
580 if (pcmcia_get_first_tuple(link, tuple) != 0) {
581 rc = -1;
582 goto free_cfg_mem;
583 }
584 if (pcmcia_get_tuple_data(link, tuple) != 0) {
585 rc = -1;
586 goto free_cfg_mem;
587 }
588 buf[12] = '\0';
589 if (cvt_ascii_address(dev, buf) == 0) {
590 rc = 0;
591 goto free_cfg_mem;
592 }
593 rc = -1; 528 rc = -1;
594free_cfg_mem: 529 len = pcmcia_get_tuple(link, 0x81, &buf);
595 kfree(cfg_mem); 530 if (buf && len >= 13) {
596 return rc; 531 buf[12] = '\0';
597} 532 if (cvt_ascii_address(dev, buf))
533 rc = 0;
534 }
535 kfree(buf);
536
537 return rc;
538};
598 539
599/*====================================================================== 540/*======================================================================
600 541
@@ -684,58 +625,21 @@ static int smc_config(struct pcmcia_device *link)
684 return i; 625 return i;
685} 626}
686 627
628
687static int smc_setup(struct pcmcia_device *link) 629static int smc_setup(struct pcmcia_device *link)
688{ 630{
689 struct net_device *dev = link->priv; 631 struct net_device *dev = link->priv;
690 struct smc_cfg_mem *cfg_mem;
691 tuple_t *tuple;
692 cisparse_t *parse;
693 cistpl_lan_node_id_t *node_id;
694 u_char *buf, *station_addr;
695 int i, rc;
696
697 cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL);
698 if (!cfg_mem)
699 return -ENOMEM;
700
701 tuple = &cfg_mem->tuple;
702 parse = &cfg_mem->parse;
703 buf = cfg_mem->buf;
704
705 tuple->Attributes = tuple->TupleOffset = 0;
706 tuple->TupleData = (cisdata_t *)buf;
707 tuple->TupleDataMax = 255;
708 632
709 /* Check for a LAN function extension tuple */ 633 /* Check for a LAN function extension tuple */
710 tuple->DesiredTuple = CISTPL_FUNCE; 634 if (!pcmcia_get_mac_from_cis(link, dev))
711 i = first_tuple(link, tuple, parse); 635 return 0;
712 while (i == 0) { 636
713 if (parse->funce.type == CISTPL_FUNCE_LAN_NODE_ID)
714 break;
715 i = next_tuple(link, tuple, parse);
716 }
717 if (i == 0) {
718 node_id = (cistpl_lan_node_id_t *)parse->funce.data;
719 if (node_id->nb == 6) {
720 for (i = 0; i < 6; i++)
721 dev->dev_addr[i] = node_id->id[i];
722 rc = 0;
723 goto free_cfg_mem;
724 }
725 }
726 /* Try the third string in the Version 1 Version/ID tuple. */ 637 /* Try the third string in the Version 1 Version/ID tuple. */
727 if (link->prod_id[2]) { 638 if (link->prod_id[2]) {
728 station_addr = link->prod_id[2]; 639 if (cvt_ascii_address(dev, link->prod_id[2]) == 0)
729 if (cvt_ascii_address(dev, station_addr) == 0) { 640 return 0;
730 rc = 0;
731 goto free_cfg_mem;
732 }
733 } 641 }
734 642 return -1;
735 rc = -1;
736free_cfg_mem:
737 kfree(cfg_mem);
738 return rc;
739} 643}
740 644
741/*====================================================================*/ 645/*====================================================================*/
@@ -749,7 +653,7 @@ static int osi_config(struct pcmcia_device *link)
749 link->conf.Attributes |= CONF_ENABLE_SPKR; 653 link->conf.Attributes |= CONF_ENABLE_SPKR;
750 link->conf.Status = CCSR_AUDIO_ENA; 654 link->conf.Status = CCSR_AUDIO_ENA;
751 link->irq.Attributes = 655 link->irq.Attributes =
752 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED|IRQ_HANDLE_PRESENT; 656 IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
753 link->io.NumPorts1 = 64; 657 link->io.NumPorts1 = 64;
754 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 658 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
755 link->io.NumPorts2 = 8; 659 link->io.NumPorts2 = 8;
@@ -794,41 +698,31 @@ static int osi_load_firmware(struct pcmcia_device *link)
794 return err; 698 return err;
795} 699}
796 700
797static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid) 701static int pcmcia_osi_mac(struct pcmcia_device *p_dev,
702 tuple_t *tuple,
703 void *priv)
798{ 704{
799 struct net_device *dev = link->priv; 705 struct net_device *dev = priv;
800 struct smc_cfg_mem *cfg_mem; 706 int i;
801 tuple_t *tuple;
802 u_char *buf;
803 int i, rc;
804 707
805 cfg_mem = kmalloc(sizeof(struct smc_cfg_mem), GFP_KERNEL); 708 if (tuple->TupleDataLen < 8)
806 if (!cfg_mem) 709 return -EINVAL;
807 return -1; 710 if (tuple->TupleData[0] != 0x04)
711 return -EINVAL;
712 for (i = 0; i < 6; i++)
713 dev->dev_addr[i] = tuple->TupleData[i+2];
714 return 0;
715};
808 716
809 tuple = &cfg_mem->tuple;
810 buf = cfg_mem->buf;
811 717
812 tuple->Attributes = TUPLE_RETURN_COMMON; 718static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
813 tuple->TupleData = (cisdata_t *)buf; 719{
814 tuple->TupleDataMax = 255; 720 struct net_device *dev = link->priv;
815 tuple->TupleOffset = 0; 721 int rc;
816 722
817 /* Read the station address from tuple 0x90, subtuple 0x04 */ 723 /* Read the station address from tuple 0x90, subtuple 0x04 */
818 tuple->DesiredTuple = 0x90; 724 if (pcmcia_loop_tuple(link, 0x90, pcmcia_osi_mac, dev))
819 i = pcmcia_get_first_tuple(link, tuple); 725 return -1;
820 while (i == 0) {
821 i = pcmcia_get_tuple_data(link, tuple);
822 if ((i != 0) || (buf[0] == 0x04))
823 break;
824 i = pcmcia_get_next_tuple(link, tuple);
825 }
826 if (i != 0) {
827 rc = -1;
828 goto free_cfg_mem;
829 }
830 for (i = 0; i < 6; i++)
831 dev->dev_addr[i] = buf[i+2];
832 726
833 if (((manfid == MANFID_OSITECH) && 727 if (((manfid == MANFID_OSITECH) &&
834 (cardid == PRODID_OSITECH_SEVEN)) || 728 (cardid == PRODID_OSITECH_SEVEN)) ||
@@ -836,20 +730,17 @@ static int osi_setup(struct pcmcia_device *link, u_short manfid, u_short cardid)
836 (cardid == PRODID_PSION_NET100))) { 730 (cardid == PRODID_PSION_NET100))) {
837 rc = osi_load_firmware(link); 731 rc = osi_load_firmware(link);
838 if (rc) 732 if (rc)
839 goto free_cfg_mem; 733 return rc;
840 } else if (manfid == MANFID_OSITECH) { 734 } else if (manfid == MANFID_OSITECH) {
841 /* Make sure both functions are powered up */ 735 /* Make sure both functions are powered up */
842 set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR); 736 set_bits(0x300, link->io.BasePort1 + OSITECH_AUI_PWR);
843 /* Now, turn on the interrupt for both card functions */ 737 /* Now, turn on the interrupt for both card functions */
844 set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR); 738 set_bits(0x300, link->io.BasePort1 + OSITECH_RESET_ISR);
845 DEBUG(2, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n", 739 dev_dbg(&link->dev, "AUI/PWR: %4.4x RESET/ISR: %4.4x\n",
846 inw(link->io.BasePort1 + OSITECH_AUI_PWR), 740 inw(link->io.BasePort1 + OSITECH_AUI_PWR),
847 inw(link->io.BasePort1 + OSITECH_RESET_ISR)); 741 inw(link->io.BasePort1 + OSITECH_RESET_ISR));
848 } 742 }
849 rc = 0; 743 return 0;
850free_cfg_mem:
851 kfree(cfg_mem);
852 return rc;
853} 744}
854 745
855static int smc91c92_suspend(struct pcmcia_device *link) 746static int smc91c92_suspend(struct pcmcia_device *link)
@@ -959,12 +850,6 @@ static int check_sig(struct pcmcia_device *link)
959 850
960======================================================================*/ 851======================================================================*/
961 852
962#define CS_EXIT_TEST(ret, svc, label) \
963if (ret != 0) { \
964 cs_error(link, svc, ret); \
965 goto label; \
966}
967
968static int smc91c92_config(struct pcmcia_device *link) 853static int smc91c92_config(struct pcmcia_device *link)
969{ 854{
970 struct net_device *dev = link->priv; 855 struct net_device *dev = link->priv;
@@ -974,7 +859,7 @@ static int smc91c92_config(struct pcmcia_device *link)
974 unsigned int ioaddr; 859 unsigned int ioaddr;
975 u_long mir; 860 u_long mir;
976 861
977 DEBUG(0, "smc91c92_config(0x%p)\n", link); 862 dev_dbg(&link->dev, "smc91c92_config\n");
978 863
979 smc->manfid = link->manf_id; 864 smc->manfid = link->manf_id;
980 smc->cardid = link->card_id; 865 smc->cardid = link->card_id;
@@ -990,12 +875,15 @@ static int smc91c92_config(struct pcmcia_device *link)
990 } else { 875 } else {
991 i = smc_config(link); 876 i = smc_config(link);
992 } 877 }
993 CS_EXIT_TEST(i, RequestIO, config_failed); 878 if (i)
879 goto config_failed;
994 880
995 i = pcmcia_request_irq(link, &link->irq); 881 i = pcmcia_request_irq(link, &link->irq);
996 CS_EXIT_TEST(i, RequestIRQ, config_failed); 882 if (i)
883 goto config_failed;
997 i = pcmcia_request_configuration(link, &link->conf); 884 i = pcmcia_request_configuration(link, &link->conf);
998 CS_EXIT_TEST(i, RequestConfiguration, config_failed); 885 if (i)
886 goto config_failed;
999 887
1000 if (smc->manfid == MANFID_MOTOROLA) 888 if (smc->manfid == MANFID_MOTOROLA)
1001 mot_config(link); 889 mot_config(link);
@@ -1074,7 +962,7 @@ static int smc91c92_config(struct pcmcia_device *link)
1074 } 962 }
1075 963
1076 link->dev_node = &smc->node; 964 link->dev_node = &smc->node;
1077 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 965 SET_NETDEV_DEV(dev, &link->dev);
1078 966
1079 if (register_netdev(dev) != 0) { 967 if (register_netdev(dev) != 0) {
1080 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n"); 968 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
@@ -1100,7 +988,7 @@ static int smc91c92_config(struct pcmcia_device *link)
1100 988
1101 if (smc->cfg & CFG_MII_SELECT) { 989 if (smc->cfg & CFG_MII_SELECT) {
1102 if (smc->mii_if.phy_id != -1) { 990 if (smc->mii_if.phy_id != -1) {
1103 DEBUG(0, " MII transceiver at index %d, status %x.\n", 991 dev_dbg(&link->dev, " MII transceiver at index %d, status %x.\n",
1104 smc->mii_if.phy_id, j); 992 smc->mii_if.phy_id, j);
1105 } else { 993 } else {
1106 printk(KERN_NOTICE " No MII transceivers found!\n"); 994 printk(KERN_NOTICE " No MII transceivers found!\n");
@@ -1110,7 +998,7 @@ static int smc91c92_config(struct pcmcia_device *link)
1110 998
1111config_undo: 999config_undo:
1112 unregister_netdev(dev); 1000 unregister_netdev(dev);
1113config_failed: /* CS_EXIT_TEST() calls jump to here... */ 1001config_failed:
1114 smc91c92_release(link); 1002 smc91c92_release(link);
1115 return -ENODEV; 1003 return -ENODEV;
1116} /* smc91c92_config */ 1004} /* smc91c92_config */
@@ -1125,7 +1013,7 @@ config_failed: /* CS_EXIT_TEST() calls jump to here... */
1125 1013
1126static void smc91c92_release(struct pcmcia_device *link) 1014static void smc91c92_release(struct pcmcia_device *link)
1127{ 1015{
1128 DEBUG(0, "smc91c92_release(0x%p)\n", link); 1016 dev_dbg(&link->dev, "smc91c92_release\n");
1129 if (link->win) { 1017 if (link->win) {
1130 struct net_device *dev = link->priv; 1018 struct net_device *dev = link->priv;
1131 struct smc_private *smc = netdev_priv(dev); 1019 struct smc_private *smc = netdev_priv(dev);
@@ -1222,10 +1110,10 @@ static int smc_open(struct net_device *dev)
1222 struct smc_private *smc = netdev_priv(dev); 1110 struct smc_private *smc = netdev_priv(dev);
1223 struct pcmcia_device *link = smc->p_dev; 1111 struct pcmcia_device *link = smc->p_dev;
1224 1112
1225#ifdef PCMCIA_DEBUG 1113 dev_dbg(&link->dev, "%s: smc_open(%p), ID/Window %4.4x.\n",
1226 DEBUG(0, "%s: smc_open(%p), ID/Window %4.4x.\n",
1227 dev->name, dev, inw(dev->base_addr + BANK_SELECT)); 1114 dev->name, dev, inw(dev->base_addr + BANK_SELECT));
1228 if (pc_debug > 1) smc_dump(dev); 1115#ifdef PCMCIA_DEBUG
1116 smc_dump(dev);
1229#endif 1117#endif
1230 1118
1231 /* Check that the PCMCIA card is still here. */ 1119 /* Check that the PCMCIA card is still here. */
@@ -1260,7 +1148,7 @@ static int smc_close(struct net_device *dev)
1260 struct pcmcia_device *link = smc->p_dev; 1148 struct pcmcia_device *link = smc->p_dev;
1261 unsigned int ioaddr = dev->base_addr; 1149 unsigned int ioaddr = dev->base_addr;
1262 1150
1263 DEBUG(0, "%s: smc_close(), status %4.4x.\n", 1151 dev_dbg(&link->dev, "%s: smc_close(), status %4.4x.\n",
1264 dev->name, inw(ioaddr + BANK_SELECT)); 1152 dev->name, inw(ioaddr + BANK_SELECT));
1265 1153
1266 netif_stop_queue(dev); 1154 netif_stop_queue(dev);
@@ -1327,7 +1215,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
1327 u_char *buf = skb->data; 1215 u_char *buf = skb->data;
1328 u_int length = skb->len; /* The chip will pad to ethernet min. */ 1216 u_int length = skb->len; /* The chip will pad to ethernet min. */
1329 1217
1330 DEBUG(2, "%s: Trying to xmit packet of length %d.\n", 1218 pr_debug("%s: Trying to xmit packet of length %d.\n",
1331 dev->name, length); 1219 dev->name, length);
1332 1220
1333 /* send the packet length: +6 for status word, length, and ctl */ 1221 /* send the packet length: +6 for status word, length, and ctl */
@@ -1382,7 +1270,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
1382 1270
1383 netif_stop_queue(dev); 1271 netif_stop_queue(dev);
1384 1272
1385 DEBUG(2, "%s: smc_start_xmit(length = %d) called," 1273 pr_debug("%s: smc_start_xmit(length = %d) called,"
1386 " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2)); 1274 " status %4.4x.\n", dev->name, skb->len, inw(ioaddr + 2));
1387 1275
1388 if (smc->saved_skb) { 1276 if (smc->saved_skb) {
@@ -1429,7 +1317,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
1429 } 1317 }
1430 1318
1431 /* Otherwise defer until the Tx-space-allocated interrupt. */ 1319 /* Otherwise defer until the Tx-space-allocated interrupt. */
1432 DEBUG(2, "%s: memory allocation deferred.\n", dev->name); 1320 pr_debug("%s: memory allocation deferred.\n", dev->name);
1433 outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT); 1321 outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT);
1434 spin_unlock_irqrestore(&smc->lock, flags); 1322 spin_unlock_irqrestore(&smc->lock, flags);
1435 1323
@@ -1494,7 +1382,7 @@ static void smc_eph_irq(struct net_device *dev)
1494 1382
1495 SMC_SELECT_BANK(0); 1383 SMC_SELECT_BANK(0);
1496 ephs = inw(ioaddr + EPH); 1384 ephs = inw(ioaddr + EPH);
1497 DEBUG(2, "%s: Ethernet protocol handler interrupt, status" 1385 pr_debug("%s: Ethernet protocol handler interrupt, status"
1498 " %4.4x.\n", dev->name, ephs); 1386 " %4.4x.\n", dev->name, ephs);
1499 /* Could be a counter roll-over warning: update stats. */ 1387 /* Could be a counter roll-over warning: update stats. */
1500 card_stats = inw(ioaddr + COUNTER); 1388 card_stats = inw(ioaddr + COUNTER);
@@ -1534,7 +1422,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1534 1422
1535 ioaddr = dev->base_addr; 1423 ioaddr = dev->base_addr;
1536 1424
1537 DEBUG(3, "%s: SMC91c92 interrupt %d at %#x.\n", dev->name, 1425 pr_debug("%s: SMC91c92 interrupt %d at %#x.\n", dev->name,
1538 irq, ioaddr); 1426 irq, ioaddr);
1539 1427
1540 spin_lock(&smc->lock); 1428 spin_lock(&smc->lock);
@@ -1543,7 +1431,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1543 if ((saved_bank & 0xff00) != 0x3300) { 1431 if ((saved_bank & 0xff00) != 0x3300) {
1544 /* The device does not exist -- the card could be off-line, or 1432 /* The device does not exist -- the card could be off-line, or
1545 maybe it has been ejected. */ 1433 maybe it has been ejected. */
1546 DEBUG(1, "%s: SMC91c92 interrupt %d for non-existent" 1434 pr_debug("%s: SMC91c92 interrupt %d for non-existent"
1547 "/ejected device.\n", dev->name, irq); 1435 "/ejected device.\n", dev->name, irq);
1548 handled = 0; 1436 handled = 0;
1549 goto irq_done; 1437 goto irq_done;
@@ -1557,7 +1445,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1557 1445
1558 do { /* read the status flag, and mask it */ 1446 do { /* read the status flag, and mask it */
1559 status = inw(ioaddr + INTERRUPT) & 0xff; 1447 status = inw(ioaddr + INTERRUPT) & 0xff;
1560 DEBUG(3, "%s: Status is %#2.2x (mask %#2.2x).\n", dev->name, 1448 pr_debug("%s: Status is %#2.2x (mask %#2.2x).\n", dev->name,
1561 status, mask); 1449 status, mask);
1562 if ((status & mask) == 0) { 1450 if ((status & mask) == 0) {
1563 if (bogus_cnt == INTR_WORK) 1451 if (bogus_cnt == INTR_WORK)
@@ -1602,7 +1490,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1602 smc_eph_irq(dev); 1490 smc_eph_irq(dev);
1603 } while (--bogus_cnt); 1491 } while (--bogus_cnt);
1604 1492
1605 DEBUG(3, " Restoring saved registers mask %2.2x bank %4.4x" 1493 pr_debug(" Restoring saved registers mask %2.2x bank %4.4x"
1606 " pointer %4.4x.\n", mask, saved_bank, saved_pointer); 1494 " pointer %4.4x.\n", mask, saved_bank, saved_pointer);
1607 1495
1608 /* restore state register */ 1496 /* restore state register */
@@ -1610,7 +1498,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1610 outw(saved_pointer, ioaddr + POINTER); 1498 outw(saved_pointer, ioaddr + POINTER);
1611 SMC_SELECT_BANK(saved_bank); 1499 SMC_SELECT_BANK(saved_bank);
1612 1500
1613 DEBUG(3, "%s: Exiting interrupt IRQ%d.\n", dev->name, irq); 1501 pr_debug("%s: Exiting interrupt IRQ%d.\n", dev->name, irq);
1614 1502
1615irq_done: 1503irq_done:
1616 1504
@@ -1661,7 +1549,7 @@ static void smc_rx(struct net_device *dev)
1661 rx_status = inw(ioaddr + DATA_1); 1549 rx_status = inw(ioaddr + DATA_1);
1662 packet_length = inw(ioaddr + DATA_1) & 0x07ff; 1550 packet_length = inw(ioaddr + DATA_1) & 0x07ff;
1663 1551
1664 DEBUG(2, "%s: Receive status %4.4x length %d.\n", 1552 pr_debug("%s: Receive status %4.4x length %d.\n",
1665 dev->name, rx_status, packet_length); 1553 dev->name, rx_status, packet_length);
1666 1554
1667 if (!(rx_status & RS_ERRORS)) { 1555 if (!(rx_status & RS_ERRORS)) {
@@ -1672,7 +1560,7 @@ static void smc_rx(struct net_device *dev)
1672 skb = dev_alloc_skb(packet_length+2); 1560 skb = dev_alloc_skb(packet_length+2);
1673 1561
1674 if (skb == NULL) { 1562 if (skb == NULL) {
1675 DEBUG(1, "%s: Low memory, packet dropped.\n", dev->name); 1563 pr_debug("%s: Low memory, packet dropped.\n", dev->name);
1676 dev->stats.rx_dropped++; 1564 dev->stats.rx_dropped++;
1677 outw(MC_RELEASE, ioaddr + MMU_CMD); 1565 outw(MC_RELEASE, ioaddr + MMU_CMD);
1678 return; 1566 return;
@@ -1832,7 +1720,7 @@ static void smc_reset(struct net_device *dev)
1832 struct smc_private *smc = netdev_priv(dev); 1720 struct smc_private *smc = netdev_priv(dev);
1833 int i; 1721 int i;
1834 1722
1835 DEBUG(0, "%s: smc91c92 reset called.\n", dev->name); 1723 pr_debug("%s: smc91c92 reset called.\n", dev->name);
1836 1724
1837 /* The first interaction must be a write to bring the chip out 1725 /* The first interaction must be a write to bring the chip out
1838 of sleep mode. */ 1726 of sleep mode. */
@@ -2149,18 +2037,6 @@ static u32 smc_get_link(struct net_device *dev)
2149 return ret; 2037 return ret;
2150} 2038}
2151 2039
2152#ifdef PCMCIA_DEBUG
2153static u32 smc_get_msglevel(struct net_device *dev)
2154{
2155 return pc_debug;
2156}
2157
2158static void smc_set_msglevel(struct net_device *dev, u32 val)
2159{
2160 pc_debug = val;
2161}
2162#endif
2163
2164static int smc_nway_reset(struct net_device *dev) 2040static int smc_nway_reset(struct net_device *dev)
2165{ 2041{
2166 struct smc_private *smc = netdev_priv(dev); 2042 struct smc_private *smc = netdev_priv(dev);
@@ -2184,10 +2060,6 @@ static const struct ethtool_ops ethtool_ops = {
2184 .get_settings = smc_get_settings, 2060 .get_settings = smc_get_settings,
2185 .set_settings = smc_set_settings, 2061 .set_settings = smc_set_settings,
2186 .get_link = smc_get_link, 2062 .get_link = smc_get_link,
2187#ifdef PCMCIA_DEBUG
2188 .get_msglevel = smc_get_msglevel,
2189 .set_msglevel = smc_set_msglevel,
2190#endif
2191 .nway_reset = smc_nway_reset, 2063 .nway_reset = smc_nway_reset,
2192}; 2064};
2193 2065
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index cf8423102538..fe504b7f369f 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -211,20 +211,6 @@ enum xirc_cmd { /* Commands */
211 211
212static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" }; 212static const char *if_names[] = { "Auto", "10BaseT", "10Base2", "AUI", "100BaseT" };
213 213
214/****************
215 * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
216 * you do not define PCMCIA_DEBUG at all, all the debug code will be
217 * left out. If you compile with PCMCIA_DEBUG=0, the debug code will
218 * be present but disabled -- but it can then be enabled for specific
219 * modules at load time with a 'pc_debug=#' option to insmod.
220 */
221#ifdef PCMCIA_DEBUG
222static int pc_debug = PCMCIA_DEBUG;
223module_param(pc_debug, int, 0);
224#define DEBUG(n, args...) if (pc_debug>(n)) printk(KDBG_XIRC args)
225#else
226#define DEBUG(n, args...)
227#endif
228 214
229#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: " 215#define KDBG_XIRC KERN_DEBUG "xirc2ps_cs: "
230#define KERR_XIRC KERN_ERR "xirc2ps_cs: " 216#define KERR_XIRC KERN_ERR "xirc2ps_cs: "
@@ -359,7 +345,7 @@ static void xirc_tx_timeout(struct net_device *dev);
359static void xirc2ps_tx_timeout_task(struct work_struct *work); 345static void xirc2ps_tx_timeout_task(struct work_struct *work);
360static void set_addresses(struct net_device *dev); 346static void set_addresses(struct net_device *dev);
361static void set_multicast_list(struct net_device *dev); 347static void set_multicast_list(struct net_device *dev);
362static int set_card_type(struct pcmcia_device *link, const void *s); 348static int set_card_type(struct pcmcia_device *link);
363static int do_config(struct net_device *dev, struct ifmap *map); 349static int do_config(struct net_device *dev, struct ifmap *map);
364static int do_open(struct net_device *dev); 350static int do_open(struct net_device *dev);
365static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 351static int do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -371,28 +357,6 @@ static void do_powerdown(struct net_device *dev);
371static int do_stop(struct net_device *dev); 357static int do_stop(struct net_device *dev);
372 358
373/*=============== Helper functions =========================*/ 359/*=============== Helper functions =========================*/
374static int
375first_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
376{
377 int err;
378
379 if ((err = pcmcia_get_first_tuple(handle, tuple)) == 0 &&
380 (err = pcmcia_get_tuple_data(handle, tuple)) == 0)
381 err = pcmcia_parse_tuple(tuple, parse);
382 return err;
383}
384
385static int
386next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
387{
388 int err;
389
390 if ((err = pcmcia_get_next_tuple(handle, tuple)) == 0 &&
391 (err = pcmcia_get_tuple_data(handle, tuple)) == 0)
392 err = pcmcia_parse_tuple(tuple, parse);
393 return err;
394}
395
396#define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR) 360#define SelectPage(pgnr) outb((pgnr), ioaddr + XIRCREG_PR)
397#define GetByte(reg) ((unsigned)inb(ioaddr + (reg))) 361#define GetByte(reg) ((unsigned)inb(ioaddr + (reg)))
398#define GetWord(reg) ((unsigned)inw(ioaddr + (reg))) 362#define GetWord(reg) ((unsigned)inw(ioaddr + (reg)))
@@ -400,7 +364,7 @@ next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
400#define PutWord(reg,value) outw((value), ioaddr+(reg)) 364#define PutWord(reg,value) outw((value), ioaddr+(reg))
401 365
402/*====== Functions used for debugging =================================*/ 366/*====== Functions used for debugging =================================*/
403#if defined(PCMCIA_DEBUG) && 0 /* reading regs may change system status */ 367#if 0 /* reading regs may change system status */
404static void 368static void
405PrintRegisters(struct net_device *dev) 369PrintRegisters(struct net_device *dev)
406{ 370{
@@ -432,7 +396,7 @@ PrintRegisters(struct net_device *dev)
432 } 396 }
433 } 397 }
434} 398}
435#endif /* PCMCIA_DEBUG */ 399#endif /* 0 */
436 400
437/*============== MII Management functions ===============*/ 401/*============== MII Management functions ===============*/
438 402
@@ -576,7 +540,7 @@ xirc2ps_probe(struct pcmcia_device *link)
576 struct net_device *dev; 540 struct net_device *dev;
577 local_info_t *local; 541 local_info_t *local;
578 542
579 DEBUG(0, "attach()\n"); 543 dev_dbg(&link->dev, "attach()\n");
580 544
581 /* Allocate the device structure */ 545 /* Allocate the device structure */
582 dev = alloc_etherdev(sizeof(local_info_t)); 546 dev = alloc_etherdev(sizeof(local_info_t));
@@ -592,7 +556,6 @@ xirc2ps_probe(struct pcmcia_device *link)
592 link->conf.IntType = INT_MEMORY_AND_IO; 556 link->conf.IntType = INT_MEMORY_AND_IO;
593 link->conf.ConfigIndex = 1; 557 link->conf.ConfigIndex = 1;
594 link->irq.Handler = xirc2ps_interrupt; 558 link->irq.Handler = xirc2ps_interrupt;
595 link->irq.Instance = dev;
596 559
597 /* Fill in card specific entries */ 560 /* Fill in card specific entries */
598 dev->netdev_ops = &netdev_ops; 561 dev->netdev_ops = &netdev_ops;
@@ -615,7 +578,7 @@ xirc2ps_detach(struct pcmcia_device *link)
615{ 578{
616 struct net_device *dev = link->priv; 579 struct net_device *dev = link->priv;
617 580
618 DEBUG(0, "detach(0x%p)\n", link); 581 dev_dbg(&link->dev, "detach\n");
619 582
620 if (link->dev_node) 583 if (link->dev_node)
621 unregister_netdev(dev); 584 unregister_netdev(dev);
@@ -644,17 +607,25 @@ xirc2ps_detach(struct pcmcia_device *link)
644 * 607 *
645 */ 608 */
646static int 609static int
647set_card_type(struct pcmcia_device *link, const void *s) 610set_card_type(struct pcmcia_device *link)
648{ 611{
649 struct net_device *dev = link->priv; 612 struct net_device *dev = link->priv;
650 local_info_t *local = netdev_priv(dev); 613 local_info_t *local = netdev_priv(dev);
651 #ifdef PCMCIA_DEBUG 614 u8 *buf;
652 unsigned cisrev = ((const unsigned char *)s)[2]; 615 unsigned int cisrev, mediaid, prodid;
653 #endif 616 size_t len;
654 unsigned mediaid= ((const unsigned char *)s)[3]; 617
655 unsigned prodid = ((const unsigned char *)s)[4]; 618 len = pcmcia_get_tuple(link, CISTPL_MANFID, &buf);
619 if (len < 5) {
620 dev_err(&link->dev, "invalid CIS -- sorry\n");
621 return 0;
622 }
656 623
657 DEBUG(0, "cisrev=%02x mediaid=%02x prodid=%02x\n", 624 cisrev = buf[2];
625 mediaid = buf[3];
626 prodid = buf[4];
627
628 dev_dbg(&link->dev, "cisrev=%02x mediaid=%02x prodid=%02x\n",
658 cisrev, mediaid, prodid); 629 cisrev, mediaid, prodid);
659 630
660 local->mohawk = 0; 631 local->mohawk = 0;
@@ -761,6 +732,26 @@ xirc2ps_config_check(struct pcmcia_device *p_dev,
761 732
762} 733}
763 734
735
736static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev,
737 tuple_t *tuple,
738 void *priv)
739{
740 struct net_device *dev = priv;
741 int i;
742
743 if (tuple->TupleDataLen != 13)
744 return -EINVAL;
745 if ((tuple->TupleData[0] != 2) || (tuple->TupleData[1] != 1) ||
746 (tuple->TupleData[2] != 6))
747 return -EINVAL;
748 /* another try (James Lehmer's CE2 version 4.1)*/
749 for (i = 2; i < 6; i++)
750 dev->dev_addr[i] = tuple->TupleData[i+2];
751 return 0;
752};
753
754
764/**************** 755/****************
765 * xirc2ps_config() is scheduled to run after a CARD_INSERTION event 756 * xirc2ps_config() is scheduled to run after a CARD_INSERTION event
766 * is received, to configure the PCMCIA socket, and to make the 757 * is received, to configure the PCMCIA socket, and to make the
@@ -772,33 +763,21 @@ xirc2ps_config(struct pcmcia_device * link)
772 struct net_device *dev = link->priv; 763 struct net_device *dev = link->priv;
773 local_info_t *local = netdev_priv(dev); 764 local_info_t *local = netdev_priv(dev);
774 unsigned int ioaddr; 765 unsigned int ioaddr;
775 tuple_t tuple; 766 int err;
776 cisparse_t parse; 767 u8 *buf;
777 int err, i; 768 size_t len;
778 u_char buf[64];
779 cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data;
780 769
781 local->dingo_ccr = NULL; 770 local->dingo_ccr = NULL;
782 771
783 DEBUG(0, "config(0x%p)\n", link); 772 dev_dbg(&link->dev, "config\n");
784
785 /*
786 * This reads the card's CONFIG tuple to find its configuration
787 * registers.
788 */
789 tuple.Attributes = 0;
790 tuple.TupleData = buf;
791 tuple.TupleDataMax = 64;
792 tuple.TupleOffset = 0;
793 773
794 /* Is this a valid card */ 774 /* Is this a valid card */
795 tuple.DesiredTuple = CISTPL_MANFID; 775 if (link->has_manf_id == 0) {
796 if ((err=first_tuple(link, &tuple, &parse))) {
797 printk(KNOT_XIRC "manfid not found in CIS\n"); 776 printk(KNOT_XIRC "manfid not found in CIS\n");
798 goto failure; 777 goto failure;
799 } 778 }
800 779
801 switch(parse.manfid.manf) { 780 switch (link->manf_id) {
802 case MANFID_XIRCOM: 781 case MANFID_XIRCOM:
803 local->manf_str = "Xircom"; 782 local->manf_str = "Xircom";
804 break; 783 break;
@@ -817,65 +796,44 @@ xirc2ps_config(struct pcmcia_device * link)
817 break; 796 break;
818 default: 797 default:
819 printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n", 798 printk(KNOT_XIRC "Unknown Card Manufacturer ID: 0x%04x\n",
820 (unsigned)parse.manfid.manf); 799 (unsigned)link->manf_id);
821 goto failure; 800 goto failure;
822 } 801 }
823 DEBUG(0, "found %s card\n", local->manf_str); 802 dev_dbg(&link->dev, "found %s card\n", local->manf_str);
824 803
825 if (!set_card_type(link, buf)) { 804 if (!set_card_type(link)) {
826 printk(KNOT_XIRC "this card is not supported\n"); 805 printk(KNOT_XIRC "this card is not supported\n");
827 goto failure; 806 goto failure;
828 } 807 }
829 808
830 /* get the ethernet address from the CIS */ 809 /* get the ethernet address from the CIS */
831 tuple.DesiredTuple = CISTPL_FUNCE; 810 err = pcmcia_get_mac_from_cis(link, dev);
832 for (err = first_tuple(link, &tuple, &parse); !err; 811
833 err = next_tuple(link, &tuple, &parse)) { 812 /* not found: try to get the node-id from tuple 0x89 */
834 /* Once I saw two CISTPL_FUNCE_LAN_NODE_ID entries: 813 if (err) {
835 * the first one with a length of zero the second correct - 814 len = pcmcia_get_tuple(link, 0x89, &buf);
836 * so I skip all entries with length 0 */ 815 /* data layout looks like tuple 0x22 */
837 if (parse.funce.type == CISTPL_FUNCE_LAN_NODE_ID 816 if (buf && len == 8) {
838 && ((cistpl_lan_node_id_t *)parse.funce.data)->nb) 817 if (*buf == CISTPL_FUNCE_LAN_NODE_ID) {
839 break; 818 int i;
840 } 819 for (i = 2; i < 6; i++)
841 if (err) { /* not found: try to get the node-id from tuple 0x89 */ 820 dev->dev_addr[i] = buf[i+2];
842 tuple.DesiredTuple = 0x89; /* data layout looks like tuple 0x22 */ 821 } else
843 if ((err = pcmcia_get_first_tuple(link, &tuple)) == 0 && 822 err = -1;
844 (err = pcmcia_get_tuple_data(link, &tuple)) == 0) {
845 if (tuple.TupleDataLen == 8 && *buf == CISTPL_FUNCE_LAN_NODE_ID)
846 memcpy(&parse, buf, 8);
847 else
848 err = -1;
849 }
850 }
851 if (err) { /* another try (James Lehmer's CE2 version 4.1)*/
852 tuple.DesiredTuple = CISTPL_FUNCE;
853 for (err = first_tuple(link, &tuple, &parse); !err;
854 err = next_tuple(link, &tuple, &parse)) {
855 if (parse.funce.type == 0x02 && parse.funce.data[0] == 1
856 && parse.funce.data[1] == 6 && tuple.TupleDataLen == 13) {
857 buf[1] = 4;
858 memcpy(&parse, buf+1, 8);
859 break;
860 } 823 }
861 } 824 kfree(buf);
862 } 825 }
826
827 if (err)
828 err = pcmcia_loop_tuple(link, CISTPL_FUNCE, pcmcia_get_mac_ce, dev);
829
863 if (err) { 830 if (err) {
864 printk(KNOT_XIRC "node-id not found in CIS\n"); 831 printk(KNOT_XIRC "node-id not found in CIS\n");
865 goto failure; 832 goto failure;
866 } 833 }
867 node_id = (cistpl_lan_node_id_t *)parse.funce.data;
868 if (node_id->nb != 6) {
869 printk(KNOT_XIRC "malformed node-id in CIS\n");
870 goto failure;
871 }
872 for (i=0; i < 6; i++)
873 dev->dev_addr[i] = node_id->id[i];
874 834
875 link->io.IOAddrLines =10; 835 link->io.IOAddrLines =10;
876 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 836 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
877 link->irq.Attributes = IRQ_HANDLE_PRESENT;
878 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
879 if (local->modem) { 837 if (local->modem) {
880 int pass; 838 int pass;
881 839
@@ -916,10 +874,8 @@ xirc2ps_config(struct pcmcia_device * link)
916 goto port_found; 874 goto port_found;
917 } 875 }
918 link->io.BasePort1 = 0; /* let CS decide */ 876 link->io.BasePort1 = 0; /* let CS decide */
919 if ((err=pcmcia_request_io(link, &link->io))) { 877 if ((err=pcmcia_request_io(link, &link->io)))
920 cs_error(link, RequestIO, err);
921 goto config_error; 878 goto config_error;
922 }
923 } 879 }
924 port_found: 880 port_found:
925 if (err) 881 if (err)
@@ -929,19 +885,15 @@ xirc2ps_config(struct pcmcia_device * link)
929 * Now allocate an interrupt line. Note that this does not 885 * Now allocate an interrupt line. Note that this does not
930 * actually assign a handler to the interrupt. 886 * actually assign a handler to the interrupt.
931 */ 887 */
932 if ((err=pcmcia_request_irq(link, &link->irq))) { 888 if ((err=pcmcia_request_irq(link, &link->irq)))
933 cs_error(link, RequestIRQ, err);
934 goto config_error; 889 goto config_error;
935 }
936 890
937 /**************** 891 /****************
938 * This actually configures the PCMCIA socket -- setting up 892 * This actually configures the PCMCIA socket -- setting up
939 * the I/O windows and the interrupt mapping. 893 * the I/O windows and the interrupt mapping.
940 */ 894 */
941 if ((err=pcmcia_request_configuration(link, &link->conf))) { 895 if ((err=pcmcia_request_configuration(link, &link->conf)))
942 cs_error(link, RequestConfiguration, err);
943 goto config_error; 896 goto config_error;
944 }
945 897
946 if (local->dingo) { 898 if (local->dingo) {
947 conf_reg_t reg; 899 conf_reg_t reg;
@@ -956,17 +908,13 @@ xirc2ps_config(struct pcmcia_device * link)
956 reg.Action = CS_WRITE; 908 reg.Action = CS_WRITE;
957 reg.Offset = CISREG_IOBASE_0; 909 reg.Offset = CISREG_IOBASE_0;
958 reg.Value = link->io.BasePort2 & 0xff; 910 reg.Value = link->io.BasePort2 & 0xff;
959 if ((err = pcmcia_access_configuration_register(link, &reg))) { 911 if ((err = pcmcia_access_configuration_register(link, &reg)))
960 cs_error(link, AccessConfigurationRegister, err);
961 goto config_error; 912 goto config_error;
962 }
963 reg.Action = CS_WRITE; 913 reg.Action = CS_WRITE;
964 reg.Offset = CISREG_IOBASE_1; 914 reg.Offset = CISREG_IOBASE_1;
965 reg.Value = (link->io.BasePort2 >> 8) & 0xff; 915 reg.Value = (link->io.BasePort2 >> 8) & 0xff;
966 if ((err = pcmcia_access_configuration_register(link, &reg))) { 916 if ((err = pcmcia_access_configuration_register(link, &reg)))
967 cs_error(link, AccessConfigurationRegister, err);
968 goto config_error; 917 goto config_error;
969 }
970 918
971 /* There is no config entry for the Ethernet part which 919 /* There is no config entry for the Ethernet part which
972 * is at 0x0800. So we allocate a window into the attribute 920 * is at 0x0800. So we allocate a window into the attribute
@@ -975,17 +923,14 @@ xirc2ps_config(struct pcmcia_device * link)
975 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 923 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
976 req.Base = req.Size = 0; 924 req.Base = req.Size = 0;
977 req.AccessSpeed = 0; 925 req.AccessSpeed = 0;
978 if ((err = pcmcia_request_window(&link, &req, &link->win))) { 926 if ((err = pcmcia_request_window(link, &req, &link->win)))
979 cs_error(link, RequestWindow, err);
980 goto config_error; 927 goto config_error;
981 } 928
982 local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800; 929 local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800;
983 mem.CardOffset = 0x0; 930 mem.CardOffset = 0x0;
984 mem.Page = 0; 931 mem.Page = 0;
985 if ((err = pcmcia_map_mem_page(link->win, &mem))) { 932 if ((err = pcmcia_map_mem_page(link, link->win, &mem)))
986 cs_error(link, MapMemPage, err);
987 goto config_error; 933 goto config_error;
988 }
989 934
990 /* Setup the CCRs; there are no infos in the CIS about the Ethernet 935 /* Setup the CCRs; there are no infos in the CIS about the Ethernet
991 * part. 936 * part.
@@ -1044,7 +989,7 @@ xirc2ps_config(struct pcmcia_device * link)
1044 do_reset(dev, 1); /* a kludge to make the cem56 work */ 989 do_reset(dev, 1); /* a kludge to make the cem56 work */
1045 990
1046 link->dev_node = &local->node; 991 link->dev_node = &local->node;
1047 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 992 SET_NETDEV_DEV(dev, &link->dev);
1048 993
1049 if ((err=register_netdev(dev))) { 994 if ((err=register_netdev(dev))) {
1050 printk(KNOT_XIRC "register_netdev() failed\n"); 995 printk(KNOT_XIRC "register_netdev() failed\n");
@@ -1077,7 +1022,7 @@ xirc2ps_config(struct pcmcia_device * link)
1077static void 1022static void
1078xirc2ps_release(struct pcmcia_device *link) 1023xirc2ps_release(struct pcmcia_device *link)
1079{ 1024{
1080 DEBUG(0, "release(0x%p)\n", link); 1025 dev_dbg(&link->dev, "release\n");
1081 1026
1082 if (link->win) { 1027 if (link->win) {
1083 struct net_device *dev = link->priv; 1028 struct net_device *dev = link->priv;
@@ -1144,7 +1089,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
1144 PutByte(XIRCREG_CR, 0); 1089 PutByte(XIRCREG_CR, 0);
1145 } 1090 }
1146 1091
1147 DEBUG(6, "%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr); 1092 pr_debug("%s: interrupt %d at %#x.\n", dev->name, irq, ioaddr);
1148 1093
1149 saved_page = GetByte(XIRCREG_PR); 1094 saved_page = GetByte(XIRCREG_PR);
1150 /* Read the ISR to see whats the cause for the interrupt. 1095 /* Read the ISR to see whats the cause for the interrupt.
@@ -1154,7 +1099,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
1154 bytes_rcvd = 0; 1099 bytes_rcvd = 0;
1155 loop_entry: 1100 loop_entry:
1156 if (int_status == 0xff) { /* card may be ejected */ 1101 if (int_status == 0xff) { /* card may be ejected */
1157 DEBUG(3, "%s: interrupt %d for dead card\n", dev->name, irq); 1102 pr_debug("%s: interrupt %d for dead card\n", dev->name, irq);
1158 goto leave; 1103 goto leave;
1159 } 1104 }
1160 eth_status = GetByte(XIRCREG_ESR); 1105 eth_status = GetByte(XIRCREG_ESR);
@@ -1167,7 +1112,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
1167 PutByte(XIRCREG40_TXST0, 0); 1112 PutByte(XIRCREG40_TXST0, 0);
1168 PutByte(XIRCREG40_TXST1, 0); 1113 PutByte(XIRCREG40_TXST1, 0);
1169 1114
1170 DEBUG(3, "%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n", 1115 pr_debug("%s: ISR=%#2.2x ESR=%#2.2x RSR=%#2.2x TSR=%#4.4x\n",
1171 dev->name, int_status, eth_status, rx_status, tx_status); 1116 dev->name, int_status, eth_status, rx_status, tx_status);
1172 1117
1173 /***** receive section ******/ 1118 /***** receive section ******/
@@ -1178,14 +1123,14 @@ xirc2ps_interrupt(int irq, void *dev_id)
1178 /* too many bytes received during this int, drop the rest of the 1123 /* too many bytes received during this int, drop the rest of the
1179 * packets */ 1124 * packets */
1180 dev->stats.rx_dropped++; 1125 dev->stats.rx_dropped++;
1181 DEBUG(2, "%s: RX drop, too much done\n", dev->name); 1126 pr_debug("%s: RX drop, too much done\n", dev->name);
1182 } else if (rsr & PktRxOk) { 1127 } else if (rsr & PktRxOk) {
1183 struct sk_buff *skb; 1128 struct sk_buff *skb;
1184 1129
1185 pktlen = GetWord(XIRCREG0_RBC); 1130 pktlen = GetWord(XIRCREG0_RBC);
1186 bytes_rcvd += pktlen; 1131 bytes_rcvd += pktlen;
1187 1132
1188 DEBUG(5, "rsr=%#02x packet_length=%u\n", rsr, pktlen); 1133 pr_debug("rsr=%#02x packet_length=%u\n", rsr, pktlen);
1189 1134
1190 skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */ 1135 skb = dev_alloc_skb(pktlen+3); /* 1 extra so we can use insw */
1191 if (!skb) { 1136 if (!skb) {
@@ -1253,19 +1198,19 @@ xirc2ps_interrupt(int irq, void *dev_id)
1253 dev->stats.multicast++; 1198 dev->stats.multicast++;
1254 } 1199 }
1255 } else { /* bad packet */ 1200 } else { /* bad packet */
1256 DEBUG(5, "rsr=%#02x\n", rsr); 1201 pr_debug("rsr=%#02x\n", rsr);
1257 } 1202 }
1258 if (rsr & PktTooLong) { 1203 if (rsr & PktTooLong) {
1259 dev->stats.rx_frame_errors++; 1204 dev->stats.rx_frame_errors++;
1260 DEBUG(3, "%s: Packet too long\n", dev->name); 1205 pr_debug("%s: Packet too long\n", dev->name);
1261 } 1206 }
1262 if (rsr & CRCErr) { 1207 if (rsr & CRCErr) {
1263 dev->stats.rx_crc_errors++; 1208 dev->stats.rx_crc_errors++;
1264 DEBUG(3, "%s: CRC error\n", dev->name); 1209 pr_debug("%s: CRC error\n", dev->name);
1265 } 1210 }
1266 if (rsr & AlignErr) { 1211 if (rsr & AlignErr) {
1267 dev->stats.rx_fifo_errors++; /* okay ? */ 1212 dev->stats.rx_fifo_errors++; /* okay ? */
1268 DEBUG(3, "%s: Alignment error\n", dev->name); 1213 pr_debug("%s: Alignment error\n", dev->name);
1269 } 1214 }
1270 1215
1271 /* clear the received/dropped/error packet */ 1216 /* clear the received/dropped/error packet */
@@ -1277,7 +1222,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
1277 if (rx_status & 0x10) { /* Receive overrun */ 1222 if (rx_status & 0x10) { /* Receive overrun */
1278 dev->stats.rx_over_errors++; 1223 dev->stats.rx_over_errors++;
1279 PutByte(XIRCREG_CR, ClearRxOvrun); 1224 PutByte(XIRCREG_CR, ClearRxOvrun);
1280 DEBUG(3, "receive overrun cleared\n"); 1225 pr_debug("receive overrun cleared\n");
1281 } 1226 }
1282 1227
1283 /***** transmit section ******/ 1228 /***** transmit section ******/
@@ -1290,13 +1235,13 @@ xirc2ps_interrupt(int irq, void *dev_id)
1290 if (nn < n) /* rollover */ 1235 if (nn < n) /* rollover */
1291 dev->stats.tx_packets += 256 - n; 1236 dev->stats.tx_packets += 256 - n;
1292 else if (n == nn) { /* happens sometimes - don't know why */ 1237 else if (n == nn) { /* happens sometimes - don't know why */
1293 DEBUG(0, "PTR not changed?\n"); 1238 pr_debug("PTR not changed?\n");
1294 } else 1239 } else
1295 dev->stats.tx_packets += lp->last_ptr_value - n; 1240 dev->stats.tx_packets += lp->last_ptr_value - n;
1296 netif_wake_queue(dev); 1241 netif_wake_queue(dev);
1297 } 1242 }
1298 if (tx_status & 0x0002) { /* Execessive collissions */ 1243 if (tx_status & 0x0002) { /* Execessive collissions */
1299 DEBUG(0, "tx restarted due to execssive collissions\n"); 1244 pr_debug("tx restarted due to execssive collissions\n");
1300 PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */ 1245 PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */
1301 } 1246 }
1302 if (tx_status & 0x0040) 1247 if (tx_status & 0x0040)
@@ -1315,14 +1260,14 @@ xirc2ps_interrupt(int irq, void *dev_id)
1315 maxrx_bytes = 2000; 1260 maxrx_bytes = 2000;
1316 else if (maxrx_bytes > 22000) 1261 else if (maxrx_bytes > 22000)
1317 maxrx_bytes = 22000; 1262 maxrx_bytes = 22000;
1318 DEBUG(1, "set maxrx=%u (rcvd=%u ticks=%lu)\n", 1263 pr_debug("set maxrx=%u (rcvd=%u ticks=%lu)\n",
1319 maxrx_bytes, bytes_rcvd, duration); 1264 maxrx_bytes, bytes_rcvd, duration);
1320 } else if (!duration && maxrx_bytes < 22000) { 1265 } else if (!duration && maxrx_bytes < 22000) {
1321 /* now much faster */ 1266 /* now much faster */
1322 maxrx_bytes += 2000; 1267 maxrx_bytes += 2000;
1323 if (maxrx_bytes > 22000) 1268 if (maxrx_bytes > 22000)
1324 maxrx_bytes = 22000; 1269 maxrx_bytes = 22000;
1325 DEBUG(1, "set maxrx=%u\n", maxrx_bytes); 1270 pr_debug("set maxrx=%u\n", maxrx_bytes);
1326 } 1271 }
1327 } 1272 }
1328 1273
@@ -1372,7 +1317,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
1372 unsigned freespace; 1317 unsigned freespace;
1373 unsigned pktlen = skb->len; 1318 unsigned pktlen = skb->len;
1374 1319
1375 DEBUG(1, "do_start_xmit(skb=%p, dev=%p) len=%u\n", 1320 pr_debug("do_start_xmit(skb=%p, dev=%p) len=%u\n",
1376 skb, dev, pktlen); 1321 skb, dev, pktlen);
1377 1322
1378 1323
@@ -1398,7 +1343,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
1398 freespace &= 0x7fff; 1343 freespace &= 0x7fff;
1399 /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */ 1344 /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */
1400 okay = pktlen +2 < freespace; 1345 okay = pktlen +2 < freespace;
1401 DEBUG(2 + (okay ? 2 : 0), "%s: avail. tx space=%u%s\n", 1346 pr_debug("%s: avail. tx space=%u%s\n",
1402 dev->name, freespace, okay ? " (okay)":" (not enough)"); 1347 dev->name, freespace, okay ? " (okay)":" (not enough)");
1403 if (!okay) { /* not enough space */ 1348 if (!okay) { /* not enough space */
1404 return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */ 1349 return NETDEV_TX_BUSY; /* upper layer may decide to requeue this packet */
@@ -1500,7 +1445,7 @@ do_config(struct net_device *dev, struct ifmap *map)
1500{ 1445{
1501 local_info_t *local = netdev_priv(dev); 1446 local_info_t *local = netdev_priv(dev);
1502 1447
1503 DEBUG(0, "do_config(%p)\n", dev); 1448 pr_debug("do_config(%p)\n", dev);
1504 if (map->port != 255 && map->port != dev->if_port) { 1449 if (map->port != 255 && map->port != dev->if_port) {
1505 if (map->port > 4) 1450 if (map->port > 4)
1506 return -EINVAL; 1451 return -EINVAL;
@@ -1527,7 +1472,7 @@ do_open(struct net_device *dev)
1527 local_info_t *lp = netdev_priv(dev); 1472 local_info_t *lp = netdev_priv(dev);
1528 struct pcmcia_device *link = lp->p_dev; 1473 struct pcmcia_device *link = lp->p_dev;
1529 1474
1530 DEBUG(0, "do_open(%p)\n", dev); 1475 dev_dbg(&link->dev, "do_open(%p)\n", dev);
1531 1476
1532 /* Check that the PCMCIA card is still here. */ 1477 /* Check that the PCMCIA card is still here. */
1533 /* Physical device present signature. */ 1478 /* Physical device present signature. */
@@ -1561,7 +1506,7 @@ do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1561 unsigned int ioaddr = dev->base_addr; 1506 unsigned int ioaddr = dev->base_addr;
1562 struct mii_ioctl_data *data = if_mii(rq); 1507 struct mii_ioctl_data *data = if_mii(rq);
1563 1508
1564 DEBUG(1, "%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n", 1509 pr_debug("%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n",
1565 dev->name, rq->ifr_ifrn.ifrn_name, cmd, 1510 dev->name, rq->ifr_ifrn.ifrn_name, cmd,
1566 data->phy_id, data->reg_num, data->val_in, data->val_out); 1511 data->phy_id, data->reg_num, data->val_in, data->val_out);
1567 1512
@@ -1610,7 +1555,7 @@ do_reset(struct net_device *dev, int full)
1610 unsigned int ioaddr = dev->base_addr; 1555 unsigned int ioaddr = dev->base_addr;
1611 unsigned value; 1556 unsigned value;
1612 1557
1613 DEBUG(0, "%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full); 1558 pr_debug("%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full);
1614 1559
1615 hardreset(dev); 1560 hardreset(dev);
1616 PutByte(XIRCREG_CR, SoftReset); /* set */ 1561 PutByte(XIRCREG_CR, SoftReset); /* set */
@@ -1648,8 +1593,8 @@ do_reset(struct net_device *dev, int full)
1648 } 1593 }
1649 msleep(40); /* wait 40 msec to let it complete */ 1594 msleep(40); /* wait 40 msec to let it complete */
1650 1595
1651 #ifdef PCMCIA_DEBUG 1596 #if 0
1652 if (pc_debug) { 1597 {
1653 SelectPage(0); 1598 SelectPage(0);
1654 value = GetByte(XIRCREG_ESR); /* read the ESR */ 1599 value = GetByte(XIRCREG_ESR); /* read the ESR */
1655 printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value); 1600 printk(KERN_DEBUG "%s: ESR is: %#02x\n", dev->name, value);
@@ -1666,7 +1611,7 @@ do_reset(struct net_device *dev, int full)
1666 value |= DisableLinkPulse; 1611 value |= DisableLinkPulse;
1667 PutByte(XIRCREG1_ECR, value); 1612 PutByte(XIRCREG1_ECR, value);
1668 #endif 1613 #endif
1669 DEBUG(0, "%s: ECR is: %#02x\n", dev->name, value); 1614 pr_debug("%s: ECR is: %#02x\n", dev->name, value);
1670 1615
1671 SelectPage(0x42); 1616 SelectPage(0x42);
1672 PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */ 1617 PutByte(XIRCREG42_SWC0, 0x20); /* disable source insertion */
@@ -1844,7 +1789,7 @@ do_powerdown(struct net_device *dev)
1844 1789
1845 unsigned int ioaddr = dev->base_addr; 1790 unsigned int ioaddr = dev->base_addr;
1846 1791
1847 DEBUG(0, "do_powerdown(%p)\n", dev); 1792 pr_debug("do_powerdown(%p)\n", dev);
1848 1793
1849 SelectPage(4); 1794 SelectPage(4);
1850 PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */ 1795 PutByte(XIRCREG4_GPR1, 0); /* clear bit 0: power down */
@@ -1858,7 +1803,7 @@ do_stop(struct net_device *dev)
1858 local_info_t *lp = netdev_priv(dev); 1803 local_info_t *lp = netdev_priv(dev);
1859 struct pcmcia_device *link = lp->p_dev; 1804 struct pcmcia_device *link = lp->p_dev;
1860 1805
1861 DEBUG(0, "do_stop(%p)\n", dev); 1806 dev_dbg(&link->dev, "do_stop(%p)\n", dev);
1862 1807
1863 if (!link) 1808 if (!link)
1864 return -ENODEV; 1809 return -ENODEV;
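
The xirc2ps rework above drops the open-coded first_tuple()/next_tuple() helpers in favour of pcmcia_get_tuple() and pcmcia_loop_tuple(). A minimal sketch of a pcmcia_loop_tuple() callback in the same shape as the new pcmcia_get_mac_ce() (hypothetical names; a non-zero return asks the core to try the next matching tuple, zero ends the walk):

    #include <linux/string.h>
    #include <pcmcia/cistpl.h>
    #include <pcmcia/ds.h>

    /* Sketch: copy the station address out of a CISTPL_FUNCE node-id tuple. */
    static int example_mac_from_funce(struct pcmcia_device *p_dev,
                                      tuple_t *tuple, void *priv)
    {
            u8 *mac = priv;

            if (tuple->TupleDataLen < 8 ||
                tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
                    return -EINVAL;         /* not this tuple: keep walking */
            memcpy(mac, &tuple->TupleData[2], 6);
            return 0;                       /* found it: stop the loop */
    }

    /* usage: pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, example_mac_from_funce, mac); */
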
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 8659d341e769..35897134a5dd 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -139,7 +139,7 @@ out:
139 return NULL; 139 return NULL;
140} 140}
141 141
142static void __devinit mdio_gpio_bus_deinit(struct device *dev) 142static void mdio_gpio_bus_deinit(struct device *dev)
143{ 143{
144 struct mii_bus *bus = dev_get_drvdata(dev); 144 struct mii_bus *bus = dev_get_drvdata(dev);
145 struct mdio_gpio_info *bitbang = bus->priv; 145 struct mdio_gpio_info *bitbang = bus->priv;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 9bf2a6be9031..965adb6174c3 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1944,8 +1944,15 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1944 } 1944 }
1945 1945
1946 /* Pull completed packets off the queue and receive them. */ 1946 /* Pull completed packets off the queue and receive them. */
1947 while ((skb = ppp_mp_reconstruct(ppp))) 1947 while ((skb = ppp_mp_reconstruct(ppp))) {
1948 ppp_receive_nonmp_frame(ppp, skb); 1948 if (pskb_may_pull(skb, 2))
1949 ppp_receive_nonmp_frame(ppp, skb);
1950 else {
1951 ++ppp->dev->stats.rx_length_errors;
1952 kfree_skb(skb);
1953 ppp_receive_error(ppp);
1954 }
1955 }
1949 1956
1950 return; 1957 return;
1951 1958
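
The ppp_generic hunk above validates each reconstructed multilink frame with pskb_may_pull() before passing it to the normal receive path, so a runt fragment can no longer be parsed past its end. A minimal sketch of the same guard, assuming a generic deliver path rather than the driver's ppp_receive_nonmp_frame():

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Sketch: drop frames too short to hold a 2-byte PPP protocol field.
     * pskb_may_pull() succeeds only when that many bytes are linearly
     * available (pulling from fragments if necessary). */
    static void deliver_checked(struct net_device *dev, struct sk_buff *skb)
    {
            if (!pskb_may_pull(skb, 2)) {
                    dev->stats.rx_length_errors++;
                    kfree_skb(skb);
                    return;
            }
            netif_rx(skb);                  /* stand-in for the real receive path */
    }
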
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 7dfcb58b0eb4..8b14c6eda7c3 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -1085,7 +1085,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1085 int bar = 0; 1085 int bar = 0;
1086 u16 *adrp; 1086 u16 *adrp;
1087 1087
1088 printk(KERN_INFO "%s\n", version); 1088 printk("%s\n", version);
1089 1089
1090 err = pci_enable_device(pdev); 1090 err = pci_enable_device(pdev);
1091 if (err) 1091 if (err)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index fa4935678488..0fe2fc90f207 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -3235,6 +3235,10 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3235 flush_scheduled_work(); 3235 flush_scheduled_work();
3236 3236
3237 unregister_netdev(dev); 3237 unregister_netdev(dev);
3238
3239 /* restore original MAC address */
3240 rtl_rar_set(tp, dev->perm_addr);
3241
3238 rtl_disable_msi(pdev, tp); 3242 rtl_disable_msi(pdev, tp);
3239 rtl8169_release_board(pdev, dev, tp->mmio_addr); 3243 rtl8169_release_board(pdev, dev, tp->mmio_addr);
3240 pci_set_drvdata(pdev, NULL); 3244 pci_set_drvdata(pdev, NULL);
@@ -3243,9 +3247,9 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3243static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, 3247static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
3244 struct net_device *dev) 3248 struct net_device *dev)
3245{ 3249{
3246 unsigned int mtu = dev->mtu; 3250 unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
3247 3251
3248 tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE; 3252 tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
3249} 3253}
3250 3254
3251static int rtl8169_open(struct net_device *dev) 3255static int rtl8169_open(struct net_device *dev)
@@ -4881,6 +4885,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
4881 4885
4882 rtl8169_net_suspend(dev); 4886 rtl8169_net_suspend(dev);
4883 4887
4888 /* restore original MAC address */
4889 rtl_rar_set(tp, dev->perm_addr);
4890
4884 spin_lock_irq(&tp->lock); 4891 spin_lock_irq(&tp->lock);
4885 4892
4886 rtl8169_asic_down(ioaddr); 4893 rtl8169_asic_down(ioaddr);
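
The rtl8169_set_rxbufsize() hunk above sizes the receive buffer from the largest possible frame instead of the bare MTU. A small sketch of the same rule (hypothetical helper; RX_BUF_SIZE plays the role of the driver's minimum):

    #include <linux/if_ether.h>     /* ETH_FCS_LEN = 4 */
    #include <linux/if_vlan.h>      /* VLAN_ETH_HLEN = 18 */

    /* Sketch: MTU + VLAN-tagged header + FCS, e.g. 1500 + 18 + 4 = 1522
     * bytes for the default MTU, but never below the driver's minimum. */
    static unsigned int rx_buf_size(unsigned int mtu, unsigned int min_size)
    {
            unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

            return max_frame > min_size ? max_frame : min_size;
    }
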
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ddccf5fa56b6..0dd7839322bc 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3494,6 +3494,7 @@ static void s2io_reset(struct s2io_nic *sp)
3494 3494
3495 /* Restore the PCI state saved during initialization. */ 3495 /* Restore the PCI state saved during initialization. */
3496 pci_restore_state(sp->pdev); 3496 pci_restore_state(sp->pdev);
3497 pci_save_state(sp->pdev);
3497 pci_read_config_word(sp->pdev, 0x2, &val16); 3498 pci_read_config_word(sp->pdev, 0x2, &val16);
3498 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID) 3499 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3499 break; 3500 break;
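
The s2io hunk above adds pci_save_state() immediately after pci_restore_state(): in this kernel a restore invalidates the state saved at probe time, so without the re-save a later pass of the reset retry loop would have nothing left to restore. A minimal sketch of the pairing (hypothetical helper name):

    #include <linux/pci.h>

    /* Sketch: keep the saved config space armed across repeated resets. */
    static void restore_and_rearm(struct pci_dev *pdev)
    {
            pci_restore_state(pdev);        /* write back the saved config space */
            pci_save_state(pdev);           /* re-save so the next restore still works */
    }
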
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 05c91ee6921e..f12206bdbb75 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2283,7 +2283,7 @@ static int __devinit smc_drv_probe(struct platform_device *pdev)
2283 2283
2284 ndev->irq = ires->start; 2284 ndev->irq = ires->start;
2285 2285
2286 if (ires->flags & IRQF_TRIGGER_MASK) 2286 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
2287 irq_flags = ires->flags & IRQF_TRIGGER_MASK; 2287 irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2288 2288
2289 ret = smc_request_attrib(pdev, ndev); 2289 ret = smc_request_attrib(pdev, ndev);
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index ccdd196f5297..f9cdcbcb77d4 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -986,7 +986,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
986 struct net_device *dev = pdata->dev; 986 struct net_device *dev = pdata->dev;
987 int npackets = 0; 987 int npackets = 0;
988 988
989 while (likely(netif_running(dev)) && (npackets < budget)) { 989 while (npackets < budget) {
990 unsigned int pktlength; 990 unsigned int pktlength;
991 unsigned int pktwords; 991 unsigned int pktwords;
992 struct sk_buff *skb; 992 struct sk_buff *skb;
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index b4909a2dec66..0f7909276237 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -252,6 +252,9 @@ static int smsc9420_ethtool_get_settings(struct net_device *dev,
252{ 252{
253 struct smsc9420_pdata *pd = netdev_priv(dev); 253 struct smsc9420_pdata *pd = netdev_priv(dev);
254 254
255 if (!pd->phy_dev)
256 return -ENODEV;
257
255 cmd->maxtxpkt = 1; 258 cmd->maxtxpkt = 1;
256 cmd->maxrxpkt = 1; 259 cmd->maxrxpkt = 1;
257 return phy_ethtool_gset(pd->phy_dev, cmd); 260 return phy_ethtool_gset(pd->phy_dev, cmd);
@@ -262,6 +265,9 @@ static int smsc9420_ethtool_set_settings(struct net_device *dev,
262{ 265{
263 struct smsc9420_pdata *pd = netdev_priv(dev); 266 struct smsc9420_pdata *pd = netdev_priv(dev);
264 267
268 if (!pd->phy_dev)
269 return -ENODEV;
270
265 return phy_ethtool_sset(pd->phy_dev, cmd); 271 return phy_ethtool_sset(pd->phy_dev, cmd);
266} 272}
267 273
@@ -290,6 +296,10 @@ static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data)
290static int smsc9420_ethtool_nway_reset(struct net_device *netdev) 296static int smsc9420_ethtool_nway_reset(struct net_device *netdev)
291{ 297{
292 struct smsc9420_pdata *pd = netdev_priv(netdev); 298 struct smsc9420_pdata *pd = netdev_priv(netdev);
299
300 if (!pd->phy_dev)
301 return -ENODEV;
302
293 return phy_start_aneg(pd->phy_dev); 303 return phy_start_aneg(pd->phy_dev);
294} 304}
295 305
@@ -312,6 +322,10 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
312 for (i = 0; i < 0x100; i += (sizeof(u32))) 322 for (i = 0; i < 0x100; i += (sizeof(u32)))
313 data[j++] = smsc9420_reg_read(pd, i); 323 data[j++] = smsc9420_reg_read(pd, i);
314 324
325 // cannot read phy registers if the net device is down
326 if (!phy_dev)
327 return;
328
315 for (i = 0; i <= 31; i++) 329 for (i = 0; i <= 31; i++)
316 data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i); 330 data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i);
317} 331}
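
The smsc9420 hunks above add NULL checks because the PHY is only attached while the interface is open; any ethtool entry point that reaches phylib must bail out cleanly when no PHY is bound. A minimal sketch of the guard (hypothetical handler, not the driver's):

    #include <linux/phy.h>

    /* Sketch: fail the request instead of dereferencing a NULL PHY. */
    static int example_nway_reset(struct phy_device *phydev)
    {
            if (!phydev)
                    return -ENODEV;         /* interface down: no PHY attached */
            return phy_start_aneg(phydev);
    }
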
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index c2f14dc9ba28..9542995ba667 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -416,13 +416,8 @@ static void init_dma_desc_rings(struct net_device *dev)
416 unsigned int txsize = priv->dma_tx_size; 416 unsigned int txsize = priv->dma_tx_size;
417 unsigned int rxsize = priv->dma_rx_size; 417 unsigned int rxsize = priv->dma_rx_size;
418 unsigned int bfsize = priv->dma_buf_sz; 418 unsigned int bfsize = priv->dma_buf_sz;
419 int buff2_needed = 0; 419 int buff2_needed = 0, dis_ic = 0;
420 int dis_ic = 0;
421 420
422#ifdef CONFIG_STMMAC_TIMER
423 /* Using Timers disable interrupts on completion for the reception */
424 dis_ic = 1;
425#endif
426 /* Set the Buffer size according to the MTU; 421 /* Set the Buffer size according to the MTU;
427 * indeed, in case of jumbo we need to bump-up the buffer sizes. 422 * indeed, in case of jumbo we need to bump-up the buffer sizes.
428 */ 423 */
@@ -437,6 +432,11 @@ static void init_dma_desc_rings(struct net_device *dev)
437 else 432 else
438 bfsize = DMA_BUFFER_SIZE; 433 bfsize = DMA_BUFFER_SIZE;
439 434
435#ifdef CONFIG_STMMAC_TIMER
436 /* Disable interrupts on completion for the reception if timer is on */
437 if (likely(priv->tm->enable))
438 dis_ic = 1;
439#endif
440 /* If the MTU exceeds 8k so use the second buffer in the chain */ 440 /* If the MTU exceeds 8k so use the second buffer in the chain */
441 if (bfsize >= BUF_SIZE_8KiB) 441 if (bfsize >= BUF_SIZE_8KiB)
442 buff2_needed = 1; 442 buff2_needed = 1;
@@ -809,20 +809,22 @@ static void stmmac_tx(struct stmmac_priv *priv)
809 809
810static inline void stmmac_enable_irq(struct stmmac_priv *priv) 810static inline void stmmac_enable_irq(struct stmmac_priv *priv)
811{ 811{
812#ifndef CONFIG_STMMAC_TIMER 812#ifdef CONFIG_STMMAC_TIMER
813 writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA); 813 if (likely(priv->tm->enable))
814#else 814 priv->tm->timer_start(tmrate);
815 priv->tm->timer_start(tmrate); 815 else
816#endif 816#endif
817 writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
817} 818}
818 819
819static inline void stmmac_disable_irq(struct stmmac_priv *priv) 820static inline void stmmac_disable_irq(struct stmmac_priv *priv)
820{ 821{
821#ifndef CONFIG_STMMAC_TIMER 822#ifdef CONFIG_STMMAC_TIMER
822 writel(0, priv->dev->base_addr + DMA_INTR_ENA); 823 if (likely(priv->tm->enable))
823#else 824 priv->tm->timer_stop();
824 priv->tm->timer_stop(); 825 else
825#endif 826#endif
827 writel(0, priv->dev->base_addr + DMA_INTR_ENA);
826} 828}
827 829
828static int stmmac_has_work(struct stmmac_priv *priv) 830static int stmmac_has_work(struct stmmac_priv *priv)
@@ -1031,22 +1033,23 @@ static int stmmac_open(struct net_device *dev)
1031 } 1033 }
1032 1034
1033#ifdef CONFIG_STMMAC_TIMER 1035#ifdef CONFIG_STMMAC_TIMER
1034 priv->tm = kmalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); 1036 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
1035 if (unlikely(priv->tm == NULL)) { 1037 if (unlikely(priv->tm == NULL)) {
1036 pr_err("%s: ERROR: timer memory alloc failed \n", __func__); 1038 pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
1037 return -ENOMEM; 1039 return -ENOMEM;
1038 } 1040 }
1039 priv->tm->freq = tmrate; 1041 priv->tm->freq = tmrate;
1040 1042
1041 /* Test if the HW timer can be actually used. 1043 /* Test if the external timer can be actually used.
1042 * In case of failure continue with no timer. */ 1044 * In case of failure continue without timer. */
1043 if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) { 1045 if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
1044 pr_warning("stmmaceth: cannot attach the HW timer\n"); 1046 pr_warning("stmmaceth: cannot attach the external timer.\n");
1045 tmrate = 0; 1047 tmrate = 0;
1046 priv->tm->freq = 0; 1048 priv->tm->freq = 0;
1047 priv->tm->timer_start = stmmac_no_timer_started; 1049 priv->tm->timer_start = stmmac_no_timer_started;
1048 priv->tm->timer_stop = stmmac_no_timer_stopped; 1050 priv->tm->timer_stop = stmmac_no_timer_stopped;
1049 } 1051 } else
1052 priv->tm->enable = 1;
1050#endif 1053#endif
1051 1054
1052 /* Create and initialize the TX/RX descriptors chains. */ 1055 /* Create and initialize the TX/RX descriptors chains. */
@@ -1322,9 +1325,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1322 1325
1323 /* Interrupt on completition only for the latest segment */ 1326 /* Interrupt on completition only for the latest segment */
1324 priv->mac_type->ops->close_tx_desc(desc); 1327 priv->mac_type->ops->close_tx_desc(desc);
1328
1325#ifdef CONFIG_STMMAC_TIMER 1329#ifdef CONFIG_STMMAC_TIMER
1326 /* Clean IC while using timers */ 1330 /* Clean IC while using timer */
1327 priv->mac_type->ops->clear_tx_ic(desc); 1331 if (likely(priv->tm->enable))
1332 priv->mac_type->ops->clear_tx_ic(desc);
1328#endif 1333#endif
1329 /* To avoid raise condition */ 1334 /* To avoid raise condition */
1330 priv->mac_type->ops->set_tx_owner(first); 1335 priv->mac_type->ops->set_tx_owner(first);
@@ -2028,7 +2033,8 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
2028 2033
2029#ifdef CONFIG_STMMAC_TIMER 2034#ifdef CONFIG_STMMAC_TIMER
2030 priv->tm->timer_stop(); 2035 priv->tm->timer_stop();
2031 dis_ic = 1; 2036 if (likely(priv->tm->enable))
2037 dis_ic = 1;
2032#endif 2038#endif
2033 napi_disable(&priv->napi); 2039 napi_disable(&priv->napi);
2034 2040
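
The stmmac hunks above stop treating CONFIG_STMMAC_TIMER as a guarantee that the external timer works: a new tm->enable flag records whether stmmac_open_ext_timer() succeeded, and every timer-versus-interrupt decision now tests it. A small sketch of the fallback, reusing the driver's DMA_INTR_* names but with a hypothetical mitigation struct:

    #include <linux/io.h>

    struct rx_mitigation {
            unsigned int enable;                    /* set once the timer attached */
            unsigned int freq;
            void (*timer_start)(unsigned int freq);
    };

    /* Sketch: mitigate RX interrupts with the timer only when it attached,
     * otherwise keep the normal completion interrupts enabled. */
    static void rx_irq_enable(struct rx_mitigation *tm, void __iomem *ioaddr)
    {
            if (tm->enable)
                    tm->timer_start(tm->freq);
            else
                    writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
    }
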
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
index b838c6582077..679f61ffb1f8 100644
--- a/drivers/net/stmmac/stmmac_timer.c
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -63,7 +63,7 @@ int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
63 63
64 stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); 64 stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
65 if (stmmac_rtc == NULL) { 65 if (stmmac_rtc == NULL) {
66 pr_error("open rtc device failed\n"); 66 pr_err("open rtc device failed\n");
67 return -ENODEV; 67 return -ENODEV;
68 } 68 }
69 69
@@ -71,7 +71,7 @@ int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
71 71
72 /* Periodic mode is not supported */ 72 /* Periodic mode is not supported */
73 if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) { 73 if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
74 pr_error("set periodic failed\n"); 74 pr_err("set periodic failed\n");
75 rtc_irq_unregister(stmmac_rtc, &stmmac_task); 75 rtc_irq_unregister(stmmac_rtc, &stmmac_task);
76 rtc_class_close(stmmac_rtc); 76 rtc_class_close(stmmac_rtc);
77 return -1; 77 return -1;
diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h
index f795cae33725..6863590d184b 100644
--- a/drivers/net/stmmac/stmmac_timer.h
+++ b/drivers/net/stmmac/stmmac_timer.h
@@ -26,6 +26,7 @@ struct stmmac_timer {
26 void (*timer_start) (unsigned int new_freq); 26 void (*timer_start) (unsigned int new_freq);
27 void (*timer_stop) (void); 27 void (*timer_stop) (void);
28 unsigned int freq; 28 unsigned int freq;
29 unsigned int enable;
29}; 30};
30 31
31/* Open the HW timer device and return 0 in case of success */ 32/* Open the HW timer device and return 0 in case of success */
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 7019a0d1a82b..61640b99b705 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2063,7 +2063,15 @@ static int gem_check_invariants(struct gem *gp)
2063 mif_cfg &= ~MIF_CFG_PSELECT; 2063 mif_cfg &= ~MIF_CFG_PSELECT;
2064 writel(mif_cfg, gp->regs + MIF_CFG); 2064 writel(mif_cfg, gp->regs + MIF_CFG);
2065 } else { 2065 } else {
2066 gp->phy_type = phy_serialink; 2066#ifdef CONFIG_SPARC
2067 const char *p;
2068
2069 p = of_get_property(gp->of_node, "shared-pins", NULL);
2070 if (p && !strcmp(p, "serdes"))
2071 gp->phy_type = phy_serdes;
2072 else
2073#endif
2074 gp->phy_type = phy_serialink;
2067 } 2075 }
2068 if (gp->phy_type == phy_mii_mdio1 || 2076 if (gp->phy_type == phy_mii_mdio1 ||
2069 gp->phy_type == phy_mii_mdio0) { 2077 gp->phy_type == phy_mii_mdio0) {
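
The sungem hunk above consults the OpenFirmware "shared-pins" property (SPARC only) to pick SERDES mode instead of always assuming a serial-link PHY. A tiny sketch of the property check (hypothetical helper; np is the device's OF node):

    #include <linux/of.h>
    #include <linux/string.h>

    /* Sketch: true when firmware marks the shared pins as "serdes". */
    static bool wants_serdes(struct device_node *np)
    {
            const char *p = of_get_property(np, "shared-pins", NULL);

            return p && !strcmp(p, "serdes");
    }
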
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index fa4e58196c21..43bc3fcc0d85 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -378,7 +378,7 @@ static void dbg_dump(int line_count, const char *func_name, unsigned char *buf,
378} 378}
379 379
380#define DUMP(buf_, len_) \ 380#define DUMP(buf_, len_) \
381 dbg_dump(__LINE__, __func__, buf_, len_) 381 dbg_dump(__LINE__, __func__, (unsigned char *)buf_, len_)
382 382
383#define DUMP1(buf_, len_) \ 383#define DUMP1(buf_, len_) \
384 do { \ 384 do { \
@@ -1363,7 +1363,7 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1363 /* reset the rts and dtr */ 1363 /* reset the rts and dtr */
1364 /* do the actual close */ 1364 /* do the actual close */
1365 serial->open_count--; 1365 serial->open_count--;
1366 kref_put(&serial->parent->ref, hso_serial_ref_free); 1366
1367 if (serial->open_count <= 0) { 1367 if (serial->open_count <= 0) {
1368 serial->open_count = 0; 1368 serial->open_count = 0;
1369 spin_lock_irq(&serial->serial_lock); 1369 spin_lock_irq(&serial->serial_lock);
@@ -1383,6 +1383,8 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1383 usb_autopm_put_interface(serial->parent->interface); 1383 usb_autopm_put_interface(serial->parent->interface);
1384 1384
1385 mutex_unlock(&serial->parent->mutex); 1385 mutex_unlock(&serial->parent->mutex);
1386
1387 kref_put(&serial->parent->ref, hso_serial_ref_free);
1386} 1388}
1387 1389
1388/* close the requested serial port */ 1390/* close the requested serial port */
@@ -1527,7 +1529,7 @@ static void tiocmget_intr_callback(struct urb *urb)
1527 dev_warn(&usb->dev, 1529 dev_warn(&usb->dev,
1528 "hso received invalid serial state notification\n"); 1530 "hso received invalid serial state notification\n");
1529 DUMP(serial_state_notification, 1531 DUMP(serial_state_notification,
1530 sizeof(hso_serial_state_notifation)) 1532 sizeof(struct hso_serial_state_notification));
1531 } else { 1533 } else {
1532 1534
1533 UART_state_bitmap = le16_to_cpu(serial_state_notification-> 1535 UART_state_bitmap = le16_to_cpu(serial_state_notification->
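
The hso hunk above moves the kref_put() of the parent to the very end of the close path: dropping what may be the last reference can free the parent, so it must not happen while the function still needs to unlock the parent's mutex. A minimal sketch of that ordering with hypothetical types:

    #include <linux/kref.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct parent_obj {
            struct kref ref;
            struct mutex lock;
    };

    static void parent_release(struct kref *ref)
    {
            kfree(container_of(ref, struct parent_obj, ref));
    }

    /* Sketch: the unlock is the last touch of 'parent', so the reference
     * drop (which may free it) comes strictly after. */
    static void close_child(struct parent_obj *parent)
    {
            mutex_lock(&parent->lock);
            /* ... tear down per-child state ... */
            mutex_unlock(&parent->lock);

            kref_put(&parent->ref, parent_release);
    }
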
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ade5b344f75d..52af5017c46b 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -210,32 +210,29 @@ rx_drop:
210static struct net_device_stats *veth_get_stats(struct net_device *dev) 210static struct net_device_stats *veth_get_stats(struct net_device *dev)
211{ 211{
212 struct veth_priv *priv; 212 struct veth_priv *priv;
213 struct net_device_stats *dev_stats;
214 int cpu; 213 int cpu;
215 struct veth_net_stats *stats; 214 struct veth_net_stats *stats, total = {0};
216 215
217 priv = netdev_priv(dev); 216 priv = netdev_priv(dev);
218 dev_stats = &dev->stats;
219
220 dev_stats->rx_packets = 0;
221 dev_stats->tx_packets = 0;
222 dev_stats->rx_bytes = 0;
223 dev_stats->tx_bytes = 0;
224 dev_stats->tx_dropped = 0;
225 dev_stats->rx_dropped = 0;
226 217
227 for_each_online_cpu(cpu) { 218 for_each_possible_cpu(cpu) {
228 stats = per_cpu_ptr(priv->stats, cpu); 219 stats = per_cpu_ptr(priv->stats, cpu);
229 220
230 dev_stats->rx_packets += stats->rx_packets; 221 total.rx_packets += stats->rx_packets;
231 dev_stats->tx_packets += stats->tx_packets; 222 total.tx_packets += stats->tx_packets;
232 dev_stats->rx_bytes += stats->rx_bytes; 223 total.rx_bytes += stats->rx_bytes;
233 dev_stats->tx_bytes += stats->tx_bytes; 224 total.tx_bytes += stats->tx_bytes;
234 dev_stats->tx_dropped += stats->tx_dropped; 225 total.tx_dropped += stats->tx_dropped;
235 dev_stats->rx_dropped += stats->rx_dropped; 226 total.rx_dropped += stats->rx_dropped;
236 } 227 }
237 228 dev->stats.rx_packets = total.rx_packets;
238 return dev_stats; 229 dev->stats.tx_packets = total.tx_packets;
230 dev->stats.rx_bytes = total.rx_bytes;
231 dev->stats.tx_bytes = total.tx_bytes;
232 dev->stats.tx_dropped = total.tx_dropped;
233 dev->stats.rx_dropped = total.rx_dropped;
234
235 return &dev->stats;
239} 236}
240 237
241static int veth_open(struct net_device *dev) 238static int veth_open(struct net_device *dev)
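veth_get_stats() now accumulates into a local total and walks every possible CPU, so counters from CPUs that have gone offline are not dropped, and dev->stats is written once at the end instead of being zeroed and rebuilt in place. A simplified model with an ordinary array standing in for the per-CPU allocation (NR_CPUS, the sample values and the names are made up for the sketch):

#include <stdio.h>

#define NR_CPUS 4

struct net_stats {
        unsigned long rx_packets, tx_packets;
        unsigned long rx_bytes, tx_bytes;
};

/* Stand-in for per_cpu_ptr(priv->stats, cpu). */
static struct net_stats percpu[NR_CPUS] = {
        { 1, 2, 100, 200 }, { 3, 4, 300, 400 }, { 0, 0, 0, 0 }, { 5, 6, 500, 600 },
};

static struct net_stats dev_stats;

static struct net_stats *get_stats(void)
{
        struct net_stats total = { 0 };
        int cpu;

        /* Walk every possible CPU, not just the online ones. */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                total.rx_packets += percpu[cpu].rx_packets;
                total.tx_packets += percpu[cpu].tx_packets;
                total.rx_bytes   += percpu[cpu].rx_bytes;
                total.tx_bytes   += percpu[cpu].tx_bytes;
        }

        dev_stats = total;      /* publish the sums in one step */
        return &dev_stats;
}

int main(void)
{
        struct net_stats *s = get_stats();

        printf("rx %lu/%lu tx %lu/%lu\n",
               s->rx_packets, s->rx_bytes, s->tx_packets, s->tx_bytes);
        return 0;
}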
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index e2c33c06190b..8e25ca7080c7 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -907,6 +907,7 @@ static ssize_t cosa_write(struct file *file,
907 current->state = TASK_RUNNING; 907 current->state = TASK_RUNNING;
908 chan->tx_status = 1; 908 chan->tx_status = 1;
909 spin_unlock_irqrestore(&cosa->lock, flags); 909 spin_unlock_irqrestore(&cosa->lock, flags);
910 up(&chan->wsem);
910 return -ERESTARTSYS; 911 return -ERESTARTSYS;
911 } 912 }
912 } 913 }
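The cosa_write() fix posts the channel's write semaphore on the signal-interrupted exit as well, matching the normal path; otherwise the next writer would block forever. A minimal POSIX-semaphore model of keeping the release on every return (do_write and its argument are invented for illustration; -1 stands in for -ERESTARTSYS):

#include <semaphore.h>
#include <stdio.h>

static sem_t wsem;

static int do_write(int interrupted)
{
        sem_wait(&wsem);        /* one writer at a time */

        if (interrupted) {
                /* The error path must release the semaphore too,
                 * or every later writer deadlocks here. */
                sem_post(&wsem);
                return -1;
        }

        /* ... queue the frame for transmission ... */
        sem_post(&wsem);
        return 0;
}

int main(void)
{
        sem_init(&wsem, 0, 1);
        printf("first write: %d\n", do_write(1));       /* interrupted */
        printf("second write: %d\n", do_write(0));      /* still gets the semaphore */
        sem_destroy(&wsem);
        return 0;
}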
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index d0593ed9170e..f6036fb42319 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -43,21 +43,6 @@
43 43
44#include "airo.h" 44#include "airo.h"
45 45
46/*
47 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
48 you do not define PCMCIA_DEBUG at all, all the debug code will be
49 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
50 be present but disabled -- but it can then be enabled for specific
51 modules at load time with a 'pc_debug=#' option to insmod.
52*/
53#ifdef PCMCIA_DEBUG
54static int pc_debug = PCMCIA_DEBUG;
55module_param(pc_debug, int, 0);
56static char *version = "$Revision: 1.2 $";
57#define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args);
58#else
59#define DEBUG(n, args...)
60#endif
61 46
62/*====================================================================*/ 47/*====================================================================*/
63 48
@@ -145,11 +130,10 @@ static int airo_probe(struct pcmcia_device *p_dev)
145{ 130{
146 local_info_t *local; 131 local_info_t *local;
147 132
148 DEBUG(0, "airo_attach()\n"); 133 dev_dbg(&p_dev->dev, "airo_attach()\n");
149 134
150 /* Interrupt setup */ 135 /* Interrupt setup */
151 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 136 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
152 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
153 p_dev->irq.Handler = NULL; 137 p_dev->irq.Handler = NULL;
154 138
155 /* 139 /*
@@ -184,7 +168,7 @@ static int airo_probe(struct pcmcia_device *p_dev)
184 168
185static void airo_detach(struct pcmcia_device *link) 169static void airo_detach(struct pcmcia_device *link)
186{ 170{
187 DEBUG(0, "airo_detach(0x%p)\n", link); 171 dev_dbg(&link->dev, "airo_detach\n");
188 172
189 airo_release(link); 173 airo_release(link);
190 174
@@ -204,9 +188,6 @@ static void airo_detach(struct pcmcia_device *link)
204 188
205 ======================================================================*/ 189 ======================================================================*/
206 190
207#define CS_CHECK(fn, ret) \
208do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
209
210static int airo_cs_config_check(struct pcmcia_device *p_dev, 191static int airo_cs_config_check(struct pcmcia_device *p_dev,
211 cistpl_cftable_entry_t *cfg, 192 cistpl_cftable_entry_t *cfg,
212 cistpl_cftable_entry_t *dflt, 193 cistpl_cftable_entry_t *dflt,
@@ -275,11 +256,11 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
275 req->Base = mem->win[0].host_addr; 256 req->Base = mem->win[0].host_addr;
276 req->Size = mem->win[0].len; 257 req->Size = mem->win[0].len;
277 req->AccessSpeed = 0; 258 req->AccessSpeed = 0;
278 if (pcmcia_request_window(&p_dev, req, &p_dev->win) != 0) 259 if (pcmcia_request_window(p_dev, req, &p_dev->win) != 0)
279 return -ENODEV; 260 return -ENODEV;
280 map.Page = 0; 261 map.Page = 0;
281 map.CardOffset = mem->win[0].card_addr; 262 map.CardOffset = mem->win[0].card_addr;
282 if (pcmcia_map_mem_page(p_dev->win, &map) != 0) 263 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
283 return -ENODEV; 264 return -ENODEV;
284 } 265 }
285 /* If we got this far, we're cool! */ 266 /* If we got this far, we're cool! */
@@ -291,11 +272,11 @@ static int airo_config(struct pcmcia_device *link)
291{ 272{
292 local_info_t *dev; 273 local_info_t *dev;
293 win_req_t *req; 274 win_req_t *req;
294 int last_fn, last_ret; 275 int ret;
295 276
296 dev = link->priv; 277 dev = link->priv;
297 278
298 DEBUG(0, "airo_config(0x%p)\n", link); 279 dev_dbg(&link->dev, "airo_config\n");
299 280
300 req = kzalloc(sizeof(win_req_t), GFP_KERNEL); 281 req = kzalloc(sizeof(win_req_t), GFP_KERNEL);
301 if (!req) 282 if (!req)
@@ -315,8 +296,8 @@ static int airo_config(struct pcmcia_device *link)
315 * and most client drivers will only use the CIS to fill in 296 * and most client drivers will only use the CIS to fill in
316 * implementation-defined details. 297 * implementation-defined details.
317 */ 298 */
318 last_ret = pcmcia_loop_config(link, airo_cs_config_check, req); 299 ret = pcmcia_loop_config(link, airo_cs_config_check, req);
319 if (last_ret) 300 if (ret)
320 goto failed; 301 goto failed;
321 302
322 /* 303 /*
@@ -324,21 +305,25 @@ static int airo_config(struct pcmcia_device *link)
324 handler to the interrupt, unless the 'Handler' member of the 305 handler to the interrupt, unless the 'Handler' member of the
325 irq structure is initialized. 306 irq structure is initialized.
326 */ 307 */
327 if (link->conf.Attributes & CONF_ENABLE_IRQ) 308 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
328 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 309 ret = pcmcia_request_irq(link, &link->irq);
310 if (ret)
311 goto failed;
312 }
329 313
330 /* 314 /*
331 This actually configures the PCMCIA socket -- setting up 315 This actually configures the PCMCIA socket -- setting up
332 the I/O windows and the interrupt mapping, and putting the 316 the I/O windows and the interrupt mapping, and putting the
333 card and host interface into "Memory and IO" mode. 317 card and host interface into "Memory and IO" mode.
334 */ 318 */
335 CS_CHECK(RequestConfiguration, 319 ret = pcmcia_request_configuration(link, &link->conf);
336 pcmcia_request_configuration(link, &link->conf)); 320 if (ret)
321 goto failed;
337 ((local_info_t *)link->priv)->eth_dev = 322 ((local_info_t *)link->priv)->eth_dev =
338 init_airo_card(link->irq.AssignedIRQ, 323 init_airo_card(link->irq.AssignedIRQ,
339 link->io.BasePort1, 1, &handle_to_dev(link)); 324 link->io.BasePort1, 1, &link->dev);
340 if (!((local_info_t *)link->priv)->eth_dev) 325 if (!((local_info_t *)link->priv)->eth_dev)
341 goto cs_failed; 326 goto failed;
342 327
343 /* 328 /*
344 At this point, the dev_node_t structure(s) need to be 329 At this point, the dev_node_t structure(s) need to be
@@ -368,8 +353,6 @@ static int airo_config(struct pcmcia_device *link)
368 kfree(req); 353 kfree(req);
369 return 0; 354 return 0;
370 355
371 cs_failed:
372 cs_error(link, last_fn, last_ret);
373 failed: 356 failed:
374 airo_release(link); 357 airo_release(link);
375 kfree(req); 358 kfree(req);
@@ -386,7 +369,7 @@ static int airo_config(struct pcmcia_device *link)
386 369
387static void airo_release(struct pcmcia_device *link) 370static void airo_release(struct pcmcia_device *link)
388{ 371{
389 DEBUG(0, "airo_release(0x%p)\n", link); 372 dev_dbg(&link->dev, "airo_release\n");
390 pcmcia_disable_device(link); 373 pcmcia_disable_device(link);
391} 374}
392 375
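The airo_cs.c conversion above (and the atmel_cs.c, hostap_cs.c and netwave_cs.c ones further down) all follow the same shape: the CS_CHECK() macro, which hid an assignment and a jump to a cs_failed label that called cs_error(), is replaced by explicit "ret = ...; if (ret) goto failed;" sequences. A generic sketch of the resulting style, with placeholder functions request_a/request_b rather than real PCMCIA calls:

#include <stdio.h>

static int request_a(void) { return 0; }
static int request_b(void) { return -1; }       /* pretend this one fails */

static int configure(void)
{
        int ret;

        /* Old style, roughly:
         *   #define CS_CHECK(fn, r) \
         *       do { last_fn = (fn); if ((last_ret = (r)) != 0) goto cs_failed; } while (0)
         * which hid both the assignment and the jump inside a macro.
         * New style: every call site shows its own error handling.
         */
        ret = request_a();
        if (ret)
                goto failed;

        ret = request_b();
        if (ret)
                goto failed;

        return 0;

failed:
        /* release whatever was acquired before the failure */
        return ret;
}

int main(void)
{
        printf("configure: %d\n", configure());
        return 0;
}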
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9c6ab5378f6e..95a8e232b58f 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1125,7 +1125,6 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1125 /* configure operational mode */ 1125 /* configure operational mode */
1126 ath5k_hw_set_opmode(ah); 1126 ath5k_hw_set_opmode(ah);
1127 1127
1128 ath5k_hw_set_mcast_filter(ah, 0, 0);
1129 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); 1128 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
1130} 1129}
1131 1130
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index b767c3b67b24..b548c8eaaae1 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -63,12 +63,16 @@ static const struct pci_device_id ath5k_led_devices[] = {
63 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) }, 63 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
64 /* E-machines E510 (tuliom@gmail.com) */ 64 /* E-machines E510 (tuliom@gmail.com) */
65 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) }, 65 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) },
66 /* BenQ Joybook R55v (nowymarluk@wp.pl) */
67 { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0100), ATH_LED(1, 0) },
66 /* Acer Extensa 5620z (nekoreeve@gmail.com) */ 68 /* Acer Extensa 5620z (nekoreeve@gmail.com) */
67 { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) }, 69 { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) },
68 /* Fukato Datacask Jupiter 1014a (mrb74@gmx.at) */ 70 /* Fukato Datacask Jupiter 1014a (mrb74@gmx.at) */
69 { ATH_SDEVICE(PCI_VENDOR_ID_AZWAVE, 0x1026), ATH_LED(3, 0) }, 71 { ATH_SDEVICE(PCI_VENDOR_ID_AZWAVE, 0x1026), ATH_LED(3, 0) },
70 /* IBM ThinkPad AR5BXB6 (legovini@spiro.fisica.unipd.it) */ 72 /* IBM ThinkPad AR5BXB6 (legovini@spiro.fisica.unipd.it) */
71 { ATH_SDEVICE(PCI_VENDOR_ID_IBM, 0x058a), ATH_LED(1, 0) }, 73 { ATH_SDEVICE(PCI_VENDOR_ID_IBM, 0x058a), ATH_LED(1, 0) },
74 /* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
75 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
72 /* HP Compaq C700 (nitrousnrg@gmail.com) */ 76 /* HP Compaq C700 (nitrousnrg@gmail.com) */
73 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) }, 77 { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
74 /* IBM-specific AR5212 (all others) */ 78 /* IBM-specific AR5212 (all others) */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 52bed89063d4..43d2be9867fc 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1555,6 +1555,8 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1555 BIT(NL80211_IFTYPE_ADHOC) | 1555 BIT(NL80211_IFTYPE_ADHOC) |
1556 BIT(NL80211_IFTYPE_MESH_POINT); 1556 BIT(NL80211_IFTYPE_MESH_POINT);
1557 1557
1558 hw->wiphy->ps_default = false;
1559
1558 hw->queues = 4; 1560 hw->queues = 4;
1559 hw->max_rates = 4; 1561 hw->max_rates = 4;
1560 hw->channel_change_time = 5000; 1562 hw->channel_change_time = 5000;
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index ddaa859c3491..32407911842f 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -55,22 +55,6 @@
55 55
56#include "atmel.h" 56#include "atmel.h"
57 57
58/*
59 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
60 you do not define PCMCIA_DEBUG at all, all the debug code will be
61 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
62 be present but disabled -- but it can then be enabled for specific
63 modules at load time with a 'pc_debug=#' option to insmod.
64*/
65
66#ifdef PCMCIA_DEBUG
67static int pc_debug = PCMCIA_DEBUG;
68module_param(pc_debug, int, 0);
69static char *version = "$Revision: 1.2 $";
70#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args);
71#else
72#define DEBUG(n, args...)
73#endif
74 58
75/*====================================================================*/ 59/*====================================================================*/
76 60
@@ -155,11 +139,10 @@ static int atmel_probe(struct pcmcia_device *p_dev)
155{ 139{
156 local_info_t *local; 140 local_info_t *local;
157 141
158 DEBUG(0, "atmel_attach()\n"); 142 dev_dbg(&p_dev->dev, "atmel_attach()\n");
159 143
160 /* Interrupt setup */ 144 /* Interrupt setup */
161 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 145 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
162 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
163 p_dev->irq.Handler = NULL; 146 p_dev->irq.Handler = NULL;
164 147
165 /* 148 /*
@@ -194,7 +177,7 @@ static int atmel_probe(struct pcmcia_device *p_dev)
194 177
195static void atmel_detach(struct pcmcia_device *link) 178static void atmel_detach(struct pcmcia_device *link)
196{ 179{
197 DEBUG(0, "atmel_detach(0x%p)\n", link); 180 dev_dbg(&link->dev, "atmel_detach\n");
198 181
199 atmel_release(link); 182 atmel_release(link);
200 183
@@ -209,9 +192,6 @@ static void atmel_detach(struct pcmcia_device *link)
209 192
210 ======================================================================*/ 193 ======================================================================*/
211 194
212#define CS_CHECK(fn, ret) \
213do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
214
215/* Call-back function to interrogate PCMCIA-specific information 195/* Call-back function to interrogate PCMCIA-specific information
216 about the current existance of the card */ 196 about the current existance of the card */
217static int card_present(void *arg) 197static int card_present(void *arg)
@@ -275,13 +255,13 @@ static int atmel_config_check(struct pcmcia_device *p_dev,
275static int atmel_config(struct pcmcia_device *link) 255static int atmel_config(struct pcmcia_device *link)
276{ 256{
277 local_info_t *dev; 257 local_info_t *dev;
278 int last_fn, last_ret; 258 int ret;
279 struct pcmcia_device_id *did; 259 struct pcmcia_device_id *did;
280 260
281 dev = link->priv; 261 dev = link->priv;
282 did = dev_get_drvdata(&handle_to_dev(link)); 262 did = dev_get_drvdata(&link->dev);
283 263
284 DEBUG(0, "atmel_config(0x%p)\n", link); 264 dev_dbg(&link->dev, "atmel_config\n");
285 265
286 /* 266 /*
287 In this loop, we scan the CIS for configuration table entries, 267 In this loop, we scan the CIS for configuration table entries,
@@ -303,31 +283,36 @@ static int atmel_config(struct pcmcia_device *link)
303 handler to the interrupt, unless the 'Handler' member of the 283 handler to the interrupt, unless the 'Handler' member of the
304 irq structure is initialized. 284 irq structure is initialized.
305 */ 285 */
306 if (link->conf.Attributes & CONF_ENABLE_IRQ) 286 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
307 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 287 ret = pcmcia_request_irq(link, &link->irq);
288 if (ret)
289 goto failed;
290 }
308 291
309 /* 292 /*
310 This actually configures the PCMCIA socket -- setting up 293 This actually configures the PCMCIA socket -- setting up
311 the I/O windows and the interrupt mapping, and putting the 294 the I/O windows and the interrupt mapping, and putting the
312 card and host interface into "Memory and IO" mode. 295 card and host interface into "Memory and IO" mode.
313 */ 296 */
314 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 297 ret = pcmcia_request_configuration(link, &link->conf);
298 if (ret)
299 goto failed;
315 300
316 if (link->irq.AssignedIRQ == 0) { 301 if (link->irq.AssignedIRQ == 0) {
317 printk(KERN_ALERT 302 printk(KERN_ALERT
318 "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config."); 303 "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
319 goto cs_failed; 304 goto failed;
320 } 305 }
321 306
322 ((local_info_t*)link->priv)->eth_dev = 307 ((local_info_t*)link->priv)->eth_dev =
323 init_atmel_card(link->irq.AssignedIRQ, 308 init_atmel_card(link->irq.AssignedIRQ,
324 link->io.BasePort1, 309 link->io.BasePort1,
325 did ? did->driver_info : ATMEL_FW_TYPE_NONE, 310 did ? did->driver_info : ATMEL_FW_TYPE_NONE,
326 &handle_to_dev(link), 311 &link->dev,
327 card_present, 312 card_present,
328 link); 313 link);
329 if (!((local_info_t*)link->priv)->eth_dev) 314 if (!((local_info_t*)link->priv)->eth_dev)
330 goto cs_failed; 315 goto failed;
331 316
332 317
333 /* 318 /*
@@ -340,8 +325,6 @@ static int atmel_config(struct pcmcia_device *link)
340 325
341 return 0; 326 return 0;
342 327
343 cs_failed:
344 cs_error(link, last_fn, last_ret);
345 failed: 328 failed:
346 atmel_release(link); 329 atmel_release(link);
347 return -ENODEV; 330 return -ENODEV;
@@ -359,7 +342,7 @@ static void atmel_release(struct pcmcia_device *link)
359{ 342{
360 struct net_device *dev = ((local_info_t*)link->priv)->eth_dev; 343 struct net_device *dev = ((local_info_t*)link->priv)->eth_dev;
361 344
362 DEBUG(0, "atmel_release(0x%p)\n", link); 345 dev_dbg(&link->dev, "atmel_release\n");
363 346
364 if (dev) 347 if (dev)
365 stop_atmel_card(dev); 348 stop_atmel_card(dev);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 86f35827f008..098dda1a67c1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4521,9 +4521,8 @@ static int b43_op_beacon_set_tim(struct ieee80211_hw *hw,
4521{ 4521{
4522 struct b43_wl *wl = hw_to_b43_wl(hw); 4522 struct b43_wl *wl = hw_to_b43_wl(hw);
4523 4523
4524 mutex_lock(&wl->mutex); 4524 /* FIXME: add locking */
4525 b43_update_templates(wl); 4525 b43_update_templates(wl);
4526 mutex_unlock(&wl->mutex);
4527 4526
4528 return 0; 4527 return 0;
4529} 4528}
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 6c3a74964ab8..984174bc7b0f 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -65,35 +65,15 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
65 struct ssb_bus *ssb; 65 struct ssb_bus *ssb;
66 win_req_t win; 66 win_req_t win;
67 memreq_t mem; 67 memreq_t mem;
68 tuple_t tuple;
69 cisparse_t parse;
70 int err = -ENOMEM; 68 int err = -ENOMEM;
71 int res = 0; 69 int res = 0;
72 unsigned char buf[64];
73 70
74 ssb = kzalloc(sizeof(*ssb), GFP_KERNEL); 71 ssb = kzalloc(sizeof(*ssb), GFP_KERNEL);
75 if (!ssb) 72 if (!ssb)
76 goto out_error; 73 goto out_error;
77 74
78 err = -ENODEV; 75 err = -ENODEV;
79 tuple.DesiredTuple = CISTPL_CONFIG;
80 tuple.Attributes = 0;
81 tuple.TupleData = buf;
82 tuple.TupleDataMax = sizeof(buf);
83 tuple.TupleOffset = 0;
84 76
85 res = pcmcia_get_first_tuple(dev, &tuple);
86 if (res != 0)
87 goto err_kfree_ssb;
88 res = pcmcia_get_tuple_data(dev, &tuple);
89 if (res != 0)
90 goto err_kfree_ssb;
91 res = pcmcia_parse_tuple(&tuple, &parse);
92 if (res != 0)
93 goto err_kfree_ssb;
94
95 dev->conf.ConfigBase = parse.config.base;
96 dev->conf.Present = parse.config.rmask[0];
97 dev->conf.Attributes = CONF_ENABLE_IRQ; 77 dev->conf.Attributes = CONF_ENABLE_IRQ;
98 dev->conf.IntType = INT_MEMORY_AND_IO; 78 dev->conf.IntType = INT_MEMORY_AND_IO;
99 79
@@ -107,20 +87,18 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
107 win.Base = 0; 87 win.Base = 0;
108 win.Size = SSB_CORE_SIZE; 88 win.Size = SSB_CORE_SIZE;
109 win.AccessSpeed = 250; 89 win.AccessSpeed = 250;
110 res = pcmcia_request_window(&dev, &win, &dev->win); 90 res = pcmcia_request_window(dev, &win, &dev->win);
111 if (res != 0) 91 if (res != 0)
112 goto err_kfree_ssb; 92 goto err_kfree_ssb;
113 93
114 mem.CardOffset = 0; 94 mem.CardOffset = 0;
115 mem.Page = 0; 95 mem.Page = 0;
116 res = pcmcia_map_mem_page(dev->win, &mem); 96 res = pcmcia_map_mem_page(dev, dev->win, &mem);
117 if (res != 0) 97 if (res != 0)
118 goto err_disable; 98 goto err_disable;
119 99
120 dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 100 dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
121 dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
122 dev->irq.Handler = NULL; /* The handler is registered later. */ 101 dev->irq.Handler = NULL; /* The handler is registered later. */
123 dev->irq.Instance = NULL;
124 res = pcmcia_request_irq(dev, &dev->irq); 102 res = pcmcia_request_irq(dev, &dev->irq);
125 if (res != 0) 103 if (res != 0)
126 goto err_disable; 104 goto err_disable;
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index ad8eab4a639b..c9640a3e02c9 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -274,9 +274,6 @@ static int sandisk_enable_wireless(struct net_device *dev)
274 conf_reg_t reg; 274 conf_reg_t reg;
275 struct hostap_interface *iface = netdev_priv(dev); 275 struct hostap_interface *iface = netdev_priv(dev);
276 local_info_t *local = iface->local; 276 local_info_t *local = iface->local;
277 tuple_t tuple;
278 cisparse_t *parse = NULL;
279 u_char buf[64];
280 struct hostap_cs_priv *hw_priv = local->hw_priv; 277 struct hostap_cs_priv *hw_priv = local->hw_priv;
281 278
282 if (hw_priv->link->io.NumPorts1 < 0x42) { 279 if (hw_priv->link->io.NumPorts1 < 0x42) {
@@ -285,28 +282,13 @@ static int sandisk_enable_wireless(struct net_device *dev)
285 goto done; 282 goto done;
286 } 283 }
287 284
288 parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL);
289 if (parse == NULL) {
290 ret = -ENOMEM;
291 goto done;
292 }
293
294 tuple.Attributes = TUPLE_RETURN_COMMON;
295 tuple.TupleData = buf;
296 tuple.TupleDataMax = sizeof(buf);
297 tuple.TupleOffset = 0;
298
299 if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) { 285 if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) {
300 /* No SanDisk manfid found */ 286 /* No SanDisk manfid found */
301 ret = -ENODEV; 287 ret = -ENODEV;
302 goto done; 288 goto done;
303 } 289 }
304 290
305 tuple.DesiredTuple = CISTPL_LONGLINK_MFC; 291 if (hw_priv->link->socket->functions < 2) {
306 if (pcmcia_get_first_tuple(hw_priv->link, &tuple) ||
307 pcmcia_get_tuple_data(hw_priv->link, &tuple) ||
308 pcmcia_parse_tuple(&tuple, parse) ||
309 parse->longlink_mfc.nfn < 2) {
310 /* No multi-function links found */ 292 /* No multi-function links found */
311 ret = -ENODEV; 293 ret = -ENODEV;
312 goto done; 294 goto done;
@@ -354,7 +336,6 @@ static int sandisk_enable_wireless(struct net_device *dev)
354 udelay(10); 336 udelay(10);
355 337
356done: 338done:
357 kfree(parse);
358 return ret; 339 return ret;
359} 340}
360 341
@@ -529,10 +510,6 @@ static void prism2_detach(struct pcmcia_device *link)
529} 510}
530 511
531 512
532#define CS_CHECK(fn, ret) \
533do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
534
535
536/* run after a CARD_INSERTION event is received to configure the PCMCIA 513/* run after a CARD_INSERTION event is received to configure the PCMCIA
537 * socket and make the device available to the system */ 514 * socket and make the device available to the system */
538 515
@@ -624,7 +601,6 @@ static int prism2_config(struct pcmcia_device *link)
624 struct hostap_interface *iface; 601 struct hostap_interface *iface;
625 local_info_t *local; 602 local_info_t *local;
626 int ret = 1; 603 int ret = 1;
627 int last_fn, last_ret;
628 struct hostap_cs_priv *hw_priv; 604 struct hostap_cs_priv *hw_priv;
629 605
630 PDEBUG(DEBUG_FLOW, "prism2_config()\n"); 606 PDEBUG(DEBUG_FLOW, "prism2_config()\n");
@@ -636,19 +612,18 @@ static int prism2_config(struct pcmcia_device *link)
636 } 612 }
637 613
638 /* Look for an appropriate configuration table entry in the CIS */ 614 /* Look for an appropriate configuration table entry in the CIS */
639 last_ret = pcmcia_loop_config(link, prism2_config_check, NULL); 615 ret = pcmcia_loop_config(link, prism2_config_check, NULL);
640 if (last_ret) { 616 if (ret) {
641 if (!ignore_cis_vcc) 617 if (!ignore_cis_vcc)
642 printk(KERN_ERR "GetNextTuple(): No matching " 618 printk(KERN_ERR "GetNextTuple(): No matching "
643 "CIS configuration. Maybe you need the " 619 "CIS configuration. Maybe you need the "
644 "ignore_cis_vcc=1 parameter.\n"); 620 "ignore_cis_vcc=1 parameter.\n");
645 cs_error(link, RequestIO, last_ret);
646 goto failed; 621 goto failed;
647 } 622 }
648 623
649 /* Need to allocate net_device before requesting IRQ handler */ 624 /* Need to allocate net_device before requesting IRQ handler */
650 dev = prism2_init_local_data(&prism2_pccard_funcs, 0, 625 dev = prism2_init_local_data(&prism2_pccard_funcs, 0,
651 &handle_to_dev(link)); 626 &link->dev);
652 if (dev == NULL) 627 if (dev == NULL)
653 goto failed; 628 goto failed;
654 link->priv = dev; 629 link->priv = dev;
@@ -666,13 +641,11 @@ static int prism2_config(struct pcmcia_device *link)
666 * irq structure is initialized. 641 * irq structure is initialized.
667 */ 642 */
668 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 643 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
669 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | 644 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
670 IRQ_HANDLE_PRESENT;
671 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
672 link->irq.Handler = prism2_interrupt; 645 link->irq.Handler = prism2_interrupt;
673 link->irq.Instance = dev; 646 ret = pcmcia_request_irq(link, &link->irq);
674 CS_CHECK(RequestIRQ, 647 if (ret)
675 pcmcia_request_irq(link, &link->irq)); 648 goto failed;
676 } 649 }
677 650
678 /* 651 /*
@@ -680,8 +653,9 @@ static int prism2_config(struct pcmcia_device *link)
680 * the I/O windows and the interrupt mapping, and putting the 653 * the I/O windows and the interrupt mapping, and putting the
681 * card and host interface into "Memory and IO" mode. 654 * card and host interface into "Memory and IO" mode.
682 */ 655 */
683 CS_CHECK(RequestConfiguration, 656 ret = pcmcia_request_configuration(link, &link->conf);
684 pcmcia_request_configuration(link, &link->conf)); 657 if (ret)
658 goto failed;
685 659
686 dev->irq = link->irq.AssignedIRQ; 660 dev->irq = link->irq.AssignedIRQ;
687 dev->base_addr = link->io.BasePort1; 661 dev->base_addr = link->io.BasePort1;
@@ -714,9 +688,6 @@ static int prism2_config(struct pcmcia_device *link)
714 } 688 }
715 return ret; 689 return ret;
716 690
717 cs_failed:
718 cs_error(link, last_fn, last_ret);
719
720 failed: 691 failed:
721 kfree(hw_priv); 692 kfree(hw_priv);
722 prism2_release((u_long)link); 693 prism2_release((u_long)link);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 240cff1e6979..6e2fc0cb6f8a 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6029,7 +6029,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6029 struct ipw2100_priv *priv; 6029 struct ipw2100_priv *priv;
6030 struct net_device *dev; 6030 struct net_device *dev;
6031 6031
6032 dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0); 6032 dev = alloc_ieee80211(sizeof(struct ipw2100_priv));
6033 if (!dev) 6033 if (!dev)
6034 return NULL; 6034 return NULL;
6035 priv = libipw_priv(dev); 6035 priv = libipw_priv(dev);
@@ -6342,7 +6342,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6342 sysfs_remove_group(&pci_dev->dev.kobj, 6342 sysfs_remove_group(&pci_dev->dev.kobj,
6343 &ipw2100_attribute_group); 6343 &ipw2100_attribute_group);
6344 6344
6345 free_ieee80211(dev, 0); 6345 free_ieee80211(dev);
6346 pci_set_drvdata(pci_dev, NULL); 6346 pci_set_drvdata(pci_dev, NULL);
6347 } 6347 }
6348 6348
@@ -6400,7 +6400,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6400 if (dev->base_addr) 6400 if (dev->base_addr)
6401 iounmap((void __iomem *)dev->base_addr); 6401 iounmap((void __iomem *)dev->base_addr);
6402 6402
6403 free_ieee80211(dev, 0); 6403 free_ieee80211(dev);
6404 } 6404 }
6405 6405
6406 pci_release_regions(pci_dev); 6406 pci_release_regions(pci_dev);
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 827824d45de9..a6ca536e44f8 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -104,25 +104,6 @@ static int antenna = CFG_SYS_ANTENNA_BOTH;
104static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */ 104static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
105#endif 105#endif
106 106
107static struct ieee80211_rate ipw2200_rates[] = {
108 { .bitrate = 10 },
109 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
110 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
111 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
112 { .bitrate = 60 },
113 { .bitrate = 90 },
114 { .bitrate = 120 },
115 { .bitrate = 180 },
116 { .bitrate = 240 },
117 { .bitrate = 360 },
118 { .bitrate = 480 },
119 { .bitrate = 540 }
120};
121
122#define ipw2200_a_rates (ipw2200_rates + 4)
123#define ipw2200_num_a_rates 8
124#define ipw2200_bg_rates (ipw2200_rates + 0)
125#define ipw2200_num_bg_rates 12
126 107
127#ifdef CONFIG_IPW2200_QOS 108#ifdef CONFIG_IPW2200_QOS
128static int qos_enable = 0; 109static int qos_enable = 0;
@@ -8674,6 +8655,24 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
8674 * 8655 *
8675 */ 8656 */
8676 8657
8658static int ipw_wx_get_name(struct net_device *dev,
8659 struct iw_request_info *info,
8660 union iwreq_data *wrqu, char *extra)
8661{
8662 struct ipw_priv *priv = libipw_priv(dev);
8663 mutex_lock(&priv->mutex);
8664 if (priv->status & STATUS_RF_KILL_MASK)
8665 strcpy(wrqu->name, "radio off");
8666 else if (!(priv->status & STATUS_ASSOCIATED))
8667 strcpy(wrqu->name, "unassociated");
8668 else
8669 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8670 ipw_modes[priv->assoc_request.ieee_mode]);
8671 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8672 mutex_unlock(&priv->mutex);
8673 return 0;
8674}
8675
8677static int ipw_set_channel(struct ipw_priv *priv, u8 channel) 8676static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8678{ 8677{
8679 if (channel == 0) { 8678 if (channel == 0) {
@@ -9973,7 +9972,7 @@ static int ipw_wx_sw_reset(struct net_device *dev,
9973/* Rebase the WE IOCTLs to zero for the handler array */ 9972/* Rebase the WE IOCTLs to zero for the handler array */
9974#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT] 9973#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
9975static iw_handler ipw_wx_handlers[] = { 9974static iw_handler ipw_wx_handlers[] = {
9976 IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname, 9975 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9977 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, 9976 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9978 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, 9977 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9979 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, 9978 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
@@ -11417,100 +11416,16 @@ static void ipw_bg_down(struct work_struct *work)
11417/* Called by register_netdev() */ 11416/* Called by register_netdev() */
11418static int ipw_net_init(struct net_device *dev) 11417static int ipw_net_init(struct net_device *dev)
11419{ 11418{
11420 int i, rc = 0;
11421 struct ipw_priv *priv = libipw_priv(dev); 11419 struct ipw_priv *priv = libipw_priv(dev);
11422 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11423 struct wireless_dev *wdev = &priv->ieee->wdev;
11424 mutex_lock(&priv->mutex); 11420 mutex_lock(&priv->mutex);
11425 11421
11426 if (ipw_up(priv)) { 11422 if (ipw_up(priv)) {
11427 rc = -EIO; 11423 mutex_unlock(&priv->mutex);
11428 goto out; 11424 return -EIO;
11429 }
11430
11431 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11432
11433 /* fill-out priv->ieee->bg_band */
11434 if (geo->bg_channels) {
11435 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11436
11437 bg_band->band = IEEE80211_BAND_2GHZ;
11438 bg_band->n_channels = geo->bg_channels;
11439 bg_band->channels =
11440 kzalloc(geo->bg_channels *
11441 sizeof(struct ieee80211_channel), GFP_KERNEL);
11442 /* translate geo->bg to bg_band.channels */
11443 for (i = 0; i < geo->bg_channels; i++) {
11444 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11445 bg_band->channels[i].center_freq = geo->bg[i].freq;
11446 bg_band->channels[i].hw_value = geo->bg[i].channel;
11447 bg_band->channels[i].max_power = geo->bg[i].max_power;
11448 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11449 bg_band->channels[i].flags |=
11450 IEEE80211_CHAN_PASSIVE_SCAN;
11451 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11452 bg_band->channels[i].flags |=
11453 IEEE80211_CHAN_NO_IBSS;
11454 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11455 bg_band->channels[i].flags |=
11456 IEEE80211_CHAN_RADAR;
11457 /* No equivalent for LIBIPW_CH_80211H_RULES,
11458 LIBIPW_CH_UNIFORM_SPREADING, or
11459 LIBIPW_CH_B_ONLY... */
11460 }
11461 /* point at bitrate info */
11462 bg_band->bitrates = ipw2200_bg_rates;
11463 bg_band->n_bitrates = ipw2200_num_bg_rates;
11464
11465 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11466 }
11467
11468 /* fill-out priv->ieee->a_band */
11469 if (geo->a_channels) {
11470 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11471
11472 a_band->band = IEEE80211_BAND_5GHZ;
11473 a_band->n_channels = geo->a_channels;
11474 a_band->channels =
11475 kzalloc(geo->a_channels *
11476 sizeof(struct ieee80211_channel), GFP_KERNEL);
11477 /* translate geo->bg to a_band.channels */
11478 for (i = 0; i < geo->a_channels; i++) {
11479 a_band->channels[i].band = IEEE80211_BAND_2GHZ;
11480 a_band->channels[i].center_freq = geo->a[i].freq;
11481 a_band->channels[i].hw_value = geo->a[i].channel;
11482 a_band->channels[i].max_power = geo->a[i].max_power;
11483 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11484 a_band->channels[i].flags |=
11485 IEEE80211_CHAN_PASSIVE_SCAN;
11486 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11487 a_band->channels[i].flags |=
11488 IEEE80211_CHAN_NO_IBSS;
11489 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11490 a_band->channels[i].flags |=
11491 IEEE80211_CHAN_RADAR;
11492 /* No equivalent for LIBIPW_CH_80211H_RULES,
11493 LIBIPW_CH_UNIFORM_SPREADING, or
11494 LIBIPW_CH_B_ONLY... */
11495 }
11496 /* point at bitrate info */
11497 a_band->bitrates = ipw2200_a_rates;
11498 a_band->n_bitrates = ipw2200_num_a_rates;
11499
11500 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11501 }
11502
11503 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11504
11505 /* With that information in place, we can now register the wiphy... */
11506 if (wiphy_register(wdev->wiphy)) {
11507 rc = -EIO;
11508 goto out;
11509 } 11425 }
11510 11426
11511out:
11512 mutex_unlock(&priv->mutex); 11427 mutex_unlock(&priv->mutex);
11513 return rc; 11428 return 0;
11514} 11429}
11515 11430
11516/* PCI driver stuff */ 11431/* PCI driver stuff */
@@ -11641,7 +11556,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11641 if (priv->prom_net_dev) 11556 if (priv->prom_net_dev)
11642 return -EPERM; 11557 return -EPERM;
11643 11558
11644 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1); 11559 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11645 if (priv->prom_net_dev == NULL) 11560 if (priv->prom_net_dev == NULL)
11646 return -ENOMEM; 11561 return -ENOMEM;
11647 11562
@@ -11660,7 +11575,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11660 11575
11661 rc = register_netdev(priv->prom_net_dev); 11576 rc = register_netdev(priv->prom_net_dev);
11662 if (rc) { 11577 if (rc) {
11663 free_ieee80211(priv->prom_net_dev, 1); 11578 free_ieee80211(priv->prom_net_dev);
11664 priv->prom_net_dev = NULL; 11579 priv->prom_net_dev = NULL;
11665 return rc; 11580 return rc;
11666 } 11581 }
@@ -11674,7 +11589,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
11674 return; 11589 return;
11675 11590
11676 unregister_netdev(priv->prom_net_dev); 11591 unregister_netdev(priv->prom_net_dev);
11677 free_ieee80211(priv->prom_net_dev, 1); 11592 free_ieee80211(priv->prom_net_dev);
11678 11593
11679 priv->prom_net_dev = NULL; 11594 priv->prom_net_dev = NULL;
11680} 11595}
@@ -11702,7 +11617,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11702 struct ipw_priv *priv; 11617 struct ipw_priv *priv;
11703 int i; 11618 int i;
11704 11619
11705 net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0); 11620 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11706 if (net_dev == NULL) { 11621 if (net_dev == NULL) {
11707 err = -ENOMEM; 11622 err = -ENOMEM;
11708 goto out; 11623 goto out;
@@ -11850,7 +11765,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11850 pci_disable_device(pdev); 11765 pci_disable_device(pdev);
11851 pci_set_drvdata(pdev, NULL); 11766 pci_set_drvdata(pdev, NULL);
11852 out_free_ieee80211: 11767 out_free_ieee80211:
11853 free_ieee80211(priv->net_dev, 0); 11768 free_ieee80211(priv->net_dev);
11854 out: 11769 out:
11855 return err; 11770 return err;
11856} 11771}
@@ -11917,7 +11832,7 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11917 pci_release_regions(pdev); 11832 pci_release_regions(pdev);
11918 pci_disable_device(pdev); 11833 pci_disable_device(pdev);
11919 pci_set_drvdata(pdev, NULL); 11834 pci_set_drvdata(pdev, NULL);
11920 free_ieee80211(priv->net_dev, 0); 11835 free_ieee80211(priv->net_dev);
11921 free_firmware(); 11836 free_firmware();
11922} 11837}
11923 11838
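With the cfg80211 wext handler dropped, ipw2200 again reports its own SIOCGIWNAME string, choosing between "radio off", "unassociated" and an "IEEE 802.11x" mode name under the driver mutex. A tiny model of that status-to-string selection; the flag values and the mode table here are invented for the sketch, not the driver's definitions.

#include <stdio.h>

#define STATUS_RF_KILL  0x1
#define STATUS_ASSOC    0x2

static const char ipw_modes[] = { 'a', 'b', 'g' };

static void get_name(unsigned int status, int mode, char *buf, size_t len)
{
        if (status & STATUS_RF_KILL)
                snprintf(buf, len, "radio off");
        else if (!(status & STATUS_ASSOC))
                snprintf(buf, len, "unassociated");
        else
                snprintf(buf, len, "IEEE 802.11%c", ipw_modes[mode]);
}

int main(void)
{
        char name[16];

        get_name(STATUS_ASSOC, 2, name, sizeof(name));
        printf("%s\n", name);           /* IEEE 802.11g */
        get_name(STATUS_RF_KILL, 0, name, sizeof(name));
        printf("%s\n", name);           /* radio off */
        return 0;
}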
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index bf45391172f3..1e334ff6bd52 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -31,7 +31,6 @@
31#include <linux/ieee80211.h> 31#include <linux/ieee80211.h>
32 32
33#include <net/lib80211.h> 33#include <net/lib80211.h>
34#include <net/cfg80211.h>
35 34
36#define LIBIPW_VERSION "git-1.1.13" 35#define LIBIPW_VERSION "git-1.1.13"
37 36
@@ -784,15 +783,12 @@ struct libipw_geo {
784 783
785struct libipw_device { 784struct libipw_device {
786 struct net_device *dev; 785 struct net_device *dev;
787 struct wireless_dev wdev;
788 struct libipw_security sec; 786 struct libipw_security sec;
789 787
790 /* Bookkeeping structures */ 788 /* Bookkeeping structures */
791 struct libipw_stats ieee_stats; 789 struct libipw_stats ieee_stats;
792 790
793 struct libipw_geo geo; 791 struct libipw_geo geo;
794 struct ieee80211_supported_band bg_band;
795 struct ieee80211_supported_band a_band;
796 792
797 /* Probe / Beacon management */ 793 /* Probe / Beacon management */
798 struct list_head network_free_list; 794 struct list_head network_free_list;
@@ -1018,8 +1014,8 @@ static inline int libipw_is_cck_rate(u8 rate)
1018} 1014}
1019 1015
1020/* ieee80211.c */ 1016/* ieee80211.c */
1021extern void free_ieee80211(struct net_device *dev, int monitor); 1017extern void free_ieee80211(struct net_device *dev);
1022extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor); 1018extern struct net_device *alloc_ieee80211(int sizeof_priv);
1023extern int libipw_change_mtu(struct net_device *dev, int new_mtu); 1019extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
1024 1020
1025extern void libipw_networks_age(struct libipw_device *ieee, 1021extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index a0e9f6aed7da..eb2b60834c17 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -62,9 +62,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
62MODULE_AUTHOR(DRV_COPYRIGHT); 62MODULE_AUTHOR(DRV_COPYRIGHT);
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64 64
65struct cfg80211_ops libipw_config_ops = { };
66void *libipw_wiphy_privid = &libipw_wiphy_privid;
67
68static int libipw_networks_allocate(struct libipw_device *ieee) 65static int libipw_networks_allocate(struct libipw_device *ieee)
69{ 66{
70 if (ieee->networks) 67 if (ieee->networks)
@@ -143,7 +140,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
143} 140}
144EXPORT_SYMBOL(libipw_change_mtu); 141EXPORT_SYMBOL(libipw_change_mtu);
145 142
146struct net_device *alloc_ieee80211(int sizeof_priv, int monitor) 143struct net_device *alloc_ieee80211(int sizeof_priv)
147{ 144{
148 struct libipw_device *ieee; 145 struct libipw_device *ieee;
149 struct net_device *dev; 146 struct net_device *dev;
@@ -160,31 +157,10 @@ struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
160 157
161 ieee->dev = dev; 158 ieee->dev = dev;
162 159
163 if (!monitor) {
164 ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0);
165 if (!ieee->wdev.wiphy) {
166 LIBIPW_ERROR("Unable to allocate wiphy.\n");
167 goto failed_free_netdev;
168 }
169
170 ieee->dev->ieee80211_ptr = &ieee->wdev;
171 ieee->wdev.iftype = NL80211_IFTYPE_STATION;
172
173 /* Fill-out wiphy structure bits we know... Not enough info
174 here to call set_wiphy_dev or set MAC address or channel info
175 -- have to do that in ->ndo_init... */
176 ieee->wdev.wiphy->privid = libipw_wiphy_privid;
177
178 ieee->wdev.wiphy->max_scan_ssids = 1;
179 ieee->wdev.wiphy->max_scan_ie_len = 0;
180 ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
181 | BIT(NL80211_IFTYPE_ADHOC);
182 }
183
184 err = libipw_networks_allocate(ieee); 160 err = libipw_networks_allocate(ieee);
185 if (err) { 161 if (err) {
186 LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err); 162 LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err);
187 goto failed_free_wiphy; 163 goto failed_free_netdev;
188 } 164 }
189 libipw_networks_initialize(ieee); 165 libipw_networks_initialize(ieee);
190 166
@@ -217,31 +193,19 @@ struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
217 193
218 return dev; 194 return dev;
219 195
220failed_free_wiphy:
221 if (!monitor)
222 wiphy_free(ieee->wdev.wiphy);
223failed_free_netdev: 196failed_free_netdev:
224 free_netdev(dev); 197 free_netdev(dev);
225failed: 198failed:
226 return NULL; 199 return NULL;
227} 200}
228 201
229void free_ieee80211(struct net_device *dev, int monitor) 202void free_ieee80211(struct net_device *dev)
230{ 203{
231 struct libipw_device *ieee = netdev_priv(dev); 204 struct libipw_device *ieee = netdev_priv(dev);
232 205
233 lib80211_crypt_info_free(&ieee->crypt_info); 206 lib80211_crypt_info_free(&ieee->crypt_info);
234 207
235 libipw_networks_free(ieee); 208 libipw_networks_free(ieee);
236
237 /* free cfg80211 resources */
238 if (!monitor) {
239 wiphy_unregister(ieee->wdev.wiphy);
240 kfree(ieee->a_band.channels);
241 kfree(ieee->bg_band.channels);
242 wiphy_free(ieee->wdev.wiphy);
243 }
244
245 free_netdev(dev); 209 free_netdev(dev);
246} 210}
247 211
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 2716b91ba9fa..950267ab556a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -161,5 +161,6 @@ struct iwl_cfg iwl1000_bgn_cfg = {
161 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 161 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
162 .shadow_ram_support = false, 162 .shadow_ram_support = false,
163 .ht_greenfield_support = true, 163 .ht_greenfield_support = true,
164 .use_rts_for_ht = true, /* use rts/cts protection */
164}; 165};
165 166
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index c295b8ee9228..1473452ba22f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -175,6 +175,7 @@ struct iwl_cfg iwl6000h_2agn_cfg = {
175 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 175 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
176 .shadow_ram_support = true, 176 .shadow_ram_support = true,
177 .ht_greenfield_support = true, 177 .ht_greenfield_support = true,
178 .use_rts_for_ht = true, /* use rts/cts protection */
178}; 179};
179 180
180/* 181/*
@@ -198,6 +199,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
198 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 199 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
199 .shadow_ram_support = true, 200 .shadow_ram_support = true,
200 .ht_greenfield_support = true, 201 .ht_greenfield_support = true,
202 .use_rts_for_ht = true, /* use rts/cts protection */
201}; 203};
202 204
203struct iwl_cfg iwl6050_2agn_cfg = { 205struct iwl_cfg iwl6050_2agn_cfg = {
@@ -218,6 +220,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
218 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 220 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
219 .shadow_ram_support = true, 221 .shadow_ram_support = true,
220 .ht_greenfield_support = true, 222 .ht_greenfield_support = true,
223 .use_rts_for_ht = true, /* use rts/cts protection */
221}; 224};
222 225
223struct iwl_cfg iwl6000_3agn_cfg = { 226struct iwl_cfg iwl6000_3agn_cfg = {
@@ -238,6 +241,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
238 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 241 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
239 .shadow_ram_support = true, 242 .shadow_ram_support = true,
240 .ht_greenfield_support = true, 243 .ht_greenfield_support = true,
244 .use_rts_for_ht = true, /* use rts/cts protection */
241}; 245};
242 246
243struct iwl_cfg iwl6050_3agn_cfg = { 247struct iwl_cfg iwl6050_3agn_cfg = {
@@ -258,6 +262,7 @@ struct iwl_cfg iwl6050_3agn_cfg = {
258 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 262 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
259 .shadow_ram_support = true, 263 .shadow_ram_support = true,
260 .ht_greenfield_support = true, 264 .ht_greenfield_support = true,
265 .use_rts_for_ht = true, /* use rts/cts protection */
261}; 266};
262 267
263MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 268MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 346dc06fa7b7..81726ee32858 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -418,6 +418,15 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
418 else if (tid == IWL_AGG_ALL_TID) 418 else if (tid == IWL_AGG_ALL_TID)
419 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) 419 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
420 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); 420 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
421 if (priv->cfg->use_rts_for_ht) {
422 /*
423 * switch to RTS/CTS if it is the prefer protection method
424 * for HT traffic
425 */
426 IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
427 priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
428 iwlcore_commit_rxon(priv);
429 }
421} 430}
422 431
423static inline int get_num_of_ant_from_rate(u32 rate_n_flags) 432static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
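The iwl-agn-rs.c hunk switches protection to RTS/CTS when the configuration sets use_rts_for_ht, by clearing the self-CTS bit in the staging RXON flags and committing. A small flag-manipulation model of that switch; the bit value and the commit helper below are illustrative only.

#include <stdio.h>

#define RXON_FLG_SELF_CTS_EN    0x40000000u

struct cfg  { int use_rts_for_ht; };
struct priv { struct cfg *cfg; unsigned int staging_rxon_flags; };

static void commit_rxon(struct priv *p)
{
        /* In the driver this sends the RXON command; here we just print it. */
        printf("RXON flags: 0x%08x (self-CTS %s)\n", p->staging_rxon_flags,
               p->staging_rxon_flags & RXON_FLG_SELF_CTS_EN ? "on" : "off");
}

static void turn_on_agg(struct priv *p)
{
        if (p->cfg->use_rts_for_ht) {
                /* Prefer RTS/CTS over CTS-to-self for HT traffic. */
                p->staging_rxon_flags &= ~RXON_FLG_SELF_CTS_EN;
                commit_rxon(p);
        }
}

int main(void)
{
        struct cfg c = { .use_rts_for_ht = 1 };
        struct priv p = { .cfg = &c, .staging_rxon_flags = RXON_FLG_SELF_CTS_EN };

        turn_on_agg(&p);
        return 0;
}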
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index eaafae091f5b..921dc4a26fe2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -116,9 +116,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
116 116
117 /* always get timestamp with Rx frame */ 117 /* always get timestamp with Rx frame */
118 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; 118 priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
119 /* allow CTS-to-self if possible. this is relevant only for
120 * 5000, but will not damage 4965 */
121 priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
122 119
123 ret = iwl_check_rxon_cmd(priv); 120 ret = iwl_check_rxon_cmd(priv);
124 if (ret) { 121 if (ret) {
@@ -218,6 +215,13 @@ int iwl_commit_rxon(struct iwl_priv *priv)
218 "Could not send WEP static key.\n"); 215 "Could not send WEP static key.\n");
219 } 216 }
220 217
218 /*
219 * allow CTS-to-self if possible for new association.
220 * this is relevant only for 5000 series and up,
221 * but will not damage 4965
222 */
223 priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
224
221 /* Apply the new configuration 225 /* Apply the new configuration
222 * RXON assoc doesn't clear the station table in uCode, 226 * RXON assoc doesn't clear the station table in uCode,
223 */ 227 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index e50103a956b1..7754538c2194 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -213,6 +213,7 @@ struct iwl_mod_params {
213 * @pa_type: used by 6000 series only to identify the type of Power Amplifier 213 * @pa_type: used by 6000 series only to identify the type of Power Amplifier
214 * @max_ll_items: max number of OTP blocks 214 * @max_ll_items: max number of OTP blocks
215 * @shadow_ram_support: shadow support for OTP memory 215 * @shadow_ram_support: shadow support for OTP memory
216 * @use_rts_for_ht: use rts/cts protection for HT traffic
216 * 217 *
217 * We enable the driver to be backward compatible wrt API version. The 218 * We enable the driver to be backward compatible wrt API version. The
218 * driver specifies which APIs it supports (with @ucode_api_max being the 219 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -255,6 +256,7 @@ struct iwl_cfg {
255 const bool shadow_ram_support; 256 const bool shadow_ram_support;
256 const bool ht_greenfield_support; 257 const bool ht_greenfield_support;
257 const bool broken_powersave; 258 const bool broken_powersave;
259 bool use_rts_for_ht;
258}; 260};
259 261
260/*************************** 262/***************************
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index fb9bcfa6d947..b7e196e3c8d3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1277,8 +1277,16 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1277 return -ENXIO; 1277 return -ENXIO;
1278 } 1278 }
1279 1279
1280 if (priv->stations[sta_id].tid[tid].agg.state ==
1281 IWL_EMPTYING_HW_QUEUE_ADDBA) {
1282 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
1283 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
1284 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1285 return 0;
1286 }
1287
1280 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON) 1288 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1281 IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n"); 1289 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
1282 1290
1283 tid_data = &priv->stations[sta_id].tid[tid]; 1291 tid_data = &priv->stations[sta_id].tid[tid];
1284 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; 1292 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
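iwl_tx_agg_stop() now recognizes a stop request that arrives while the session is still in the "emptying HW queue for ADDBA" setup state: it acknowledges the stop at once and resets the state to off rather than warning and tearing down a session that never came up. A toy state machine showing only that transition; the state names mirror the driver, everything else is invented.

#include <stdio.h>

enum agg_state { AGG_OFF, EMPTYING_HW_QUEUE_ADDBA, AGG_ON };

static int agg_stop(enum agg_state *state)
{
        if (*state == EMPTYING_HW_QUEUE_ADDBA) {
                /* Stop requested before setup finished: just undo it. */
                *state = AGG_OFF;
                return 0;
        }
        if (*state != AGG_ON)
                fprintf(stderr, "stopping AGG while state not ON or starting\n");

        /* ... normal teardown: flush the queue, signal the stop ... */
        *state = AGG_OFF;
        return 0;
}

int main(void)
{
        enum agg_state s = EMPTYING_HW_QUEUE_ADDBA;

        agg_stop(&s);
        printf("state after stop: %d\n", s);    /* 0 == AGG_OFF */
        return 0;
}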
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 039b555e4d76..53d56ab83c03 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -169,16 +169,19 @@ static int lbs_ethtool_set_wol(struct net_device *dev,
169 struct lbs_private *priv = dev->ml_priv; 169 struct lbs_private *priv = dev->ml_priv;
170 uint32_t criteria = 0; 170 uint32_t criteria = 0;
171 171
172 if (priv->wol_criteria == 0xffffffff && wol->wolopts)
173 return -EOPNOTSUPP;
174
175 if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY)) 172 if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY))
176 return -EOPNOTSUPP; 173 return -EOPNOTSUPP;
177 174
178 if (wol->wolopts & WAKE_UCAST) criteria |= EHS_WAKE_ON_UNICAST_DATA; 175 if (wol->wolopts & WAKE_UCAST)
179 if (wol->wolopts & WAKE_MCAST) criteria |= EHS_WAKE_ON_MULTICAST_DATA; 176 criteria |= EHS_WAKE_ON_UNICAST_DATA;
180 if (wol->wolopts & WAKE_BCAST) criteria |= EHS_WAKE_ON_BROADCAST_DATA; 177 if (wol->wolopts & WAKE_MCAST)
181 if (wol->wolopts & WAKE_PHY) criteria |= EHS_WAKE_ON_MAC_EVENT; 178 criteria |= EHS_WAKE_ON_MULTICAST_DATA;
179 if (wol->wolopts & WAKE_BCAST)
180 criteria |= EHS_WAKE_ON_BROADCAST_DATA;
181 if (wol->wolopts & WAKE_PHY)
182 criteria |= EHS_WAKE_ON_MAC_EVENT;
183 if (wol->wolopts == 0)
184 criteria |= EHS_REMOVE_WAKEUP;
182 185
183 return lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL); 186 return lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL);
184} 187}
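The ethtool hunk above puts each WAKE_* option on its own line and adds the missing case: a wolopts of zero now maps to EHS_REMOVE_WAKEUP, so "disable WOL" actually reaches the firmware instead of being rejected up front. A compact model of the mapping; the constant values below are placeholders, not the kernel's or the firmware's definitions.

#include <stdio.h>

#define WAKE_UCAST  0x1
#define WAKE_MCAST  0x2
#define WAKE_BCAST  0x4
#define WAKE_PHY    0x8

#define EHS_WAKE_ON_UNICAST_DATA    0x01
#define EHS_WAKE_ON_MULTICAST_DATA  0x02
#define EHS_WAKE_ON_BROADCAST_DATA  0x04
#define EHS_WAKE_ON_MAC_EVENT       0x08
#define EHS_REMOVE_WAKEUP           0x10

static unsigned int wol_to_criteria(unsigned int wolopts)
{
        unsigned int criteria = 0;

        if (wolopts & WAKE_UCAST)
                criteria |= EHS_WAKE_ON_UNICAST_DATA;
        if (wolopts & WAKE_MCAST)
                criteria |= EHS_WAKE_ON_MULTICAST_DATA;
        if (wolopts & WAKE_BCAST)
                criteria |= EHS_WAKE_ON_BROADCAST_DATA;
        if (wolopts & WAKE_PHY)
                criteria |= EHS_WAKE_ON_MAC_EVENT;
        if (wolopts == 0)
                criteria |= EHS_REMOVE_WAKEUP;  /* explicit "turn it off" */

        return criteria;
}

int main(void)
{
        printf("0x%x\n", wol_to_criteria(WAKE_UCAST | WAKE_PHY));
        printf("0x%x\n", wol_to_criteria(0));
        return 0;
}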
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 62381768f2d5..b1d84592b959 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -590,7 +590,7 @@ static int if_cs_prog_helper(struct if_cs_card *card)
590 590
591 /* TODO: make firmware file configurable */ 591 /* TODO: make firmware file configurable */
592 ret = request_firmware(&fw, "libertas_cs_helper.fw", 592 ret = request_firmware(&fw, "libertas_cs_helper.fw",
593 &handle_to_dev(card->p_dev)); 593 &card->p_dev->dev);
594 if (ret) { 594 if (ret) {
595 lbs_pr_err("can't load helper firmware\n"); 595 lbs_pr_err("can't load helper firmware\n");
596 ret = -ENODEV; 596 ret = -ENODEV;
@@ -663,7 +663,7 @@ static int if_cs_prog_real(struct if_cs_card *card)
663 663
664 /* TODO: make firmware file configurable */ 664 /* TODO: make firmware file configurable */
665 ret = request_firmware(&fw, "libertas_cs.fw", 665 ret = request_firmware(&fw, "libertas_cs.fw",
666 &handle_to_dev(card->p_dev)); 666 &card->p_dev->dev);
667 if (ret) { 667 if (ret) {
668 lbs_pr_err("can't load firmware\n"); 668 lbs_pr_err("can't load firmware\n");
669 ret = -ENODEV; 669 ret = -ENODEV;
@@ -793,18 +793,37 @@ static void if_cs_release(struct pcmcia_device *p_dev)
793 * configure the card at this point -- we wait until we receive a card 793 * configure the card at this point -- we wait until we receive a card
794 * insertion event. 794 * insertion event.
795 */ 795 */
796
797static int if_cs_ioprobe(struct pcmcia_device *p_dev,
798 cistpl_cftable_entry_t *cfg,
799 cistpl_cftable_entry_t *dflt,
800 unsigned int vcc,
801 void *priv_data)
802{
803 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
804 p_dev->io.BasePort1 = cfg->io.win[0].base;
805 p_dev->io.NumPorts1 = cfg->io.win[0].len;
806
807 /* Do we need to allocate an interrupt? */
808 if (cfg->irq.IRQInfo1)
809 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
810
811 /* IO window settings */
812 if (cfg->io.nwin != 1) {
813 lbs_pr_err("wrong CIS (check number of IO windows)\n");
814 return -ENODEV;
815 }
816
817 /* This reserves IO space but doesn't actually enable it */
818 return pcmcia_request_io(p_dev, &p_dev->io);
819}
820
796static int if_cs_probe(struct pcmcia_device *p_dev) 821static int if_cs_probe(struct pcmcia_device *p_dev)
797{ 822{
798 int ret = -ENOMEM; 823 int ret = -ENOMEM;
799 unsigned int prod_id; 824 unsigned int prod_id;
800 struct lbs_private *priv; 825 struct lbs_private *priv;
801 struct if_cs_card *card; 826 struct if_cs_card *card;
802 /* CIS parsing */
803 tuple_t tuple;
804 cisparse_t parse;
805 cistpl_cftable_entry_t *cfg = &parse.cftable_entry;
806 cistpl_io_t *io = &cfg->io;
807 u_char buf[64];
808 827
809 lbs_deb_enter(LBS_DEB_CS); 828 lbs_deb_enter(LBS_DEB_CS);
810 829
@@ -818,48 +837,15 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
818 837
819 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 838 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
820 p_dev->irq.Handler = NULL; 839 p_dev->irq.Handler = NULL;
821 p_dev->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
822 840
823 p_dev->conf.Attributes = 0; 841 p_dev->conf.Attributes = 0;
824 p_dev->conf.IntType = INT_MEMORY_AND_IO; 842 p_dev->conf.IntType = INT_MEMORY_AND_IO;
825 843
826 tuple.Attributes = 0; 844 if (pcmcia_loop_config(p_dev, if_cs_ioprobe, NULL)) {
827 tuple.TupleData = buf; 845 lbs_pr_err("error in pcmcia_loop_config\n");
828 tuple.TupleDataMax = sizeof(buf);
829 tuple.TupleOffset = 0;
830
831 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
832 if ((ret = pcmcia_get_first_tuple(p_dev, &tuple)) != 0 ||
833 (ret = pcmcia_get_tuple_data(p_dev, &tuple)) != 0 ||
834 (ret = pcmcia_parse_tuple(&tuple, &parse)) != 0)
835 {
836 lbs_pr_err("error in pcmcia_get_first_tuple etc\n");
837 goto out1;
838 }
839
840 p_dev->conf.ConfigIndex = cfg->index;
841
842 /* Do we need to allocate an interrupt? */
843 if (cfg->irq.IRQInfo1) {
844 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
845 }
846
847 /* IO window settings */
848 if (cfg->io.nwin != 1) {
849 lbs_pr_err("wrong CIS (check number of IO windows)\n");
850 ret = -ENODEV;
851 goto out1; 846 goto out1;
852 } 847 }
853 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
854 p_dev->io.BasePort1 = io->win[0].base;
855 p_dev->io.NumPorts1 = io->win[0].len;
856 848
857 /* This reserves IO space but doesn't actually enable it */
858 ret = pcmcia_request_io(p_dev, &p_dev->io);
859 if (ret) {
860 lbs_pr_err("error in pcmcia_request_io\n");
861 goto out1;
862 }
863 849
864 /* 850 /*
865 * Allocate an interrupt line. Note that this does not assign 851 * Allocate an interrupt line. Note that this does not assign
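
[Editor's note: the libertas change above replaces open-coded CIS tuple walking (pcmcia_get_first_tuple / pcmcia_get_tuple_data / pcmcia_parse_tuple) with a per-entry callback handed to pcmcia_loop_config(). A minimal sketch of that callback pattern follows, assuming the PCMCIA client API as it appears in this diff (struct pcmcia_device with io/conf members, pcmcia_loop_config(), pcmcia_request_io()); the foo_ prefix is illustrative, not part of any driver.]

	/* Sketch only: called once per CFTABLE entry by pcmcia_loop_config(). */
	static int foo_ioprobe(struct pcmcia_device *p_dev,
			       cistpl_cftable_entry_t *cfg,
			       cistpl_cftable_entry_t *dflt,
			       unsigned int vcc,
			       void *priv_data)
	{
		if (cfg->io.nwin != 1)		/* reject entries with an unexpected IO layout */
			return -ENODEV;

		p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
		p_dev->io.BasePort1   = cfg->io.win[0].base;
		p_dev->io.NumPorts1   = cfg->io.win[0].len;

		if (cfg->irq.IRQInfo1)
			p_dev->conf.Attributes |= CONF_ENABLE_IRQ;

		/* Returning 0 stops the loop; nonzero makes it try the next entry. */
		return pcmcia_request_io(p_dev, &p_dev->io);
	}

	/* In the probe routine, the whole tuple-parsing block then collapses to: */
	if (pcmcia_loop_config(p_dev, foo_ioprobe, NULL))
		goto out1;
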
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index 9498b46c99a4..e61e6b9440ab 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -145,23 +145,6 @@ static const unsigned int txConfEUD = 0x10; /* Enable Uni-Data packets */
145static const unsigned int txConfKey = 0x02; /* Scramble data packets */ 145static const unsigned int txConfKey = 0x02; /* Scramble data packets */
146static const unsigned int txConfLoop = 0x01; /* Loopback mode */ 146static const unsigned int txConfLoop = 0x01; /* Loopback mode */
147 147
148/*
149 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
150 you do not define PCMCIA_DEBUG at all, all the debug code will be
151 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
152 be present but disabled -- but it can then be enabled for specific
153 modules at load time with a 'pc_debug=#' option to insmod.
154*/
155
156#ifdef PCMCIA_DEBUG
157static int pc_debug = PCMCIA_DEBUG;
158module_param(pc_debug, int, 0);
159#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
160static char *version =
161"netwave_cs.c 0.3.0 Thu Jul 17 14:36:02 1997 (John Markus Bjørndalen)\n";
162#else
163#define DEBUG(n, args...)
164#endif
165 148
166/*====================================================================*/ 149/*====================================================================*/
167 150
@@ -383,7 +366,7 @@ static int netwave_probe(struct pcmcia_device *link)
383 struct net_device *dev; 366 struct net_device *dev;
384 netwave_private *priv; 367 netwave_private *priv;
385 368
386 DEBUG(0, "netwave_attach()\n"); 369 dev_dbg(&link->dev, "netwave_attach()\n");
387 370
388 /* Initialize the struct pcmcia_device structure */ 371 /* Initialize the struct pcmcia_device structure */
389 dev = alloc_etherdev(sizeof(netwave_private)); 372 dev = alloc_etherdev(sizeof(netwave_private));
@@ -401,8 +384,7 @@ static int netwave_probe(struct pcmcia_device *link)
401 link->io.IOAddrLines = 5; 384 link->io.IOAddrLines = 5;
402 385
403 /* Interrupt setup */ 386 /* Interrupt setup */
404 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 387 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
405 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
406 link->irq.Handler = &netwave_interrupt; 388 link->irq.Handler = &netwave_interrupt;
407 389
408 /* General socket configuration */ 390 /* General socket configuration */
@@ -421,8 +403,6 @@ static int netwave_probe(struct pcmcia_device *link)
421 403
422 dev->watchdog_timeo = TX_TIMEOUT; 404 dev->watchdog_timeo = TX_TIMEOUT;
423 405
424 link->irq.Instance = dev;
425
426 return netwave_pcmcia_config( link); 406 return netwave_pcmcia_config( link);
427} /* netwave_attach */ 407} /* netwave_attach */
428 408
@@ -438,7 +418,7 @@ static void netwave_detach(struct pcmcia_device *link)
438{ 418{
439 struct net_device *dev = link->priv; 419 struct net_device *dev = link->priv;
440 420
441 DEBUG(0, "netwave_detach(0x%p)\n", link); 421 dev_dbg(&link->dev, "netwave_detach\n");
442 422
443 netwave_release(link); 423 netwave_release(link);
444 424
@@ -725,18 +705,15 @@ static const struct iw_handler_def netwave_handler_def =
725 * 705 *
726 */ 706 */
727 707
728#define CS_CHECK(fn, ret) \
729do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
730
731static int netwave_pcmcia_config(struct pcmcia_device *link) { 708static int netwave_pcmcia_config(struct pcmcia_device *link) {
732 struct net_device *dev = link->priv; 709 struct net_device *dev = link->priv;
733 netwave_private *priv = netdev_priv(dev); 710 netwave_private *priv = netdev_priv(dev);
734 int i, j, last_ret, last_fn; 711 int i, j, ret;
735 win_req_t req; 712 win_req_t req;
736 memreq_t mem; 713 memreq_t mem;
737 u_char __iomem *ramBase = NULL; 714 u_char __iomem *ramBase = NULL;
738 715
739 DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link); 716 dev_dbg(&link->dev, "netwave_pcmcia_config\n");
740 717
741 /* 718 /*
742 * Try allocating IO ports. This tries a few fixed addresses. 719 * Try allocating IO ports. This tries a few fixed addresses.
@@ -749,22 +726,24 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
749 if (i == 0) 726 if (i == 0)
750 break; 727 break;
751 } 728 }
752 if (i != 0) { 729 if (i != 0)
753 cs_error(link, RequestIO, i);
754 goto failed; 730 goto failed;
755 }
756 731
757 /* 732 /*
758 * Now allocate an interrupt line. Note that this does not 733 * Now allocate an interrupt line. Note that this does not
759 * actually assign a handler to the interrupt. 734 * actually assign a handler to the interrupt.
760 */ 735 */
761 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 736 ret = pcmcia_request_irq(link, &link->irq);
737 if (ret)
738 goto failed;
762 739
763 /* 740 /*
764 * This actually configures the PCMCIA socket -- setting up 741 * This actually configures the PCMCIA socket -- setting up
765 * the I/O windows and the interrupt mapping. 742 * the I/O windows and the interrupt mapping.
766 */ 743 */
767 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 744 ret = pcmcia_request_configuration(link, &link->conf);
745 if (ret)
746 goto failed;
768 747
769 /* 748 /*
770 * Allocate a 32K memory window. Note that the struct pcmcia_device 749 * Allocate a 32K memory window. Note that the struct pcmcia_device
@@ -772,14 +751,18 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
772 * device needs several windows, you'll need to keep track of 751 * device needs several windows, you'll need to keep track of
773 * the handles in your private data structure, dev->priv. 752 * the handles in your private data structure, dev->priv.
774 */ 753 */
775 DEBUG(1, "Setting mem speed of %d\n", mem_speed); 754 dev_dbg(&link->dev, "Setting mem speed of %d\n", mem_speed);
776 755
777 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE; 756 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_CM|WIN_ENABLE;
778 req.Base = 0; req.Size = 0x8000; 757 req.Base = 0; req.Size = 0x8000;
779 req.AccessSpeed = mem_speed; 758 req.AccessSpeed = mem_speed;
780 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); 759 ret = pcmcia_request_window(link, &req, &link->win);
760 if (ret)
761 goto failed;
781 mem.CardOffset = 0x20000; mem.Page = 0; 762 mem.CardOffset = 0x20000; mem.Page = 0;
782 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); 763 ret = pcmcia_map_mem_page(link, link->win, &mem);
764 if (ret)
765 goto failed;
783 766
784 /* Store base address of the common window frame */ 767 /* Store base address of the common window frame */
785 ramBase = ioremap(req.Base, 0x8000); 768 ramBase = ioremap(req.Base, 0x8000);
@@ -787,7 +770,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
787 770
788 dev->irq = link->irq.AssignedIRQ; 771 dev->irq = link->irq.AssignedIRQ;
789 dev->base_addr = link->io.BasePort1; 772 dev->base_addr = link->io.BasePort1;
790 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 773 SET_NETDEV_DEV(dev, &link->dev);
791 774
792 if (register_netdev(dev) != 0) { 775 if (register_netdev(dev) != 0) {
793 printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n"); 776 printk(KERN_DEBUG "netwave_cs: register_netdev() failed\n");
@@ -818,8 +801,6 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
818 get_uint16(ramBase + NETWAVE_EREG_ARW+2)); 801 get_uint16(ramBase + NETWAVE_EREG_ARW+2));
819 return 0; 802 return 0;
820 803
821cs_failed:
822 cs_error(link, last_fn, last_ret);
823failed: 804failed:
824 netwave_release(link); 805 netwave_release(link);
825 return -ENODEV; 806 return -ENODEV;
@@ -837,7 +818,7 @@ static void netwave_release(struct pcmcia_device *link)
837 struct net_device *dev = link->priv; 818 struct net_device *dev = link->priv;
838 netwave_private *priv = netdev_priv(dev); 819 netwave_private *priv = netdev_priv(dev);
839 820
840 DEBUG(0, "netwave_release(0x%p)\n", link); 821 dev_dbg(&link->dev, "netwave_release\n");
841 822
842 pcmcia_disable_device(link); 823 pcmcia_disable_device(link);
843 if (link->win) 824 if (link->win)
@@ -892,7 +873,7 @@ static void netwave_reset(struct net_device *dev) {
892 u_char __iomem *ramBase = priv->ramBase; 873 u_char __iomem *ramBase = priv->ramBase;
893 unsigned int iobase = dev->base_addr; 874 unsigned int iobase = dev->base_addr;
894 875
895 DEBUG(0, "netwave_reset: Done with hardware reset\n"); 876 pr_debug("netwave_reset: Done with hardware reset\n");
896 877
897 priv->timeoutCounter = 0; 878 priv->timeoutCounter = 0;
898 879
@@ -988,7 +969,7 @@ static int netwave_hw_xmit(unsigned char* data, int len,
988 969
989 dev->stats.tx_bytes += len; 970 dev->stats.tx_bytes += len;
990 971
991 DEBUG(3, "Transmitting with SPCQ %x SPU %x LIF %x ISPLQ %x\n", 972 pr_debug("Transmitting with SPCQ %x SPU %x LIF %x ISPLQ %x\n",
992 readb(ramBase + NETWAVE_EREG_SPCQ), 973 readb(ramBase + NETWAVE_EREG_SPCQ),
993 readb(ramBase + NETWAVE_EREG_SPU), 974 readb(ramBase + NETWAVE_EREG_SPU),
994 readb(ramBase + NETWAVE_EREG_LIF), 975 readb(ramBase + NETWAVE_EREG_LIF),
@@ -1000,7 +981,7 @@ static int netwave_hw_xmit(unsigned char* data, int len,
1000 MaxData = get_uint16(ramBase + NETWAVE_EREG_TDP+2); 981 MaxData = get_uint16(ramBase + NETWAVE_EREG_TDP+2);
1001 DataOffset = get_uint16(ramBase + NETWAVE_EREG_TDP+4); 982 DataOffset = get_uint16(ramBase + NETWAVE_EREG_TDP+4);
1002 983
1003 DEBUG(3, "TxFreeList %x, MaxData %x, DataOffset %x\n", 984 pr_debug("TxFreeList %x, MaxData %x, DataOffset %x\n",
1004 TxFreeList, MaxData, DataOffset); 985 TxFreeList, MaxData, DataOffset);
1005 986
1006 /* Copy packet to the adapter fragment buffers */ 987 /* Copy packet to the adapter fragment buffers */
@@ -1088,7 +1069,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1088 status = inb(iobase + NETWAVE_REG_ASR); 1069 status = inb(iobase + NETWAVE_REG_ASR);
1089 1070
1090 if (!pcmcia_dev_present(link)) { 1071 if (!pcmcia_dev_present(link)) {
1091 DEBUG(1, "netwave_interrupt: Interrupt with status 0x%x " 1072 pr_debug("netwave_interrupt: Interrupt with status 0x%x "
1092 "from removed or suspended card!\n", status); 1073 "from removed or suspended card!\n", status);
1093 break; 1074 break;
1094 } 1075 }
@@ -1132,7 +1113,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1132 int txStatus; 1113 int txStatus;
1133 1114
1134 txStatus = readb(ramBase + NETWAVE_EREG_TSER); 1115 txStatus = readb(ramBase + NETWAVE_EREG_TSER);
1135 DEBUG(3, "Transmit done. TSER = %x id %x\n", 1116 pr_debug("Transmit done. TSER = %x id %x\n",
1136 txStatus, readb(ramBase + NETWAVE_EREG_TSER + 1)); 1117 txStatus, readb(ramBase + NETWAVE_EREG_TSER + 1));
1137 1118
1138 if (txStatus & 0x20) { 1119 if (txStatus & 0x20) {
@@ -1156,7 +1137,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1156 * TxGU and TxNOAP is set. (Those are the only ones 1137 * TxGU and TxNOAP is set. (Those are the only ones
1157 * to set TxErr). 1138 * to set TxErr).
1158 */ 1139 */
1159 DEBUG(3, "netwave_interrupt: TxDN with error status %x\n", 1140 pr_debug("netwave_interrupt: TxDN with error status %x\n",
1160 txStatus); 1141 txStatus);
1161 1142
1162 /* Clear out TxGU, TxNOAP, TxErr and TxTrys */ 1143 /* Clear out TxGU, TxNOAP, TxErr and TxTrys */
@@ -1164,7 +1145,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1164 writeb(0xdf & txStatus, ramBase+NETWAVE_EREG_TSER+4); 1145 writeb(0xdf & txStatus, ramBase+NETWAVE_EREG_TSER+4);
1165 ++dev->stats.tx_errors; 1146 ++dev->stats.tx_errors;
1166 } 1147 }
1167 DEBUG(3, "New status is TSER %x ASR %x\n", 1148 pr_debug("New status is TSER %x ASR %x\n",
1168 readb(ramBase + NETWAVE_EREG_TSER), 1149 readb(ramBase + NETWAVE_EREG_TSER),
1169 inb(iobase + NETWAVE_REG_ASR)); 1150 inb(iobase + NETWAVE_REG_ASR));
1170 1151
@@ -1172,7 +1153,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1172 } 1153 }
1173 /* TxBA, this would trigger on all error packets received */ 1154 /* TxBA, this would trigger on all error packets received */
1174 /* if (status & 0x01) { 1155 /* if (status & 0x01) {
1175 DEBUG(4, "Transmit buffers available, %x\n", status); 1156 pr_debug("Transmit buffers available, %x\n", status);
1176 } 1157 }
1177 */ 1158 */
1178 } 1159 }
@@ -1190,7 +1171,7 @@ static irqreturn_t netwave_interrupt(int irq, void* dev_id)
1190 */ 1171 */
1191static void netwave_watchdog(struct net_device *dev) { 1172static void netwave_watchdog(struct net_device *dev) {
1192 1173
1193 DEBUG(1, "%s: netwave_watchdog: watchdog timer expired\n", dev->name); 1174 pr_debug("%s: netwave_watchdog: watchdog timer expired\n", dev->name);
1194 netwave_reset(dev); 1175 netwave_reset(dev);
1195 dev->trans_start = jiffies; 1176 dev->trans_start = jiffies;
1196 netif_wake_queue(dev); 1177 netif_wake_queue(dev);
@@ -1211,7 +1192,7 @@ static int netwave_rx(struct net_device *dev)
1211 int i; 1192 int i;
1212 u_char *ptr; 1193 u_char *ptr;
1213 1194
1214 DEBUG(3, "xinw_rx: Receiving ... \n"); 1195 pr_debug("xinw_rx: Receiving ... \n");
1215 1196
1216 /* Receive max 10 packets for now. */ 1197 /* Receive max 10 packets for now. */
1217 for (i = 0; i < 10; i++) { 1198 for (i = 0; i < 10; i++) {
@@ -1237,7 +1218,7 @@ static int netwave_rx(struct net_device *dev)
1237 1218
1238 skb = dev_alloc_skb(rcvLen+5); 1219 skb = dev_alloc_skb(rcvLen+5);
1239 if (skb == NULL) { 1220 if (skb == NULL) {
1240 DEBUG(1, "netwave_rx: Could not allocate an sk_buff of " 1221 pr_debug("netwave_rx: Could not allocate an sk_buff of "
1241 "length %d\n", rcvLen); 1222 "length %d\n", rcvLen);
1242 ++dev->stats.rx_dropped; 1223 ++dev->stats.rx_dropped;
1243 /* Tell the adapter to skip the packet */ 1224 /* Tell the adapter to skip the packet */
@@ -1279,7 +1260,7 @@ static int netwave_rx(struct net_device *dev)
1279 wait_WOC(iobase); 1260 wait_WOC(iobase);
1280 writeb(NETWAVE_CMD_SRP, ramBase + NETWAVE_EREG_CB + 0); 1261 writeb(NETWAVE_CMD_SRP, ramBase + NETWAVE_EREG_CB + 0);
1281 writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1); 1262 writeb(NETWAVE_CMD_EOC, ramBase + NETWAVE_EREG_CB + 1);
1282 DEBUG(3, "Packet reception ok\n"); 1263 pr_debug("Packet reception ok\n");
1283 } 1264 }
1284 return 0; 1265 return 0;
1285} 1266}
@@ -1288,7 +1269,7 @@ static int netwave_open(struct net_device *dev) {
1288 netwave_private *priv = netdev_priv(dev); 1269 netwave_private *priv = netdev_priv(dev);
1289 struct pcmcia_device *link = priv->p_dev; 1270 struct pcmcia_device *link = priv->p_dev;
1290 1271
1291 DEBUG(1, "netwave_open: starting.\n"); 1272 dev_dbg(&link->dev, "netwave_open: starting.\n");
1292 1273
1293 if (!pcmcia_dev_present(link)) 1274 if (!pcmcia_dev_present(link))
1294 return -ENODEV; 1275 return -ENODEV;
@@ -1305,7 +1286,7 @@ static int netwave_close(struct net_device *dev) {
1305 netwave_private *priv = netdev_priv(dev); 1286 netwave_private *priv = netdev_priv(dev);
1306 struct pcmcia_device *link = priv->p_dev; 1287 struct pcmcia_device *link = priv->p_dev;
1307 1288
1308 DEBUG(1, "netwave_close: finishing.\n"); 1289 dev_dbg(&link->dev, "netwave_close: finishing.\n");
1309 1290
1310 link->open--; 1291 link->open--;
1311 netif_stop_queue(dev); 1292 netif_stop_queue(dev);
@@ -1358,11 +1339,11 @@ static void set_multicast_list(struct net_device *dev)
1358 u_char rcvMode = 0; 1339 u_char rcvMode = 0;
1359 1340
1360#ifdef PCMCIA_DEBUG 1341#ifdef PCMCIA_DEBUG
1361 if (pc_debug > 2) { 1342 {
 1362 static int old; 1343 static int old;
 1362 static int old; 1343 static int old;
1363 if (old != dev->mc_count) { 1344 if (old != dev->mc_count) {
1364 old = dev->mc_count; 1345 old = dev->mc_count;
1365 DEBUG(0, "%s: setting Rx mode to %d addresses.\n", 1346 pr_debug("%s: setting Rx mode to %d addresses.\n",
1366 dev->name, dev->mc_count); 1347 dev->name, dev->mc_count);
1367 } 1348 }
1368 } 1349 }
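
[Editor's note: the netwave conversion follows the two patterns repeated across this series: the CS_CHECK() macro (which stashed last_fn/last_ret and jumped to cs_failed, where cs_error() reported the failure) is replaced by plain return-code checks that jump to the existing failed label, and the driver-private DEBUG()/pc_debug machinery is replaced by dev_dbg() where a struct device is at hand and pr_debug() where it is not. A compressed before/after sketch, assuming only the generic dev_dbg()/pr_debug() helpers and a failed: label already present in the function:]

	/* Before: the macro hides control flow and relies on cs_error() for reporting. */
	#define CS_CHECK(fn, ret) \
		do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

		CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));

	/* After: the return value is checked in the open, and the messages are
	 * ordinary debug prints (enabled via dynamic debug or a DEBUG build). */
		ret = pcmcia_request_irq(link, &link->irq);
		if (ret)
			goto failed;

		dev_dbg(&link->dev, "netwave_pcmcia_config\n");	/* device-prefixed */
		pr_debug("Transmit done. TSER = %x\n", txStatus);	/* no device at hand */
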
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 38c1c9d2abb8..f27bb8367c98 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -109,7 +109,7 @@ orinoco_cs_probe(struct pcmcia_device *link)
109 struct orinoco_private *priv; 109 struct orinoco_private *priv;
110 struct orinoco_pccard *card; 110 struct orinoco_pccard *card;
111 111
112 priv = alloc_orinocodev(sizeof(*card), &handle_to_dev(link), 112 priv = alloc_orinocodev(sizeof(*card), &link->dev,
113 orinoco_cs_hard_reset, NULL); 113 orinoco_cs_hard_reset, NULL);
114 if (!priv) 114 if (!priv)
115 return -ENOMEM; 115 return -ENOMEM;
@@ -120,10 +120,8 @@ orinoco_cs_probe(struct pcmcia_device *link)
120 link->priv = priv; 120 link->priv = priv;
121 121
122 /* Interrupt setup */ 122 /* Interrupt setup */
123 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 123 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
124 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
125 link->irq.Handler = orinoco_interrupt; 124 link->irq.Handler = orinoco_interrupt;
126 link->irq.Instance = priv;
127 125
128 /* General socket configuration defaults can go here. In this 126 /* General socket configuration defaults can go here. In this
129 * client, we assume very little, and rely on the CIS for 127 * client, we assume very little, and rely on the CIS for
@@ -160,12 +158,6 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
160 * device available to the system. 158 * device available to the system.
161 */ 159 */
162 160
163#define CS_CHECK(fn, ret) do { \
164 last_fn = (fn); \
165 if ((last_ret = (ret)) != 0) \
166 goto cs_failed; \
167} while (0)
168
169static int orinoco_cs_config_check(struct pcmcia_device *p_dev, 161static int orinoco_cs_config_check(struct pcmcia_device *p_dev,
170 cistpl_cftable_entry_t *cfg, 162 cistpl_cftable_entry_t *cfg,
171 cistpl_cftable_entry_t *dflt, 163 cistpl_cftable_entry_t *dflt,
@@ -240,7 +232,7 @@ orinoco_cs_config(struct pcmcia_device *link)
240 struct orinoco_private *priv = link->priv; 232 struct orinoco_private *priv = link->priv;
241 struct orinoco_pccard *card = priv->card; 233 struct orinoco_pccard *card = priv->card;
242 hermes_t *hw = &priv->hw; 234 hermes_t *hw = &priv->hw;
243 int last_fn, last_ret; 235 int ret;
244 void __iomem *mem; 236 void __iomem *mem;
245 237
246 /* 238 /*
@@ -257,13 +249,12 @@ orinoco_cs_config(struct pcmcia_device *link)
257 * and most client drivers will only use the CIS to fill in 249 * and most client drivers will only use the CIS to fill in
258 * implementation-defined details. 250 * implementation-defined details.
259 */ 251 */
260 last_ret = pcmcia_loop_config(link, orinoco_cs_config_check, NULL); 252 ret = pcmcia_loop_config(link, orinoco_cs_config_check, NULL);
261 if (last_ret) { 253 if (ret) {
262 if (!ignore_cis_vcc) 254 if (!ignore_cis_vcc)
263 printk(KERN_ERR PFX "GetNextTuple(): No matching " 255 printk(KERN_ERR PFX "GetNextTuple(): No matching "
264 "CIS configuration. Maybe you need the " 256 "CIS configuration. Maybe you need the "
265 "ignore_cis_vcc=1 parameter.\n"); 257 "ignore_cis_vcc=1 parameter.\n");
266 cs_error(link, RequestIO, last_ret);
267 goto failed; 258 goto failed;
268 } 259 }
269 260
@@ -272,14 +263,16 @@ orinoco_cs_config(struct pcmcia_device *link)
272 * a handler to the interrupt, unless the 'Handler' member of 263 * a handler to the interrupt, unless the 'Handler' member of
273 * the irq structure is initialized. 264 * the irq structure is initialized.
274 */ 265 */
275 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 266 ret = pcmcia_request_irq(link, &link->irq);
267 if (ret)
268 goto failed;
276 269
277 /* We initialize the hermes structure before completing PCMCIA 270 /* We initialize the hermes structure before completing PCMCIA
278 * configuration just in case the interrupt handler gets 271 * configuration just in case the interrupt handler gets
279 * called. */ 272 * called. */
280 mem = ioport_map(link->io.BasePort1, link->io.NumPorts1); 273 mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
281 if (!mem) 274 if (!mem)
282 goto cs_failed; 275 goto failed;
283 276
284 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); 277 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
285 278
@@ -288,8 +281,9 @@ orinoco_cs_config(struct pcmcia_device *link)
288 * the I/O windows and the interrupt mapping, and putting the 281 * the I/O windows and the interrupt mapping, and putting the
289 * card and host interface into "Memory and IO" mode. 282 * card and host interface into "Memory and IO" mode.
290 */ 283 */
291 CS_CHECK(RequestConfiguration, 284 ret = pcmcia_request_configuration(link, &link->conf);
292 pcmcia_request_configuration(link, &link->conf)); 285 if (ret)
286 goto failed;
293 287
294 /* Ok, we have the configuration, prepare to register the netdev */ 288 /* Ok, we have the configuration, prepare to register the netdev */
295 card->node.major = card->node.minor = 0; 289 card->node.major = card->node.minor = 0;
@@ -315,9 +309,6 @@ orinoco_cs_config(struct pcmcia_device *link)
315 * net_device has been registered */ 309 * net_device has been registered */
316 return 0; 310 return 0;
317 311
318 cs_failed:
319 cs_error(link, last_fn, last_ret);
320
321 failed: 312 failed:
322 orinoco_cs_release(link); 313 orinoco_cs_release(link);
323 return -ENODEV; 314 return -ENODEV;
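
[Editor's note: after this patch orinoco_cs_config() reads as a straight sequence: match a CIS entry with pcmcia_loop_config(), reserve the IRQ, map the IO window, then commit with pcmcia_request_configuration(); every failure funnels into the single failed: label. A condensed sketch of that ordering, assuming the helpers named in the diff; foo_cs_config_check and foo_cs_release stand in for the driver's own callback and release routine.]

	static int foo_cs_config(struct pcmcia_device *link)
	{
		int ret;
		void __iomem *mem;

		ret = pcmcia_loop_config(link, foo_cs_config_check, NULL);
		if (ret)
			goto failed;	/* no matching CIS configuration */

		ret = pcmcia_request_irq(link, &link->irq);
		if (ret)
			goto failed;	/* IRQ reserved; handler comes from link->irq.Handler */

		mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
		if (!mem)
			goto failed;

		ret = pcmcia_request_configuration(link, &link->conf);
		if (ret)
			goto failed;	/* socket now in "Memory and IO" mode */

		return 0;

	failed:
		foo_cs_release(link);	/* single unwind path, as in the patch */
		return -ENODEV;
	}
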
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index c361310b885d..59bda240fdc2 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -73,9 +73,6 @@ static void spectrum_cs_release(struct pcmcia_device *link);
73#define HCR_MEM16 0x10 /* memory width bit, should be preserved */ 73#define HCR_MEM16 0x10 /* memory width bit, should be preserved */
74 74
75 75
76#define CS_CHECK(fn, ret) \
77 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
78
79/* 76/*
80 * Reset the card using configuration registers COR and CCSR. 77 * Reset the card using configuration registers COR and CCSR.
81 * If IDLE is 1, stop the firmware, so that it can be safely rewritten. 78 * If IDLE is 1, stop the firmware, so that it can be safely rewritten.
@@ -83,7 +80,7 @@ static void spectrum_cs_release(struct pcmcia_device *link);
83static int 80static int
84spectrum_reset(struct pcmcia_device *link, int idle) 81spectrum_reset(struct pcmcia_device *link, int idle)
85{ 82{
86 int last_ret, last_fn; 83 int ret;
87 conf_reg_t reg; 84 conf_reg_t reg;
88 u_int save_cor; 85 u_int save_cor;
89 86
@@ -95,23 +92,26 @@ spectrum_reset(struct pcmcia_device *link, int idle)
95 reg.Function = 0; 92 reg.Function = 0;
96 reg.Action = CS_READ; 93 reg.Action = CS_READ;
97 reg.Offset = CISREG_COR; 94 reg.Offset = CISREG_COR;
98 CS_CHECK(AccessConfigurationRegister, 95 ret = pcmcia_access_configuration_register(link, &reg);
99 pcmcia_access_configuration_register(link, &reg)); 96 if (ret)
97 goto failed;
100 save_cor = reg.Value; 98 save_cor = reg.Value;
101 99
102 /* Soft-Reset card */ 100 /* Soft-Reset card */
103 reg.Action = CS_WRITE; 101 reg.Action = CS_WRITE;
104 reg.Offset = CISREG_COR; 102 reg.Offset = CISREG_COR;
105 reg.Value = (save_cor | COR_SOFT_RESET); 103 reg.Value = (save_cor | COR_SOFT_RESET);
106 CS_CHECK(AccessConfigurationRegister, 104 ret = pcmcia_access_configuration_register(link, &reg);
107 pcmcia_access_configuration_register(link, &reg)); 105 if (ret)
106 goto failed;
108 udelay(1000); 107 udelay(1000);
109 108
110 /* Read CCSR */ 109 /* Read CCSR */
111 reg.Action = CS_READ; 110 reg.Action = CS_READ;
112 reg.Offset = CISREG_CCSR; 111 reg.Offset = CISREG_CCSR;
113 CS_CHECK(AccessConfigurationRegister, 112 ret = pcmcia_access_configuration_register(link, &reg);
114 pcmcia_access_configuration_register(link, &reg)); 113 if (ret)
114 goto failed;
115 115
116 /* 116 /*
117 * Start or stop the firmware. Memory width bit should be 117 * Start or stop the firmware. Memory width bit should be
@@ -120,21 +120,22 @@ spectrum_reset(struct pcmcia_device *link, int idle)
120 reg.Action = CS_WRITE; 120 reg.Action = CS_WRITE;
121 reg.Offset = CISREG_CCSR; 121 reg.Offset = CISREG_CCSR;
122 reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16); 122 reg.Value = (idle ? HCR_IDLE : HCR_RUN) | (reg.Value & HCR_MEM16);
123 CS_CHECK(AccessConfigurationRegister, 123 ret = pcmcia_access_configuration_register(link, &reg);
124 pcmcia_access_configuration_register(link, &reg)); 124 if (ret)
125 goto failed;
125 udelay(1000); 126 udelay(1000);
126 127
127 /* Restore original COR configuration index */ 128 /* Restore original COR configuration index */
128 reg.Action = CS_WRITE; 129 reg.Action = CS_WRITE;
129 reg.Offset = CISREG_COR; 130 reg.Offset = CISREG_COR;
130 reg.Value = (save_cor & ~COR_SOFT_RESET); 131 reg.Value = (save_cor & ~COR_SOFT_RESET);
131 CS_CHECK(AccessConfigurationRegister, 132 ret = pcmcia_access_configuration_register(link, &reg);
132 pcmcia_access_configuration_register(link, &reg)); 133 if (ret)
134 goto failed;
133 udelay(1000); 135 udelay(1000);
134 return 0; 136 return 0;
135 137
136cs_failed: 138failed:
137 cs_error(link, last_fn, last_ret);
138 return -ENODEV; 139 return -ENODEV;
139} 140}
140 141
@@ -181,7 +182,7 @@ spectrum_cs_probe(struct pcmcia_device *link)
181 struct orinoco_private *priv; 182 struct orinoco_private *priv;
182 struct orinoco_pccard *card; 183 struct orinoco_pccard *card;
183 184
184 priv = alloc_orinocodev(sizeof(*card), &handle_to_dev(link), 185 priv = alloc_orinocodev(sizeof(*card), &link->dev,
185 spectrum_cs_hard_reset, 186 spectrum_cs_hard_reset,
186 spectrum_cs_stop_firmware); 187 spectrum_cs_stop_firmware);
187 if (!priv) 188 if (!priv)
@@ -193,10 +194,8 @@ spectrum_cs_probe(struct pcmcia_device *link)
193 link->priv = priv; 194 link->priv = priv;
194 195
195 /* Interrupt setup */ 196 /* Interrupt setup */
196 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 197 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
197 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
198 link->irq.Handler = orinoco_interrupt; 198 link->irq.Handler = orinoco_interrupt;
199 link->irq.Instance = priv;
200 199
201 /* General socket configuration defaults can go here. In this 200 /* General socket configuration defaults can go here. In this
202 * client, we assume very little, and rely on the CIS for 201 * client, we assume very little, and rely on the CIS for
@@ -307,7 +306,7 @@ spectrum_cs_config(struct pcmcia_device *link)
307 struct orinoco_private *priv = link->priv; 306 struct orinoco_private *priv = link->priv;
308 struct orinoco_pccard *card = priv->card; 307 struct orinoco_pccard *card = priv->card;
309 hermes_t *hw = &priv->hw; 308 hermes_t *hw = &priv->hw;
310 int last_fn, last_ret; 309 int ret;
311 void __iomem *mem; 310 void __iomem *mem;
312 311
313 /* 312 /*
@@ -324,13 +323,12 @@ spectrum_cs_config(struct pcmcia_device *link)
324 * and most client drivers will only use the CIS to fill in 323 * and most client drivers will only use the CIS to fill in
325 * implementation-defined details. 324 * implementation-defined details.
326 */ 325 */
327 last_ret = pcmcia_loop_config(link, spectrum_cs_config_check, NULL); 326 ret = pcmcia_loop_config(link, spectrum_cs_config_check, NULL);
328 if (last_ret) { 327 if (ret) {
329 if (!ignore_cis_vcc) 328 if (!ignore_cis_vcc)
330 printk(KERN_ERR PFX "GetNextTuple(): No matching " 329 printk(KERN_ERR PFX "GetNextTuple(): No matching "
331 "CIS configuration. Maybe you need the " 330 "CIS configuration. Maybe you need the "
332 "ignore_cis_vcc=1 parameter.\n"); 331 "ignore_cis_vcc=1 parameter.\n");
333 cs_error(link, RequestIO, last_ret);
334 goto failed; 332 goto failed;
335 } 333 }
336 334
@@ -339,14 +337,16 @@ spectrum_cs_config(struct pcmcia_device *link)
339 * a handler to the interrupt, unless the 'Handler' member of 337 * a handler to the interrupt, unless the 'Handler' member of
340 * the irq structure is initialized. 338 * the irq structure is initialized.
341 */ 339 */
342 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 340 ret = pcmcia_request_irq(link, &link->irq);
341 if (ret)
342 goto failed;
343 343
344 /* We initialize the hermes structure before completing PCMCIA 344 /* We initialize the hermes structure before completing PCMCIA
345 * configuration just in case the interrupt handler gets 345 * configuration just in case the interrupt handler gets
346 * called. */ 346 * called. */
347 mem = ioport_map(link->io.BasePort1, link->io.NumPorts1); 347 mem = ioport_map(link->io.BasePort1, link->io.NumPorts1);
348 if (!mem) 348 if (!mem)
349 goto cs_failed; 349 goto failed;
350 350
351 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); 351 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
352 352
@@ -355,8 +355,9 @@ spectrum_cs_config(struct pcmcia_device *link)
355 * the I/O windows and the interrupt mapping, and putting the 355 * the I/O windows and the interrupt mapping, and putting the
356 * card and host interface into "Memory and IO" mode. 356 * card and host interface into "Memory and IO" mode.
357 */ 357 */
358 CS_CHECK(RequestConfiguration, 358 ret = pcmcia_request_configuration(link, &link->conf);
359 pcmcia_request_configuration(link, &link->conf)); 359 if (ret)
360 goto failed;
360 361
361 /* Ok, we have the configuration, prepare to register the netdev */ 362 /* Ok, we have the configuration, prepare to register the netdev */
362 card->node.major = card->node.minor = 0; 363 card->node.major = card->node.minor = 0;
@@ -386,9 +387,6 @@ spectrum_cs_config(struct pcmcia_device *link)
386 * net_device has been registered */ 387 * net_device has been registered */
387 return 0; 388 return 0;
388 389
389 cs_failed:
390 cs_error(link, last_fn, last_ret);
391
392 failed: 390 failed:
393 spectrum_cs_release(link); 391 spectrum_cs_release(link);
394 return -ENODEV; 392 return -ENODEV;
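
[Editor's note: spectrum_reset() is the most mechanical CS_CHECK removal in the series: each pcmcia_access_configuration_register() call now checks its own return value and bails out through failed:. A trimmed sketch of one read-modify-write step on the COR register, assuming the conf_reg_t / CS_READ / CS_WRITE interface used in this diff:]

	conf_reg_t reg;
	u_int save_cor;
	int ret;

	/* Read the current Configuration Option Register value. */
	reg.Function = 0;
	reg.Action   = CS_READ;
	reg.Offset   = CISREG_COR;
	ret = pcmcia_access_configuration_register(link, &reg);
	if (ret)
		goto failed;
	save_cor = reg.Value;

	/* Write it back with the soft-reset bit set, then let the card settle. */
	reg.Action = CS_WRITE;
	reg.Offset = CISREG_COR;
	reg.Value  = save_cor | COR_SOFT_RESET;
	ret = pcmcia_access_configuration_register(link, &reg);
	if (ret)
		goto failed;
	udelay(1000);
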
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 17e199546eeb..92af9b96bb7a 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -426,12 +426,16 @@ static const char p54u_romboot_3887[] = "~~~~";
426static int p54u_firmware_reset_3887(struct ieee80211_hw *dev) 426static int p54u_firmware_reset_3887(struct ieee80211_hw *dev)
427{ 427{
428 struct p54u_priv *priv = dev->priv; 428 struct p54u_priv *priv = dev->priv;
429 u8 buf[4]; 429 u8 *buf;
430 int ret; 430 int ret;
431 431
432 memcpy(&buf, p54u_romboot_3887, sizeof(buf)); 432 buf = kmalloc(4, GFP_KERNEL);
433 if (!buf)
434 return -ENOMEM;
435 memcpy(buf, p54u_romboot_3887, 4);
433 ret = p54u_bulk_msg(priv, P54U_PIPE_DATA, 436 ret = p54u_bulk_msg(priv, P54U_PIPE_DATA,
434 buf, sizeof(buf)); 437 buf, 4);
438 kfree(buf);
435 if (ret) 439 if (ret)
436 dev_err(&priv->udev->dev, "(p54usb) unable to jump to " 440 dev_err(&priv->udev->dev, "(p54usb) unable to jump to "
437 "boot ROM (%d)!\n", ret); 441 "boot ROM (%d)!\n", ret);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 1c88c2ea59aa..5b8e3e4cdd9f 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -71,25 +71,7 @@ typedef u_char mac_addr[ETH_ALEN]; /* Hardware address */
71#include "rayctl.h" 71#include "rayctl.h"
72#include "ray_cs.h" 72#include "ray_cs.h"
73 73
74/* All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
75 you do not define PCMCIA_DEBUG at all, all the debug code will be
76 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
77 be present but disabled -- but it can then be enabled for specific
78 modules at load time with a 'pc_debug=#' option to insmod.
79*/
80 74
81#ifdef RAYLINK_DEBUG
82#define PCMCIA_DEBUG RAYLINK_DEBUG
83#endif
84#ifdef PCMCIA_DEBUG
85static int ray_debug;
86static int pc_debug = PCMCIA_DEBUG;
87module_param(pc_debug, int, 0);
88/* #define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args); */
89#define DEBUG(n, args...) if (pc_debug > (n)) printk(args);
90#else
91#define DEBUG(n, args...)
92#endif
93/** Prototypes based on PCMCIA skeleton driver *******************************/ 75/** Prototypes based on PCMCIA skeleton driver *******************************/
94static int ray_config(struct pcmcia_device *link); 76static int ray_config(struct pcmcia_device *link);
95static void ray_release(struct pcmcia_device *link); 77static void ray_release(struct pcmcia_device *link);
@@ -325,7 +307,7 @@ static int ray_probe(struct pcmcia_device *p_dev)
325 ray_dev_t *local; 307 ray_dev_t *local;
326 struct net_device *dev; 308 struct net_device *dev;
327 309
328 DEBUG(1, "ray_attach()\n"); 310 dev_dbg(&p_dev->dev, "ray_attach()\n");
329 311
330 /* Allocate space for private device-specific data */ 312 /* Allocate space for private device-specific data */
331 dev = alloc_etherdev(sizeof(ray_dev_t)); 313 dev = alloc_etherdev(sizeof(ray_dev_t));
@@ -341,8 +323,7 @@ static int ray_probe(struct pcmcia_device *p_dev)
341 p_dev->io.IOAddrLines = 5; 323 p_dev->io.IOAddrLines = 5;
342 324
343 /* Interrupt setup. For PCMCIA, driver takes what's given */ 325 /* Interrupt setup. For PCMCIA, driver takes what's given */
344 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 326 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
345 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
346 p_dev->irq.Handler = &ray_interrupt; 327 p_dev->irq.Handler = &ray_interrupt;
347 328
348 /* General socket configuration */ 329 /* General socket configuration */
@@ -351,13 +332,12 @@ static int ray_probe(struct pcmcia_device *p_dev)
351 p_dev->conf.ConfigIndex = 1; 332 p_dev->conf.ConfigIndex = 1;
352 333
353 p_dev->priv = dev; 334 p_dev->priv = dev;
354 p_dev->irq.Instance = dev;
355 335
356 local->finder = p_dev; 336 local->finder = p_dev;
357 local->card_status = CARD_INSERTED; 337 local->card_status = CARD_INSERTED;
358 local->authentication_state = UNAUTHENTICATED; 338 local->authentication_state = UNAUTHENTICATED;
359 local->num_multi = 0; 339 local->num_multi = 0;
360 DEBUG(2, "ray_attach p_dev = %p, dev = %p, local = %p, intr = %p\n", 340 dev_dbg(&p_dev->dev, "ray_attach p_dev = %p, dev = %p, local = %p, intr = %p\n",
361 p_dev, dev, local, &ray_interrupt); 341 p_dev, dev, local, &ray_interrupt);
362 342
363 /* Raylink entries in the device structure */ 343 /* Raylink entries in the device structure */
@@ -370,7 +350,7 @@ static int ray_probe(struct pcmcia_device *p_dev)
370#endif /* WIRELESS_SPY */ 350#endif /* WIRELESS_SPY */
371 351
372 352
373 DEBUG(2, "ray_cs ray_attach calling ether_setup.)\n"); 353 dev_dbg(&p_dev->dev, "ray_cs ray_attach calling ether_setup.)\n");
374 netif_stop_queue(dev); 354 netif_stop_queue(dev);
375 355
376 init_timer(&local->timer); 356 init_timer(&local->timer);
@@ -393,7 +373,7 @@ static void ray_detach(struct pcmcia_device *link)
393 struct net_device *dev; 373 struct net_device *dev;
394 ray_dev_t *local; 374 ray_dev_t *local;
395 375
396 DEBUG(1, "ray_detach(0x%p)\n", link); 376 dev_dbg(&link->dev, "ray_detach\n");
397 377
398 this_device = NULL; 378 this_device = NULL;
399 dev = link->priv; 379 dev = link->priv;
@@ -408,7 +388,7 @@ static void ray_detach(struct pcmcia_device *link)
408 unregister_netdev(dev); 388 unregister_netdev(dev);
409 free_netdev(dev); 389 free_netdev(dev);
410 } 390 }
411 DEBUG(2, "ray_cs ray_detach ending\n"); 391 dev_dbg(&link->dev, "ray_cs ray_detach ending\n");
412} /* ray_detach */ 392} /* ray_detach */
413 393
414/*============================================================================= 394/*=============================================================================
@@ -416,19 +396,17 @@ static void ray_detach(struct pcmcia_device *link)
416 is received, to configure the PCMCIA socket, and to make the 396 is received, to configure the PCMCIA socket, and to make the
417 ethernet device available to the system. 397 ethernet device available to the system.
418=============================================================================*/ 398=============================================================================*/
419#define CS_CHECK(fn, ret) \
420do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
421#define MAX_TUPLE_SIZE 128 399#define MAX_TUPLE_SIZE 128
422static int ray_config(struct pcmcia_device *link) 400static int ray_config(struct pcmcia_device *link)
423{ 401{
424 int last_fn = 0, last_ret = 0; 402 int ret = 0;
425 int i; 403 int i;
426 win_req_t req; 404 win_req_t req;
427 memreq_t mem; 405 memreq_t mem;
428 struct net_device *dev = (struct net_device *)link->priv; 406 struct net_device *dev = (struct net_device *)link->priv;
429 ray_dev_t *local = netdev_priv(dev); 407 ray_dev_t *local = netdev_priv(dev);
430 408
431 DEBUG(1, "ray_config(0x%p)\n", link); 409 dev_dbg(&link->dev, "ray_config\n");
432 410
433 /* Determine card type and firmware version */ 411 /* Determine card type and firmware version */
434 printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n", 412 printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n",
@@ -440,14 +418,17 @@ static int ray_config(struct pcmcia_device *link)
440 /* Now allocate an interrupt line. Note that this does not 418 /* Now allocate an interrupt line. Note that this does not
441 actually assign a handler to the interrupt. 419 actually assign a handler to the interrupt.
442 */ 420 */
443 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 421 ret = pcmcia_request_irq(link, &link->irq);
422 if (ret)
423 goto failed;
444 dev->irq = link->irq.AssignedIRQ; 424 dev->irq = link->irq.AssignedIRQ;
445 425
446 /* This actually configures the PCMCIA socket -- setting up 426 /* This actually configures the PCMCIA socket -- setting up
447 the I/O windows and the interrupt mapping. 427 the I/O windows and the interrupt mapping.
448 */ 428 */
449 CS_CHECK(RequestConfiguration, 429 ret = pcmcia_request_configuration(link, &link->conf);
450 pcmcia_request_configuration(link, &link->conf)); 430 if (ret)
431 goto failed;
451 432
452/*** Set up 32k window for shared memory (transmit and control) ************/ 433/*** Set up 32k window for shared memory (transmit and control) ************/
453 req.Attributes = 434 req.Attributes =
@@ -455,10 +436,14 @@ static int ray_config(struct pcmcia_device *link)
455 req.Base = 0; 436 req.Base = 0;
456 req.Size = 0x8000; 437 req.Size = 0x8000;
457 req.AccessSpeed = ray_mem_speed; 438 req.AccessSpeed = ray_mem_speed;
458 CS_CHECK(RequestWindow, pcmcia_request_window(&link, &req, &link->win)); 439 ret = pcmcia_request_window(link, &req, &link->win);
440 if (ret)
441 goto failed;
459 mem.CardOffset = 0x0000; 442 mem.CardOffset = 0x0000;
460 mem.Page = 0; 443 mem.Page = 0;
461 CS_CHECK(MapMemPage, pcmcia_map_mem_page(link->win, &mem)); 444 ret = pcmcia_map_mem_page(link, link->win, &mem);
445 if (ret)
446 goto failed;
462 local->sram = ioremap(req.Base, req.Size); 447 local->sram = ioremap(req.Base, req.Size);
463 448
464/*** Set up 16k window for shared memory (receive buffer) ***************/ 449/*** Set up 16k window for shared memory (receive buffer) ***************/
@@ -467,11 +452,14 @@ static int ray_config(struct pcmcia_device *link)
467 req.Base = 0; 452 req.Base = 0;
468 req.Size = 0x4000; 453 req.Size = 0x4000;
469 req.AccessSpeed = ray_mem_speed; 454 req.AccessSpeed = ray_mem_speed;
470 CS_CHECK(RequestWindow, 455 ret = pcmcia_request_window(link, &req, &local->rmem_handle);
471 pcmcia_request_window(&link, &req, &local->rmem_handle)); 456 if (ret)
457 goto failed;
472 mem.CardOffset = 0x8000; 458 mem.CardOffset = 0x8000;
473 mem.Page = 0; 459 mem.Page = 0;
474 CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->rmem_handle, &mem)); 460 ret = pcmcia_map_mem_page(link, local->rmem_handle, &mem);
461 if (ret)
462 goto failed;
475 local->rmem = ioremap(req.Base, req.Size); 463 local->rmem = ioremap(req.Base, req.Size);
476 464
477/*** Set up window for attribute memory ***********************************/ 465/*** Set up window for attribute memory ***********************************/
@@ -480,22 +468,25 @@ static int ray_config(struct pcmcia_device *link)
480 req.Base = 0; 468 req.Base = 0;
481 req.Size = 0x1000; 469 req.Size = 0x1000;
482 req.AccessSpeed = ray_mem_speed; 470 req.AccessSpeed = ray_mem_speed;
483 CS_CHECK(RequestWindow, 471 ret = pcmcia_request_window(link, &req, &local->amem_handle);
484 pcmcia_request_window(&link, &req, &local->amem_handle)); 472 if (ret)
473 goto failed;
485 mem.CardOffset = 0x0000; 474 mem.CardOffset = 0x0000;
486 mem.Page = 0; 475 mem.Page = 0;
487 CS_CHECK(MapMemPage, pcmcia_map_mem_page(local->amem_handle, &mem)); 476 ret = pcmcia_map_mem_page(link, local->amem_handle, &mem);
477 if (ret)
478 goto failed;
488 local->amem = ioremap(req.Base, req.Size); 479 local->amem = ioremap(req.Base, req.Size);
489 480
490 DEBUG(3, "ray_config sram=%p\n", local->sram); 481 dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram);
491 DEBUG(3, "ray_config rmem=%p\n", local->rmem); 482 dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem);
492 DEBUG(3, "ray_config amem=%p\n", local->amem); 483 dev_dbg(&link->dev, "ray_config amem=%p\n", local->amem);
493 if (ray_init(dev) < 0) { 484 if (ray_init(dev) < 0) {
494 ray_release(link); 485 ray_release(link);
495 return -ENODEV; 486 return -ENODEV;
496 } 487 }
497 488
498 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 489 SET_NETDEV_DEV(dev, &link->dev);
499 i = register_netdev(dev); 490 i = register_netdev(dev);
500 if (i != 0) { 491 if (i != 0) {
501 printk("ray_config register_netdev() failed\n"); 492 printk("ray_config register_netdev() failed\n");
@@ -511,9 +502,7 @@ static int ray_config(struct pcmcia_device *link)
511 502
512 return 0; 503 return 0;
513 504
514cs_failed: 505failed:
515 cs_error(link, last_fn, last_ret);
516
517 ray_release(link); 506 ray_release(link);
518 return -ENODEV; 507 return -ENODEV;
519} /* ray_config */ 508} /* ray_config */
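
[Editor's note: the ray_config() hunks above also carry the memory-window API change in this series: pcmcia_request_window() now takes the pcmcia_device itself rather than a pointer to it, pcmcia_map_mem_page() gains the device as its first argument, and pcmcia_release_window() (later in this file) changes the same way. A sketch of mapping one 32K common-memory window with the calling conventions shown here, assuming the win_req_t/memreq_t types from the diff, a mem_speed value, and a driver-private 'local' structure holding the ioremapped pointer:]

	win_req_t req;
	memreq_t mem;
	int ret;

	req.Attributes  = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
	req.Base        = 0;			/* let the core pick the host address */
	req.Size        = 0x8000;		/* 32K window */
	req.AccessSpeed = mem_speed;

	ret = pcmcia_request_window(link, &req, &link->win);	/* device, not &device */
	if (ret)
		goto failed;

	mem.CardOffset = 0x0000;		/* card address mapped into the window */
	mem.Page       = 0;
	ret = pcmcia_map_mem_page(link, link->win, &mem);	/* device passed explicitly */
	if (ret)
		goto failed;

	local->sram = ioremap(req.Base, req.Size);	/* req.Base now holds the assigned base */
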
@@ -543,9 +532,9 @@ static int ray_init(struct net_device *dev)
543 struct ccs __iomem *pccs; 532 struct ccs __iomem *pccs;
544 ray_dev_t *local = netdev_priv(dev); 533 ray_dev_t *local = netdev_priv(dev);
545 struct pcmcia_device *link = local->finder; 534 struct pcmcia_device *link = local->finder;
546 DEBUG(1, "ray_init(0x%p)\n", dev); 535 dev_dbg(&link->dev, "ray_init(0x%p)\n", dev);
547 if (!(pcmcia_dev_present(link))) { 536 if (!(pcmcia_dev_present(link))) {
548 DEBUG(0, "ray_init - device not present\n"); 537 dev_dbg(&link->dev, "ray_init - device not present\n");
549 return -1; 538 return -1;
550 } 539 }
551 540
@@ -567,13 +556,13 @@ static int ray_init(struct net_device *dev)
567 local->fw_ver = local->startup_res.firmware_version[0]; 556 local->fw_ver = local->startup_res.firmware_version[0];
568 local->fw_bld = local->startup_res.firmware_version[1]; 557 local->fw_bld = local->startup_res.firmware_version[1];
569 local->fw_var = local->startup_res.firmware_version[2]; 558 local->fw_var = local->startup_res.firmware_version[2];
570 DEBUG(1, "ray_init firmware version %d.%d \n", local->fw_ver, 559 dev_dbg(&link->dev, "ray_init firmware version %d.%d \n", local->fw_ver,
571 local->fw_bld); 560 local->fw_bld);
572 561
573 local->tib_length = 0x20; 562 local->tib_length = 0x20;
574 if ((local->fw_ver == 5) && (local->fw_bld >= 30)) 563 if ((local->fw_ver == 5) && (local->fw_bld >= 30))
575 local->tib_length = local->startup_res.tib_length; 564 local->tib_length = local->startup_res.tib_length;
576 DEBUG(2, "ray_init tib_length = 0x%02x\n", local->tib_length); 565 dev_dbg(&link->dev, "ray_init tib_length = 0x%02x\n", local->tib_length);
577 /* Initialize CCS's to buffer free state */ 566 /* Initialize CCS's to buffer free state */
578 pccs = ccs_base(local); 567 pccs = ccs_base(local);
579 for (i = 0; i < NUMBER_OF_CCS; i++) { 568 for (i = 0; i < NUMBER_OF_CCS; i++) {
@@ -592,7 +581,7 @@ static int ray_init(struct net_device *dev)
592 581
593 clear_interrupt(local); /* Clear any interrupt from the card */ 582 clear_interrupt(local); /* Clear any interrupt from the card */
594 local->card_status = CARD_AWAITING_PARAM; 583 local->card_status = CARD_AWAITING_PARAM;
595 DEBUG(2, "ray_init ending\n"); 584 dev_dbg(&link->dev, "ray_init ending\n");
596 return 0; 585 return 0;
597} /* ray_init */ 586} /* ray_init */
598 587
@@ -605,9 +594,9 @@ static int dl_startup_params(struct net_device *dev)
605 struct ccs __iomem *pccs; 594 struct ccs __iomem *pccs;
606 struct pcmcia_device *link = local->finder; 595 struct pcmcia_device *link = local->finder;
607 596
608 DEBUG(1, "dl_startup_params entered\n"); 597 dev_dbg(&link->dev, "dl_startup_params entered\n");
609 if (!(pcmcia_dev_present(link))) { 598 if (!(pcmcia_dev_present(link))) {
610 DEBUG(2, "ray_cs dl_startup_params - device not present\n"); 599 dev_dbg(&link->dev, "ray_cs dl_startup_params - device not present\n");
611 return -1; 600 return -1;
612 } 601 }
613 602
@@ -625,7 +614,7 @@ static int dl_startup_params(struct net_device *dev)
625 local->dl_param_ccs = ccsindex; 614 local->dl_param_ccs = ccsindex;
626 pccs = ccs_base(local) + ccsindex; 615 pccs = ccs_base(local) + ccsindex;
627 writeb(CCS_DOWNLOAD_STARTUP_PARAMS, &pccs->cmd); 616 writeb(CCS_DOWNLOAD_STARTUP_PARAMS, &pccs->cmd);
628 DEBUG(2, "dl_startup_params start ccsindex = %d\n", 617 dev_dbg(&link->dev, "dl_startup_params start ccsindex = %d\n",
629 local->dl_param_ccs); 618 local->dl_param_ccs);
630 /* Interrupt the firmware to process the command */ 619 /* Interrupt the firmware to process the command */
631 if (interrupt_ecf(local, ccsindex)) { 620 if (interrupt_ecf(local, ccsindex)) {
@@ -641,7 +630,7 @@ static int dl_startup_params(struct net_device *dev)
641 local->timer.data = (long)local; 630 local->timer.data = (long)local;
642 local->timer.function = &verify_dl_startup; 631 local->timer.function = &verify_dl_startup;
643 add_timer(&local->timer); 632 add_timer(&local->timer);
644 DEBUG(2, 633 dev_dbg(&link->dev,
645 "ray_cs dl_startup_params started timer for verify_dl_startup\n"); 634 "ray_cs dl_startup_params started timer for verify_dl_startup\n");
646 return 0; 635 return 0;
647} /* dl_startup_params */ 636} /* dl_startup_params */
@@ -717,11 +706,11 @@ static void verify_dl_startup(u_long data)
717 struct pcmcia_device *link = local->finder; 706 struct pcmcia_device *link = local->finder;
718 707
719 if (!(pcmcia_dev_present(link))) { 708 if (!(pcmcia_dev_present(link))) {
720 DEBUG(2, "ray_cs verify_dl_startup - device not present\n"); 709 dev_dbg(&link->dev, "ray_cs verify_dl_startup - device not present\n");
721 return; 710 return;
722 } 711 }
723#ifdef PCMCIA_DEBUG 712#if 0
724 if (pc_debug > 2) { 713 {
725 int i; 714 int i;
726 printk(KERN_DEBUG 715 printk(KERN_DEBUG
727 "verify_dl_startup parameters sent via ccs %d:\n", 716 "verify_dl_startup parameters sent via ccs %d:\n",
@@ -760,7 +749,7 @@ static void start_net(u_long data)
760 int ccsindex; 749 int ccsindex;
761 struct pcmcia_device *link = local->finder; 750 struct pcmcia_device *link = local->finder;
762 if (!(pcmcia_dev_present(link))) { 751 if (!(pcmcia_dev_present(link))) {
763 DEBUG(2, "ray_cs start_net - device not present\n"); 752 dev_dbg(&link->dev, "ray_cs start_net - device not present\n");
764 return; 753 return;
765 } 754 }
766 /* Fill in the CCS fields for the ECF */ 755 /* Fill in the CCS fields for the ECF */
@@ -771,7 +760,7 @@ static void start_net(u_long data)
771 writeb(0, &pccs->var.start_network.update_param); 760 writeb(0, &pccs->var.start_network.update_param);
772 /* Interrupt the firmware to process the command */ 761 /* Interrupt the firmware to process the command */
773 if (interrupt_ecf(local, ccsindex)) { 762 if (interrupt_ecf(local, ccsindex)) {
774 DEBUG(1, "ray start net failed - card not ready for intr\n"); 763 dev_dbg(&link->dev, "ray start net failed - card not ready for intr\n");
775 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 764 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
776 return; 765 return;
777 } 766 }
@@ -790,7 +779,7 @@ static void join_net(u_long data)
790 struct pcmcia_device *link = local->finder; 779 struct pcmcia_device *link = local->finder;
791 780
792 if (!(pcmcia_dev_present(link))) { 781 if (!(pcmcia_dev_present(link))) {
793 DEBUG(2, "ray_cs join_net - device not present\n"); 782 dev_dbg(&link->dev, "ray_cs join_net - device not present\n");
794 return; 783 return;
795 } 784 }
796 /* Fill in the CCS fields for the ECF */ 785 /* Fill in the CCS fields for the ECF */
@@ -802,7 +791,7 @@ static void join_net(u_long data)
802 writeb(0, &pccs->var.join_network.net_initiated); 791 writeb(0, &pccs->var.join_network.net_initiated);
803 /* Interrupt the firmware to process the command */ 792 /* Interrupt the firmware to process the command */
804 if (interrupt_ecf(local, ccsindex)) { 793 if (interrupt_ecf(local, ccsindex)) {
805 DEBUG(1, "ray join net failed - card not ready for intr\n"); 794 dev_dbg(&link->dev, "ray join net failed - card not ready for intr\n");
806 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 795 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
807 return; 796 return;
808 } 797 }
@@ -821,7 +810,7 @@ static void ray_release(struct pcmcia_device *link)
821 ray_dev_t *local = netdev_priv(dev); 810 ray_dev_t *local = netdev_priv(dev);
822 int i; 811 int i;
823 812
824 DEBUG(1, "ray_release(0x%p)\n", link); 813 dev_dbg(&link->dev, "ray_release\n");
825 814
826 del_timer(&local->timer); 815 del_timer(&local->timer);
827 816
@@ -829,15 +818,15 @@ static void ray_release(struct pcmcia_device *link)
829 iounmap(local->rmem); 818 iounmap(local->rmem);
830 iounmap(local->amem); 819 iounmap(local->amem);
831 /* Do bother checking to see if these succeed or not */ 820 /* Do bother checking to see if these succeed or not */
832 i = pcmcia_release_window(local->amem_handle); 821 i = pcmcia_release_window(link, local->amem_handle);
833 if (i != 0) 822 if (i != 0)
834 DEBUG(0, "ReleaseWindow(local->amem) ret = %x\n", i); 823 dev_dbg(&link->dev, "ReleaseWindow(local->amem) ret = %x\n", i);
835 i = pcmcia_release_window(local->rmem_handle); 824 i = pcmcia_release_window(link, local->rmem_handle);
836 if (i != 0) 825 if (i != 0)
837 DEBUG(0, "ReleaseWindow(local->rmem) ret = %x\n", i); 826 dev_dbg(&link->dev, "ReleaseWindow(local->rmem) ret = %x\n", i);
838 pcmcia_disable_device(link); 827 pcmcia_disable_device(link);
839 828
840 DEBUG(2, "ray_release ending\n"); 829 dev_dbg(&link->dev, "ray_release ending\n");
841} 830}
842 831
843static int ray_suspend(struct pcmcia_device *link) 832static int ray_suspend(struct pcmcia_device *link)
@@ -871,9 +860,9 @@ static int ray_dev_init(struct net_device *dev)
871 ray_dev_t *local = netdev_priv(dev); 860 ray_dev_t *local = netdev_priv(dev);
872 struct pcmcia_device *link = local->finder; 861 struct pcmcia_device *link = local->finder;
873 862
874 DEBUG(1, "ray_dev_init(dev=%p)\n", dev); 863 dev_dbg(&link->dev, "ray_dev_init(dev=%p)\n", dev);
875 if (!(pcmcia_dev_present(link))) { 864 if (!(pcmcia_dev_present(link))) {
876 DEBUG(2, "ray_dev_init - device not present\n"); 865 dev_dbg(&link->dev, "ray_dev_init - device not present\n");
877 return -1; 866 return -1;
878 } 867 }
879#ifdef RAY_IMMEDIATE_INIT 868#ifdef RAY_IMMEDIATE_INIT
@@ -887,7 +876,7 @@ static int ray_dev_init(struct net_device *dev)
887 /* Postpone the card init so that we can still configure the card, 876 /* Postpone the card init so that we can still configure the card,
888 * for example using the Wireless Extensions. The init will happen 877 * for example using the Wireless Extensions. The init will happen
889 * in ray_open() - Jean II */ 878 * in ray_open() - Jean II */
890 DEBUG(1, 879 dev_dbg(&link->dev,
891 "ray_dev_init: postponing card init to ray_open() ; Status = %d\n", 880 "ray_dev_init: postponing card init to ray_open() ; Status = %d\n",
892 local->card_status); 881 local->card_status);
893#endif /* RAY_IMMEDIATE_INIT */ 882#endif /* RAY_IMMEDIATE_INIT */
@@ -896,7 +885,7 @@ static int ray_dev_init(struct net_device *dev)
896 memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN); 885 memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
897 memset(dev->broadcast, 0xff, ETH_ALEN); 886 memset(dev->broadcast, 0xff, ETH_ALEN);
898 887
899 DEBUG(2, "ray_dev_init ending\n"); 888 dev_dbg(&link->dev, "ray_dev_init ending\n");
900 return 0; 889 return 0;
901} 890}
902 891
@@ -906,9 +895,9 @@ static int ray_dev_config(struct net_device *dev, struct ifmap *map)
906 ray_dev_t *local = netdev_priv(dev); 895 ray_dev_t *local = netdev_priv(dev);
907 struct pcmcia_device *link = local->finder; 896 struct pcmcia_device *link = local->finder;
908 /* Dummy routine to satisfy device structure */ 897 /* Dummy routine to satisfy device structure */
909 DEBUG(1, "ray_dev_config(dev=%p,ifmap=%p)\n", dev, map); 898 dev_dbg(&link->dev, "ray_dev_config(dev=%p,ifmap=%p)\n", dev, map);
910 if (!(pcmcia_dev_present(link))) { 899 if (!(pcmcia_dev_present(link))) {
911 DEBUG(2, "ray_dev_config - device not present\n"); 900 dev_dbg(&link->dev, "ray_dev_config - device not present\n");
912 return -1; 901 return -1;
913 } 902 }
914 903
@@ -924,14 +913,14 @@ static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
924 short length = skb->len; 913 short length = skb->len;
925 914
926 if (!pcmcia_dev_present(link)) { 915 if (!pcmcia_dev_present(link)) {
927 DEBUG(2, "ray_dev_start_xmit - device not present\n"); 916 dev_dbg(&link->dev, "ray_dev_start_xmit - device not present\n");
928 dev_kfree_skb(skb); 917 dev_kfree_skb(skb);
929 return NETDEV_TX_OK; 918 return NETDEV_TX_OK;
930 } 919 }
931 920
932 DEBUG(3, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev); 921 dev_dbg(&link->dev, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev);
933 if (local->authentication_state == NEED_TO_AUTH) { 922 if (local->authentication_state == NEED_TO_AUTH) {
934 DEBUG(0, "ray_cs Sending authentication request.\n"); 923 dev_dbg(&link->dev, "ray_cs Sending authentication request.\n");
935 if (!build_auth_frame(local, local->auth_id, OPEN_AUTH_REQUEST)) { 924 if (!build_auth_frame(local, local->auth_id, OPEN_AUTH_REQUEST)) {
936 local->authentication_state = AUTHENTICATED; 925 local->authentication_state = AUTHENTICATED;
937 netif_stop_queue(dev); 926 netif_stop_queue(dev);
@@ -971,7 +960,7 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
971 struct tx_msg __iomem *ptx; /* Address of xmit buffer in PC space */ 960 struct tx_msg __iomem *ptx; /* Address of xmit buffer in PC space */
972 short int addr; /* Address of xmit buffer in card space */ 961 short int addr; /* Address of xmit buffer in card space */
973 962
974 DEBUG(3, "ray_hw_xmit(data=%p, len=%d, dev=%p)\n", data, len, dev); 963 pr_debug("ray_hw_xmit(data=%p, len=%d, dev=%p)\n", data, len, dev);
975 if (len + TX_HEADER_LENGTH > TX_BUF_SIZE) { 964 if (len + TX_HEADER_LENGTH > TX_BUF_SIZE) {
976 printk(KERN_INFO "ray_hw_xmit packet too large: %d bytes\n", 965 printk(KERN_INFO "ray_hw_xmit packet too large: %d bytes\n",
977 len); 966 len);
@@ -979,9 +968,9 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
979 } 968 }
980 switch (ccsindex = get_free_tx_ccs(local)) { 969 switch (ccsindex = get_free_tx_ccs(local)) {
981 case ECCSBUSY: 970 case ECCSBUSY:
982 DEBUG(2, "ray_hw_xmit tx_ccs table busy\n"); 971 pr_debug("ray_hw_xmit tx_ccs table busy\n");
983 case ECCSFULL: 972 case ECCSFULL:
984 DEBUG(2, "ray_hw_xmit No free tx ccs\n"); 973 pr_debug("ray_hw_xmit No free tx ccs\n");
985 case ECARDGONE: 974 case ECARDGONE:
986 netif_stop_queue(dev); 975 netif_stop_queue(dev);
987 return XMIT_NO_CCS; 976 return XMIT_NO_CCS;
@@ -1018,12 +1007,12 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
1018 writeb(PSM_CAM, &pccs->var.tx_request.pow_sav_mode); 1007 writeb(PSM_CAM, &pccs->var.tx_request.pow_sav_mode);
1019 writeb(local->net_default_tx_rate, &pccs->var.tx_request.tx_rate); 1008 writeb(local->net_default_tx_rate, &pccs->var.tx_request.tx_rate);
1020 writeb(0, &pccs->var.tx_request.antenna); 1009 writeb(0, &pccs->var.tx_request.antenna);
1021 DEBUG(3, "ray_hw_xmit default_tx_rate = 0x%x\n", 1010 pr_debug("ray_hw_xmit default_tx_rate = 0x%x\n",
1022 local->net_default_tx_rate); 1011 local->net_default_tx_rate);
1023 1012
1024 /* Interrupt the firmware to process the command */ 1013 /* Interrupt the firmware to process the command */
1025 if (interrupt_ecf(local, ccsindex)) { 1014 if (interrupt_ecf(local, ccsindex)) {
1026 DEBUG(2, "ray_hw_xmit failed - ECF not ready for intr\n"); 1015 pr_debug("ray_hw_xmit failed - ECF not ready for intr\n");
1027/* TBD very inefficient to copy packet to buffer, and then not 1016/* TBD very inefficient to copy packet to buffer, and then not
1028 send it, but the alternative is to queue the messages and that 1017 send it, but the alternative is to queue the messages and that
1029 won't be done for a while. Maybe set tbusy until a CCS is free? 1018 won't be done for a while. Maybe set tbusy until a CCS is free?
@@ -1040,7 +1029,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
1040{ 1029{
1041 __be16 proto = ((struct ethhdr *)data)->h_proto; 1030 __be16 proto = ((struct ethhdr *)data)->h_proto;
1042 if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */ 1031 if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */
1043 DEBUG(3, "ray_cs translate_frame DIX II\n"); 1032 pr_debug("ray_cs translate_frame DIX II\n");
1044 /* Copy LLC header to card buffer */ 1033 /* Copy LLC header to card buffer */
1045 memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc)); 1034 memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc));
1046 memcpy_toio(((void __iomem *)&ptx->var) + sizeof(eth2_llc), 1035 memcpy_toio(((void __iomem *)&ptx->var) + sizeof(eth2_llc),
@@ -1056,9 +1045,9 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
1056 len - ETH_HLEN); 1045 len - ETH_HLEN);
1057 return (int)sizeof(struct snaphdr_t) - ETH_HLEN; 1046 return (int)sizeof(struct snaphdr_t) - ETH_HLEN;
1058 } else { /* already 802 type, and proto is length */ 1047 } else { /* already 802 type, and proto is length */
1059 DEBUG(3, "ray_cs translate_frame 802\n"); 1048 pr_debug("ray_cs translate_frame 802\n");
1060 if (proto == htons(0xffff)) { /* evil netware IPX 802.3 without LLC */ 1049 if (proto == htons(0xffff)) { /* evil netware IPX 802.3 without LLC */
1061 DEBUG(3, "ray_cs translate_frame evil IPX\n"); 1050 pr_debug("ray_cs translate_frame evil IPX\n");
1062 memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN); 1051 memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN);
1063 return 0 - ETH_HLEN; 1052 return 0 - ETH_HLEN;
1064 } 1053 }
@@ -1603,7 +1592,7 @@ static int ray_open(struct net_device *dev)
1603 struct pcmcia_device *link; 1592 struct pcmcia_device *link;
1604 link = local->finder; 1593 link = local->finder;
1605 1594
1606 DEBUG(1, "ray_open('%s')\n", dev->name); 1595 dev_dbg(&link->dev, "ray_open('%s')\n", dev->name);
1607 1596
1608 if (link->open == 0) 1597 if (link->open == 0)
1609 local->num_multi = 0; 1598 local->num_multi = 0;
@@ -1613,7 +1602,7 @@ static int ray_open(struct net_device *dev)
1613 if (local->card_status == CARD_AWAITING_PARAM) { 1602 if (local->card_status == CARD_AWAITING_PARAM) {
1614 int i; 1603 int i;
1615 1604
1616 DEBUG(1, "ray_open: doing init now !\n"); 1605 dev_dbg(&link->dev, "ray_open: doing init now !\n");
1617 1606
1618 /* Download startup parameters */ 1607 /* Download startup parameters */
1619 if ((i = dl_startup_params(dev)) < 0) { 1608 if ((i = dl_startup_params(dev)) < 0) {
@@ -1629,7 +1618,7 @@ static int ray_open(struct net_device *dev)
1629 else 1618 else
1630 netif_start_queue(dev); 1619 netif_start_queue(dev);
1631 1620
1632 DEBUG(2, "ray_open ending\n"); 1621 dev_dbg(&link->dev, "ray_open ending\n");
1633 return 0; 1622 return 0;
1634} /* end ray_open */ 1623} /* end ray_open */
1635 1624
@@ -1640,7 +1629,7 @@ static int ray_dev_close(struct net_device *dev)
1640 struct pcmcia_device *link; 1629 struct pcmcia_device *link;
1641 link = local->finder; 1630 link = local->finder;
1642 1631
1643 DEBUG(1, "ray_dev_close('%s')\n", dev->name); 1632 dev_dbg(&link->dev, "ray_dev_close('%s')\n", dev->name);
1644 1633
1645 link->open--; 1634 link->open--;
1646 netif_stop_queue(dev); 1635 netif_stop_queue(dev);
@@ -1656,7 +1645,7 @@ static int ray_dev_close(struct net_device *dev)
1656/*===========================================================================*/ 1645/*===========================================================================*/
1657static void ray_reset(struct net_device *dev) 1646static void ray_reset(struct net_device *dev)
1658{ 1647{
1659 DEBUG(1, "ray_reset entered\n"); 1648 pr_debug("ray_reset entered\n");
1660 return; 1649 return;
1661} 1650}
1662 1651
@@ -1669,17 +1658,17 @@ static int interrupt_ecf(ray_dev_t *local, int ccs)
1669 struct pcmcia_device *link = local->finder; 1658 struct pcmcia_device *link = local->finder;
1670 1659
1671 if (!(pcmcia_dev_present(link))) { 1660 if (!(pcmcia_dev_present(link))) {
1672 DEBUG(2, "ray_cs interrupt_ecf - device not present\n"); 1661 dev_dbg(&link->dev, "ray_cs interrupt_ecf - device not present\n");
1673 return -1; 1662 return -1;
1674 } 1663 }
1675 DEBUG(2, "interrupt_ecf(local=%p, ccs = 0x%x\n", local, ccs); 1664 dev_dbg(&link->dev, "interrupt_ecf(local=%p, ccs = 0x%x\n", local, ccs);
1676 1665
1677 while (i && 1666 while (i &&
1678 (readb(local->amem + CIS_OFFSET + ECF_INTR_OFFSET) & 1667 (readb(local->amem + CIS_OFFSET + ECF_INTR_OFFSET) &
1679 ECF_INTR_SET)) 1668 ECF_INTR_SET))
1680 i--; 1669 i--;
1681 if (i == 0) { 1670 if (i == 0) {
1682 DEBUG(2, "ray_cs interrupt_ecf card not ready for interrupt\n"); 1671 dev_dbg(&link->dev, "ray_cs interrupt_ecf card not ready for interrupt\n");
1683 return -1; 1672 return -1;
1684 } 1673 }
1685 /* Fill the mailbox, then kick the card */ 1674 /* Fill the mailbox, then kick the card */
@@ -1698,12 +1687,12 @@ static int get_free_tx_ccs(ray_dev_t *local)
1698 struct pcmcia_device *link = local->finder; 1687 struct pcmcia_device *link = local->finder;
1699 1688
1700 if (!(pcmcia_dev_present(link))) { 1689 if (!(pcmcia_dev_present(link))) {
1701 DEBUG(2, "ray_cs get_free_tx_ccs - device not present\n"); 1690 dev_dbg(&link->dev, "ray_cs get_free_tx_ccs - device not present\n");
1702 return ECARDGONE; 1691 return ECARDGONE;
1703 } 1692 }
1704 1693
1705 if (test_and_set_bit(0, &local->tx_ccs_lock)) { 1694 if (test_and_set_bit(0, &local->tx_ccs_lock)) {
1706 DEBUG(1, "ray_cs tx_ccs_lock busy\n"); 1695 dev_dbg(&link->dev, "ray_cs tx_ccs_lock busy\n");
1707 return ECCSBUSY; 1696 return ECCSBUSY;
1708 } 1697 }
1709 1698
@@ -1716,7 +1705,7 @@ static int get_free_tx_ccs(ray_dev_t *local)
1716 } 1705 }
1717 } 1706 }
1718 local->tx_ccs_lock = 0; 1707 local->tx_ccs_lock = 0;
1719 DEBUG(2, "ray_cs ERROR no free tx CCS for raylink card\n"); 1708 dev_dbg(&link->dev, "ray_cs ERROR no free tx CCS for raylink card\n");
1720 return ECCSFULL; 1709 return ECCSFULL;
1721} /* get_free_tx_ccs */ 1710} /* get_free_tx_ccs */
1722 1711
@@ -1730,11 +1719,11 @@ static int get_free_ccs(ray_dev_t *local)
1730 struct pcmcia_device *link = local->finder; 1719 struct pcmcia_device *link = local->finder;
1731 1720
1732 if (!(pcmcia_dev_present(link))) { 1721 if (!(pcmcia_dev_present(link))) {
1733 DEBUG(2, "ray_cs get_free_ccs - device not present\n"); 1722 dev_dbg(&link->dev, "ray_cs get_free_ccs - device not present\n");
1734 return ECARDGONE; 1723 return ECARDGONE;
1735 } 1724 }
1736 if (test_and_set_bit(0, &local->ccs_lock)) { 1725 if (test_and_set_bit(0, &local->ccs_lock)) {
1737 DEBUG(1, "ray_cs ccs_lock busy\n"); 1726 dev_dbg(&link->dev, "ray_cs ccs_lock busy\n");
1738 return ECCSBUSY; 1727 return ECCSBUSY;
1739 } 1728 }
1740 1729
@@ -1747,7 +1736,7 @@ static int get_free_ccs(ray_dev_t *local)
1747 } 1736 }
1748 } 1737 }
1749 local->ccs_lock = 0; 1738 local->ccs_lock = 0;
1750 DEBUG(1, "ray_cs ERROR no free CCS for raylink card\n"); 1739 dev_dbg(&link->dev, "ray_cs ERROR no free CCS for raylink card\n");
1751 return ECCSFULL; 1740 return ECCSFULL;
1752} /* get_free_ccs */ 1741} /* get_free_ccs */
1753 1742
@@ -1823,7 +1812,7 @@ static struct net_device_stats *ray_get_stats(struct net_device *dev)
1823 struct pcmcia_device *link = local->finder; 1812 struct pcmcia_device *link = local->finder;
1824 struct status __iomem *p = local->sram + STATUS_BASE; 1813 struct status __iomem *p = local->sram + STATUS_BASE;
1825 if (!(pcmcia_dev_present(link))) { 1814 if (!(pcmcia_dev_present(link))) {
1826 DEBUG(2, "ray_cs net_device_stats - device not present\n"); 1815 dev_dbg(&link->dev, "ray_cs net_device_stats - device not present\n");
1827 return &local->stats; 1816 return &local->stats;
1828 } 1817 }
1829 if (readb(&p->mrx_overflow_for_host)) { 1818 if (readb(&p->mrx_overflow_for_host)) {
@@ -1856,12 +1845,12 @@ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value,
1856 struct ccs __iomem *pccs; 1845 struct ccs __iomem *pccs;
1857 1846
1858 if (!(pcmcia_dev_present(link))) { 1847 if (!(pcmcia_dev_present(link))) {
1859 DEBUG(2, "ray_update_parm - device not present\n"); 1848 dev_dbg(&link->dev, "ray_update_parm - device not present\n");
1860 return; 1849 return;
1861 } 1850 }
1862 1851
1863 if ((ccsindex = get_free_ccs(local)) < 0) { 1852 if ((ccsindex = get_free_ccs(local)) < 0) {
1864 DEBUG(0, "ray_update_parm - No free ccs\n"); 1853 dev_dbg(&link->dev, "ray_update_parm - No free ccs\n");
1865 return; 1854 return;
1866 } 1855 }
1867 pccs = ccs_base(local) + ccsindex; 1856 pccs = ccs_base(local) + ccsindex;
@@ -1874,7 +1863,7 @@ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value,
1874 } 1863 }
1875 /* Interrupt the firmware to process the command */ 1864 /* Interrupt the firmware to process the command */
1876 if (interrupt_ecf(local, ccsindex)) { 1865 if (interrupt_ecf(local, ccsindex)) {
1877 DEBUG(0, "ray_cs associate failed - ECF not ready for intr\n"); 1866 dev_dbg(&link->dev, "ray_cs associate failed - ECF not ready for intr\n");
1878 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 1867 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
1879 } 1868 }
1880} 1869}
@@ -1891,12 +1880,12 @@ static void ray_update_multi_list(struct net_device *dev, int all)
1891 void __iomem *p = local->sram + HOST_TO_ECF_BASE; 1880 void __iomem *p = local->sram + HOST_TO_ECF_BASE;
1892 1881
1893 if (!(pcmcia_dev_present(link))) { 1882 if (!(pcmcia_dev_present(link))) {
1894 DEBUG(2, "ray_update_multi_list - device not present\n"); 1883 dev_dbg(&link->dev, "ray_update_multi_list - device not present\n");
1895 return; 1884 return;
1896 } else 1885 } else
1897 DEBUG(2, "ray_update_multi_list(%p)\n", dev); 1886 dev_dbg(&link->dev, "ray_update_multi_list(%p)\n", dev);
1898 if ((ccsindex = get_free_ccs(local)) < 0) { 1887 if ((ccsindex = get_free_ccs(local)) < 0) {
1899 DEBUG(1, "ray_update_multi - No free ccs\n"); 1888 dev_dbg(&link->dev, "ray_update_multi - No free ccs\n");
1900 return; 1889 return;
1901 } 1890 }
1902 pccs = ccs_base(local) + ccsindex; 1891 pccs = ccs_base(local) + ccsindex;
@@ -1910,7 +1899,7 @@ static void ray_update_multi_list(struct net_device *dev, int all)
1910 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; 1899 for (dmip = &dev->mc_list; (dmi = *dmip) != NULL;
1911 dmip = &dmi->next) { 1900 dmip = &dmi->next) {
1912 memcpy_toio(p, dmi->dmi_addr, ETH_ALEN); 1901 memcpy_toio(p, dmi->dmi_addr, ETH_ALEN);
1913 DEBUG(1, 1902 dev_dbg(&link->dev,
1914 "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n", 1903 "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
1915 dmi->dmi_addr[0], dmi->dmi_addr[1], 1904 dmi->dmi_addr[0], dmi->dmi_addr[1],
1916 dmi->dmi_addr[2], dmi->dmi_addr[3], 1905 dmi->dmi_addr[2], dmi->dmi_addr[3],
@@ -1921,12 +1910,12 @@ static void ray_update_multi_list(struct net_device *dev, int all)
1921 if (i > 256 / ADDRLEN) 1910 if (i > 256 / ADDRLEN)
1922 i = 256 / ADDRLEN; 1911 i = 256 / ADDRLEN;
1923 writeb((UCHAR) i, &pccs->var); 1912 writeb((UCHAR) i, &pccs->var);
1924 DEBUG(1, "ray_cs update_multi %d addresses in list\n", i); 1913 dev_dbg(&link->dev, "ray_cs update_multi %d addresses in list\n", i);
1925 /* Interrupt the firmware to process the command */ 1914 /* Interrupt the firmware to process the command */
1926 local->num_multi = i; 1915 local->num_multi = i;
1927 } 1916 }
1928 if (interrupt_ecf(local, ccsindex)) { 1917 if (interrupt_ecf(local, ccsindex)) {
1929 DEBUG(1, 1918 dev_dbg(&link->dev,
1930 "ray_cs update_multi failed - ECF not ready for intr\n"); 1919 "ray_cs update_multi failed - ECF not ready for intr\n");
1931 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 1920 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
1932 } 1921 }
@@ -1938,11 +1927,11 @@ static void set_multicast_list(struct net_device *dev)
1938 ray_dev_t *local = netdev_priv(dev); 1927 ray_dev_t *local = netdev_priv(dev);
1939 UCHAR promisc; 1928 UCHAR promisc;
1940 1929
1941 DEBUG(2, "ray_cs set_multicast_list(%p)\n", dev); 1930 pr_debug("ray_cs set_multicast_list(%p)\n", dev);
1942 1931
1943 if (dev->flags & IFF_PROMISC) { 1932 if (dev->flags & IFF_PROMISC) {
1944 if (local->sparm.b5.a_promiscuous_mode == 0) { 1933 if (local->sparm.b5.a_promiscuous_mode == 0) {
1945 DEBUG(1, "ray_cs set_multicast_list promisc on\n"); 1934 pr_debug("ray_cs set_multicast_list promisc on\n");
1946 local->sparm.b5.a_promiscuous_mode = 1; 1935 local->sparm.b5.a_promiscuous_mode = 1;
1947 promisc = 1; 1936 promisc = 1;
1948 ray_update_parm(dev, OBJID_promiscuous_mode, 1937 ray_update_parm(dev, OBJID_promiscuous_mode,
@@ -1950,7 +1939,7 @@ static void set_multicast_list(struct net_device *dev)
1950 } 1939 }
1951 } else { 1940 } else {
1952 if (local->sparm.b5.a_promiscuous_mode == 1) { 1941 if (local->sparm.b5.a_promiscuous_mode == 1) {
1953 DEBUG(1, "ray_cs set_multicast_list promisc off\n"); 1942 pr_debug("ray_cs set_multicast_list promisc off\n");
1954 local->sparm.b5.a_promiscuous_mode = 0; 1943 local->sparm.b5.a_promiscuous_mode = 0;
1955 promisc = 0; 1944 promisc = 0;
1956 ray_update_parm(dev, OBJID_promiscuous_mode, 1945 ray_update_parm(dev, OBJID_promiscuous_mode,
@@ -1984,19 +1973,19 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
1984 if (dev == NULL) /* Note that we want interrupts with dev->start == 0 */ 1973 if (dev == NULL) /* Note that we want interrupts with dev->start == 0 */
1985 return IRQ_NONE; 1974 return IRQ_NONE;
1986 1975
1987 DEBUG(4, "ray_cs: interrupt for *dev=%p\n", dev); 1976 pr_debug("ray_cs: interrupt for *dev=%p\n", dev);
1988 1977
1989 local = netdev_priv(dev); 1978 local = netdev_priv(dev);
1990 link = (struct pcmcia_device *)local->finder; 1979 link = (struct pcmcia_device *)local->finder;
1991 if (!pcmcia_dev_present(link)) { 1980 if (!pcmcia_dev_present(link)) {
1992 DEBUG(2, 1981 pr_debug(
1993 "ray_cs interrupt from device not present or suspended.\n"); 1982 "ray_cs interrupt from device not present or suspended.\n");
1994 return IRQ_NONE; 1983 return IRQ_NONE;
1995 } 1984 }
1996 rcsindex = readb(&((struct scb __iomem *)(local->sram))->rcs_index); 1985 rcsindex = readb(&((struct scb __iomem *)(local->sram))->rcs_index);
1997 1986
1998 if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) { 1987 if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) {
1999 DEBUG(1, "ray_cs interrupt bad rcsindex = 0x%x\n", rcsindex); 1988 dev_dbg(&link->dev, "ray_cs interrupt bad rcsindex = 0x%x\n", rcsindex);
2000 clear_interrupt(local); 1989 clear_interrupt(local);
2001 return IRQ_HANDLED; 1990 return IRQ_HANDLED;
2002 } 1991 }
@@ -2008,33 +1997,33 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2008 case CCS_DOWNLOAD_STARTUP_PARAMS: /* Happens in firmware someday */ 1997 case CCS_DOWNLOAD_STARTUP_PARAMS: /* Happens in firmware someday */
2009 del_timer(&local->timer); 1998 del_timer(&local->timer);
2010 if (status == CCS_COMMAND_COMPLETE) { 1999 if (status == CCS_COMMAND_COMPLETE) {
2011 DEBUG(1, 2000 dev_dbg(&link->dev,
2012 "ray_cs interrupt download_startup_parameters OK\n"); 2001 "ray_cs interrupt download_startup_parameters OK\n");
2013 } else { 2002 } else {
2014 DEBUG(1, 2003 dev_dbg(&link->dev,
2015 "ray_cs interrupt download_startup_parameters fail\n"); 2004 "ray_cs interrupt download_startup_parameters fail\n");
2016 } 2005 }
2017 break; 2006 break;
2018 case CCS_UPDATE_PARAMS: 2007 case CCS_UPDATE_PARAMS:
2019 DEBUG(1, "ray_cs interrupt update params done\n"); 2008 dev_dbg(&link->dev, "ray_cs interrupt update params done\n");
2020 if (status != CCS_COMMAND_COMPLETE) { 2009 if (status != CCS_COMMAND_COMPLETE) {
2021 tmp = 2010 tmp =
2022 readb(&pccs->var.update_param. 2011 readb(&pccs->var.update_param.
2023 failure_cause); 2012 failure_cause);
2024 DEBUG(0, 2013 dev_dbg(&link->dev,
2025 "ray_cs interrupt update params failed - reason %d\n", 2014 "ray_cs interrupt update params failed - reason %d\n",
2026 tmp); 2015 tmp);
2027 } 2016 }
2028 break; 2017 break;
2029 case CCS_REPORT_PARAMS: 2018 case CCS_REPORT_PARAMS:
2030 DEBUG(1, "ray_cs interrupt report params done\n"); 2019 dev_dbg(&link->dev, "ray_cs interrupt report params done\n");
2031 break; 2020 break;
2032 case CCS_UPDATE_MULTICAST_LIST: /* Note that this CCS isn't returned */ 2021 case CCS_UPDATE_MULTICAST_LIST: /* Note that this CCS isn't returned */
2033 DEBUG(1, 2022 dev_dbg(&link->dev,
2034 "ray_cs interrupt CCS Update Multicast List done\n"); 2023 "ray_cs interrupt CCS Update Multicast List done\n");
2035 break; 2024 break;
2036 case CCS_UPDATE_POWER_SAVINGS_MODE: 2025 case CCS_UPDATE_POWER_SAVINGS_MODE:
2037 DEBUG(1, 2026 dev_dbg(&link->dev,
2038 "ray_cs interrupt update power save mode done\n"); 2027 "ray_cs interrupt update power save mode done\n");
2039 break; 2028 break;
2040 case CCS_START_NETWORK: 2029 case CCS_START_NETWORK:
@@ -2043,11 +2032,11 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2043 if (readb 2032 if (readb
2044 (&pccs->var.start_network.net_initiated) == 2033 (&pccs->var.start_network.net_initiated) ==
2045 1) { 2034 1) {
2046 DEBUG(0, 2035 dev_dbg(&link->dev,
2047 "ray_cs interrupt network \"%s\" started\n", 2036 "ray_cs interrupt network \"%s\" started\n",
2048 local->sparm.b4.a_current_ess_id); 2037 local->sparm.b4.a_current_ess_id);
2049 } else { 2038 } else {
2050 DEBUG(0, 2039 dev_dbg(&link->dev,
2051 "ray_cs interrupt network \"%s\" joined\n", 2040 "ray_cs interrupt network \"%s\" joined\n",
2052 local->sparm.b4.a_current_ess_id); 2041 local->sparm.b4.a_current_ess_id);
2053 } 2042 }
@@ -2075,12 +2064,12 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2075 local->timer.expires = jiffies + HZ * 5; 2064 local->timer.expires = jiffies + HZ * 5;
2076 local->timer.data = (long)local; 2065 local->timer.data = (long)local;
2077 if (status == CCS_START_NETWORK) { 2066 if (status == CCS_START_NETWORK) {
2078 DEBUG(0, 2067 dev_dbg(&link->dev,
2079 "ray_cs interrupt network \"%s\" start failed\n", 2068 "ray_cs interrupt network \"%s\" start failed\n",
2080 local->sparm.b4.a_current_ess_id); 2069 local->sparm.b4.a_current_ess_id);
2081 local->timer.function = &start_net; 2070 local->timer.function = &start_net;
2082 } else { 2071 } else {
2083 DEBUG(0, 2072 dev_dbg(&link->dev,
2084 "ray_cs interrupt network \"%s\" join failed\n", 2073 "ray_cs interrupt network \"%s\" join failed\n",
2085 local->sparm.b4.a_current_ess_id); 2074 local->sparm.b4.a_current_ess_id);
2086 local->timer.function = &join_net; 2075 local->timer.function = &join_net;
@@ -2091,19 +2080,19 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2091 case CCS_START_ASSOCIATION: 2080 case CCS_START_ASSOCIATION:
2092 if (status == CCS_COMMAND_COMPLETE) { 2081 if (status == CCS_COMMAND_COMPLETE) {
2093 local->card_status = CARD_ASSOC_COMPLETE; 2082 local->card_status = CARD_ASSOC_COMPLETE;
2094 DEBUG(0, "ray_cs association successful\n"); 2083 dev_dbg(&link->dev, "ray_cs association successful\n");
2095 } else { 2084 } else {
2096 DEBUG(0, "ray_cs association failed,\n"); 2085 dev_dbg(&link->dev, "ray_cs association failed,\n");
2097 local->card_status = CARD_ASSOC_FAILED; 2086 local->card_status = CARD_ASSOC_FAILED;
2098 join_net((u_long) local); 2087 join_net((u_long) local);
2099 } 2088 }
2100 break; 2089 break;
2101 case CCS_TX_REQUEST: 2090 case CCS_TX_REQUEST:
2102 if (status == CCS_COMMAND_COMPLETE) { 2091 if (status == CCS_COMMAND_COMPLETE) {
2103 DEBUG(3, 2092 dev_dbg(&link->dev,
2104 "ray_cs interrupt tx request complete\n"); 2093 "ray_cs interrupt tx request complete\n");
2105 } else { 2094 } else {
2106 DEBUG(1, 2095 dev_dbg(&link->dev,
2107 "ray_cs interrupt tx request failed\n"); 2096 "ray_cs interrupt tx request failed\n");
2108 } 2097 }
2109 if (!sniffer) 2098 if (!sniffer)
@@ -2111,21 +2100,21 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2111 netif_wake_queue(dev); 2100 netif_wake_queue(dev);
2112 break; 2101 break;
2113 case CCS_TEST_MEMORY: 2102 case CCS_TEST_MEMORY:
2114 DEBUG(1, "ray_cs interrupt mem test done\n"); 2103 dev_dbg(&link->dev, "ray_cs interrupt mem test done\n");
2115 break; 2104 break;
2116 case CCS_SHUTDOWN: 2105 case CCS_SHUTDOWN:
2117 DEBUG(1, 2106 dev_dbg(&link->dev,
2118 "ray_cs interrupt Unexpected CCS returned - Shutdown\n"); 2107 "ray_cs interrupt Unexpected CCS returned - Shutdown\n");
2119 break; 2108 break;
2120 case CCS_DUMP_MEMORY: 2109 case CCS_DUMP_MEMORY:
2121 DEBUG(1, "ray_cs interrupt dump memory done\n"); 2110 dev_dbg(&link->dev, "ray_cs interrupt dump memory done\n");
2122 break; 2111 break;
2123 case CCS_START_TIMER: 2112 case CCS_START_TIMER:
2124 DEBUG(2, 2113 dev_dbg(&link->dev,
2125 "ray_cs interrupt DING - raylink timer expired\n"); 2114 "ray_cs interrupt DING - raylink timer expired\n");
2126 break; 2115 break;
2127 default: 2116 default:
2128 DEBUG(1, 2117 dev_dbg(&link->dev,
2129 "ray_cs interrupt Unexpected CCS 0x%x returned 0x%x\n", 2118 "ray_cs interrupt Unexpected CCS 0x%x returned 0x%x\n",
2130 rcsindex, cmd); 2119 rcsindex, cmd);
2131 } 2120 }
@@ -2139,7 +2128,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2139 ray_rx(dev, local, prcs); 2128 ray_rx(dev, local, prcs);
2140 break; 2129 break;
2141 case REJOIN_NET_COMPLETE: 2130 case REJOIN_NET_COMPLETE:
2142 DEBUG(1, "ray_cs interrupt rejoin net complete\n"); 2131 dev_dbg(&link->dev, "ray_cs interrupt rejoin net complete\n");
2143 local->card_status = CARD_ACQ_COMPLETE; 2132 local->card_status = CARD_ACQ_COMPLETE;
2144 /* do we need to clear tx buffers CCS's? */ 2133 /* do we need to clear tx buffers CCS's? */
2145 if (local->sparm.b4.a_network_type == ADHOC) { 2134 if (local->sparm.b4.a_network_type == ADHOC) {
@@ -2149,7 +2138,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2149 memcpy_fromio(&local->bss_id, 2138 memcpy_fromio(&local->bss_id,
2150 prcs->var.rejoin_net_complete. 2139 prcs->var.rejoin_net_complete.
2151 bssid, ADDRLEN); 2140 bssid, ADDRLEN);
2152 DEBUG(1, 2141 dev_dbg(&link->dev,
2153 "ray_cs new BSSID = %02x%02x%02x%02x%02x%02x\n", 2142 "ray_cs new BSSID = %02x%02x%02x%02x%02x%02x\n",
2154 local->bss_id[0], local->bss_id[1], 2143 local->bss_id[0], local->bss_id[1],
2155 local->bss_id[2], local->bss_id[3], 2144 local->bss_id[2], local->bss_id[3],
@@ -2159,15 +2148,15 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2159 } 2148 }
2160 break; 2149 break;
2161 case ROAMING_INITIATED: 2150 case ROAMING_INITIATED:
2162 DEBUG(1, "ray_cs interrupt roaming initiated\n"); 2151 dev_dbg(&link->dev, "ray_cs interrupt roaming initiated\n");
2163 netif_stop_queue(dev); 2152 netif_stop_queue(dev);
2164 local->card_status = CARD_DOING_ACQ; 2153 local->card_status = CARD_DOING_ACQ;
2165 break; 2154 break;
2166 case JAPAN_CALL_SIGN_RXD: 2155 case JAPAN_CALL_SIGN_RXD:
2167 DEBUG(1, "ray_cs interrupt japan call sign rx\n"); 2156 dev_dbg(&link->dev, "ray_cs interrupt japan call sign rx\n");
2168 break; 2157 break;
2169 default: 2158 default:
2170 DEBUG(1, 2159 dev_dbg(&link->dev,
2171 "ray_cs Unexpected interrupt for RCS 0x%x cmd = 0x%x\n", 2160 "ray_cs Unexpected interrupt for RCS 0x%x cmd = 0x%x\n",
2172 rcsindex, 2161 rcsindex,
2173 (unsigned int)readb(&prcs->interrupt_id)); 2162 (unsigned int)readb(&prcs->interrupt_id));
@@ -2186,7 +2175,7 @@ static void ray_rx(struct net_device *dev, ray_dev_t *local,
2186 int rx_len; 2175 int rx_len;
2187 unsigned int pkt_addr; 2176 unsigned int pkt_addr;
2188 void __iomem *pmsg; 2177 void __iomem *pmsg;
2189 DEBUG(4, "ray_rx process rx packet\n"); 2178 pr_debug("ray_rx process rx packet\n");
2190 2179
2191 /* Calculate address of packet within Rx buffer */ 2180 /* Calculate address of packet within Rx buffer */
2192 pkt_addr = ((readb(&prcs->var.rx_packet.rx_data_ptr[0]) << 8) 2181 pkt_addr = ((readb(&prcs->var.rx_packet.rx_data_ptr[0]) << 8)
@@ -2199,28 +2188,28 @@ static void ray_rx(struct net_device *dev, ray_dev_t *local,
2199 pmsg = local->rmem + pkt_addr; 2188 pmsg = local->rmem + pkt_addr;
2200 switch (readb(pmsg)) { 2189 switch (readb(pmsg)) {
2201 case DATA_TYPE: 2190 case DATA_TYPE:
2202 DEBUG(4, "ray_rx data type\n"); 2191 pr_debug("ray_rx data type\n");
2203 rx_data(dev, prcs, pkt_addr, rx_len); 2192 rx_data(dev, prcs, pkt_addr, rx_len);
2204 break; 2193 break;
2205 case AUTHENTIC_TYPE: 2194 case AUTHENTIC_TYPE:
2206 DEBUG(4, "ray_rx authentic type\n"); 2195 pr_debug("ray_rx authentic type\n");
2207 if (sniffer) 2196 if (sniffer)
2208 rx_data(dev, prcs, pkt_addr, rx_len); 2197 rx_data(dev, prcs, pkt_addr, rx_len);
2209 else 2198 else
2210 rx_authenticate(local, prcs, pkt_addr, rx_len); 2199 rx_authenticate(local, prcs, pkt_addr, rx_len);
2211 break; 2200 break;
2212 case DEAUTHENTIC_TYPE: 2201 case DEAUTHENTIC_TYPE:
2213 DEBUG(4, "ray_rx deauth type\n"); 2202 pr_debug("ray_rx deauth type\n");
2214 if (sniffer) 2203 if (sniffer)
2215 rx_data(dev, prcs, pkt_addr, rx_len); 2204 rx_data(dev, prcs, pkt_addr, rx_len);
2216 else 2205 else
2217 rx_deauthenticate(local, prcs, pkt_addr, rx_len); 2206 rx_deauthenticate(local, prcs, pkt_addr, rx_len);
2218 break; 2207 break;
2219 case NULL_MSG_TYPE: 2208 case NULL_MSG_TYPE:
2220 DEBUG(3, "ray_cs rx NULL msg\n"); 2209 pr_debug("ray_cs rx NULL msg\n");
2221 break; 2210 break;
2222 case BEACON_TYPE: 2211 case BEACON_TYPE:
2223 DEBUG(4, "ray_rx beacon type\n"); 2212 pr_debug("ray_rx beacon type\n");
2224 if (sniffer) 2213 if (sniffer)
2225 rx_data(dev, prcs, pkt_addr, rx_len); 2214 rx_data(dev, prcs, pkt_addr, rx_len);
2226 2215
@@ -2233,7 +2222,7 @@ static void ray_rx(struct net_device *dev, ray_dev_t *local,
2233 ray_get_stats(dev); 2222 ray_get_stats(dev);
2234 break; 2223 break;
2235 default: 2224 default:
2236 DEBUG(0, "ray_cs unknown pkt type %2x\n", 2225 pr_debug("ray_cs unknown pkt type %2x\n",
2237 (unsigned int)readb(pmsg)); 2226 (unsigned int)readb(pmsg));
2238 break; 2227 break;
2239 } 2228 }
@@ -2262,7 +2251,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2262 rx_len > 2251 rx_len >
2263 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + 2252 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
2264 FCS_LEN)) { 2253 FCS_LEN)) {
2265 DEBUG(0, 2254 pr_debug(
2266 "ray_cs invalid packet length %d received \n", 2255 "ray_cs invalid packet length %d received \n",
2267 rx_len); 2256 rx_len);
2268 return; 2257 return;
@@ -2273,17 +2262,17 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2273 rx_len > 2262 rx_len >
2274 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + 2263 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
2275 FCS_LEN)) { 2264 FCS_LEN)) {
2276 DEBUG(0, 2265 pr_debug(
2277 "ray_cs invalid packet length %d received \n", 2266 "ray_cs invalid packet length %d received \n",
2278 rx_len); 2267 rx_len);
2279 return; 2268 return;
2280 } 2269 }
2281 } 2270 }
2282 } 2271 }
2283 DEBUG(4, "ray_cs rx_data packet\n"); 2272 pr_debug("ray_cs rx_data packet\n");
2284 /* If fragmented packet, verify sizes of fragments add up */ 2273 /* If fragmented packet, verify sizes of fragments add up */
2285 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) { 2274 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) {
2286 DEBUG(1, "ray_cs rx'ed fragment\n"); 2275 pr_debug("ray_cs rx'ed fragment\n");
2287 tmp = (readb(&prcs->var.rx_packet.totalpacketlength[0]) << 8) 2276 tmp = (readb(&prcs->var.rx_packet.totalpacketlength[0]) << 8)
2288 + readb(&prcs->var.rx_packet.totalpacketlength[1]); 2277 + readb(&prcs->var.rx_packet.totalpacketlength[1]);
2289 total_len = tmp; 2278 total_len = tmp;
@@ -2301,7 +2290,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2301 } while (1); 2290 } while (1);
2302 2291
2303 if (tmp < 0) { 2292 if (tmp < 0) {
2304 DEBUG(0, 2293 pr_debug(
2305 "ray_cs rx_data fragment lengths don't add up\n"); 2294 "ray_cs rx_data fragment lengths don't add up\n");
2306 local->stats.rx_dropped++; 2295 local->stats.rx_dropped++;
2307 release_frag_chain(local, prcs); 2296 release_frag_chain(local, prcs);
@@ -2313,7 +2302,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2313 2302
2314 skb = dev_alloc_skb(total_len + 5); 2303 skb = dev_alloc_skb(total_len + 5);
2315 if (skb == NULL) { 2304 if (skb == NULL) {
2316 DEBUG(0, "ray_cs rx_data could not allocate skb\n"); 2305 pr_debug("ray_cs rx_data could not allocate skb\n");
2317 local->stats.rx_dropped++; 2306 local->stats.rx_dropped++;
2318 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) 2307 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF)
2319 release_frag_chain(local, prcs); 2308 release_frag_chain(local, prcs);
@@ -2321,7 +2310,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2321 } 2310 }
2322 skb_reserve(skb, 2); /* Align IP on 16 byte (TBD check this) */ 2311 skb_reserve(skb, 2); /* Align IP on 16 byte (TBD check this) */
2323 2312
2324 DEBUG(4, "ray_cs rx_data total_len = %x, rx_len = %x\n", total_len, 2313 pr_debug("ray_cs rx_data total_len = %x, rx_len = %x\n", total_len,
2325 rx_len); 2314 rx_len);
2326 2315
2327/************************/ 2316/************************/
@@ -2354,7 +2343,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2354 tmp = 17; 2343 tmp = 17;
2355 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) { 2344 if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) {
2356 prcslink = prcs; 2345 prcslink = prcs;
2357 DEBUG(1, "ray_cs rx_data in fragment loop\n"); 2346 pr_debug("ray_cs rx_data in fragment loop\n");
2358 do { 2347 do {
2359 prcslink = rcs_base(local) 2348 prcslink = rcs_base(local)
2360 + 2349 +
@@ -2426,8 +2415,8 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2426 memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN); 2415 memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN);
2427 memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN); 2416 memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN);
2428 2417
2429#ifdef PCMCIA_DEBUG 2418#if 0
2430 if (pc_debug > 3) { 2419 {
2431 print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ", 2420 print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ",
2432 DUMP_PREFIX_NONE, 16, 1, 2421 DUMP_PREFIX_NONE, 16, 1,
2433 skb->data, 64, true); 2422 skb->data, 64, true);
@@ -2441,7 +2430,7 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2441 2430
2442 if (psnap->dsap != 0xaa || psnap->ssap != 0xaa || psnap->ctrl != 3) { 2431 if (psnap->dsap != 0xaa || psnap->ssap != 0xaa || psnap->ctrl != 3) {
2443 /* not a snap type so leave it alone */ 2432 /* not a snap type so leave it alone */
2444 DEBUG(3, "ray_cs untranslate NOT SNAP %02x %02x %02x\n", 2433 pr_debug("ray_cs untranslate NOT SNAP %02x %02x %02x\n",
2445 psnap->dsap, psnap->ssap, psnap->ctrl); 2434 psnap->dsap, psnap->ssap, psnap->ctrl);
2446 2435
2447 delta = RX_MAC_HEADER_LENGTH - ETH_HLEN; 2436 delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
@@ -2450,7 +2439,7 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2450 } else { /* Its a SNAP */ 2439 } else { /* Its a SNAP */
2451 if (memcmp(psnap->org, org_bridge, 3) == 0) { 2440 if (memcmp(psnap->org, org_bridge, 3) == 0) {
2452 /* EtherII and nuke the LLC */ 2441 /* EtherII and nuke the LLC */
2453 DEBUG(3, "ray_cs untranslate Bridge encap\n"); 2442 pr_debug("ray_cs untranslate Bridge encap\n");
2454 delta = RX_MAC_HEADER_LENGTH 2443 delta = RX_MAC_HEADER_LENGTH
2455 + sizeof(struct snaphdr_t) - ETH_HLEN; 2444 + sizeof(struct snaphdr_t) - ETH_HLEN;
2456 peth = (struct ethhdr *)(skb->data + delta); 2445 peth = (struct ethhdr *)(skb->data + delta);
@@ -2459,14 +2448,14 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2459 switch (ntohs(type)) { 2448 switch (ntohs(type)) {
2460 case ETH_P_IPX: 2449 case ETH_P_IPX:
2461 case ETH_P_AARP: 2450 case ETH_P_AARP:
2462 DEBUG(3, "ray_cs untranslate RFC IPX/AARP\n"); 2451 pr_debug("ray_cs untranslate RFC IPX/AARP\n");
2463 delta = RX_MAC_HEADER_LENGTH - ETH_HLEN; 2452 delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
2464 peth = (struct ethhdr *)(skb->data + delta); 2453 peth = (struct ethhdr *)(skb->data + delta);
2465 peth->h_proto = 2454 peth->h_proto =
2466 htons(len - RX_MAC_HEADER_LENGTH); 2455 htons(len - RX_MAC_HEADER_LENGTH);
2467 break; 2456 break;
2468 default: 2457 default:
2469 DEBUG(3, "ray_cs untranslate RFC default\n"); 2458 pr_debug("ray_cs untranslate RFC default\n");
2470 delta = RX_MAC_HEADER_LENGTH + 2459 delta = RX_MAC_HEADER_LENGTH +
2471 sizeof(struct snaphdr_t) - ETH_HLEN; 2460 sizeof(struct snaphdr_t) - ETH_HLEN;
2472 peth = (struct ethhdr *)(skb->data + delta); 2461 peth = (struct ethhdr *)(skb->data + delta);
@@ -2482,12 +2471,12 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2482 } 2471 }
2483/* TBD reserve skb_reserve(skb, delta); */ 2472/* TBD reserve skb_reserve(skb, delta); */
2484 skb_pull(skb, delta); 2473 skb_pull(skb, delta);
2485 DEBUG(3, "untranslate after skb_pull(%d), skb->data = %p\n", delta, 2474 pr_debug("untranslate after skb_pull(%d), skb->data = %p\n", delta,
2486 skb->data); 2475 skb->data);
2487 memcpy(peth->h_dest, destaddr, ADDRLEN); 2476 memcpy(peth->h_dest, destaddr, ADDRLEN);
2488 memcpy(peth->h_source, srcaddr, ADDRLEN); 2477 memcpy(peth->h_source, srcaddr, ADDRLEN);
2489#ifdef PCMCIA_DEBUG 2478#if 0
2490 if (pc_debug > 3) { 2479 {
2491 int i; 2480 int i;
2492 printk(KERN_DEBUG "skb->data after untranslate:"); 2481 printk(KERN_DEBUG "skb->data after untranslate:");
2493 for (i = 0; i < 64; i++) 2482 for (i = 0; i < 64; i++)
@@ -2529,7 +2518,7 @@ static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs)
2529 while (tmp--) { 2518 while (tmp--) {
2530 writeb(CCS_BUFFER_FREE, &prcslink->buffer_status); 2519 writeb(CCS_BUFFER_FREE, &prcslink->buffer_status);
2531 if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) { 2520 if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) {
2532 DEBUG(1, "ray_cs interrupt bad rcsindex = 0x%x\n", 2521 pr_debug("ray_cs interrupt bad rcsindex = 0x%x\n",
2533 rcsindex); 2522 rcsindex);
2534 break; 2523 break;
2535 } 2524 }
@@ -2543,9 +2532,9 @@ static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs)
2543static void authenticate(ray_dev_t *local) 2532static void authenticate(ray_dev_t *local)
2544{ 2533{
2545 struct pcmcia_device *link = local->finder; 2534 struct pcmcia_device *link = local->finder;
2546 DEBUG(0, "ray_cs Starting authentication.\n"); 2535 dev_dbg(&link->dev, "ray_cs Starting authentication.\n");
2547 if (!(pcmcia_dev_present(link))) { 2536 if (!(pcmcia_dev_present(link))) {
2548 DEBUG(2, "ray_cs authenticate - device not present\n"); 2537 dev_dbg(&link->dev, "ray_cs authenticate - device not present\n");
2549 return; 2538 return;
2550 } 2539 }
2551 2540
@@ -2573,11 +2562,11 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
2573 copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff); 2562 copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff);
2574 /* if we are trying to get authenticated */ 2563 /* if we are trying to get authenticated */
2575 if (local->sparm.b4.a_network_type == ADHOC) { 2564 if (local->sparm.b4.a_network_type == ADHOC) {
2576 DEBUG(1, "ray_cs rx_auth var= %02x %02x %02x %02x %02x %02x\n", 2565 pr_debug("ray_cs rx_auth var= %02x %02x %02x %02x %02x %02x\n",
2577 msg->var[0], msg->var[1], msg->var[2], msg->var[3], 2566 msg->var[0], msg->var[1], msg->var[2], msg->var[3],
2578 msg->var[4], msg->var[5]); 2567 msg->var[4], msg->var[5]);
2579 if (msg->var[2] == 1) { 2568 if (msg->var[2] == 1) {
2580 DEBUG(0, "ray_cs Sending authentication response.\n"); 2569 pr_debug("ray_cs Sending authentication response.\n");
2581 if (!build_auth_frame 2570 if (!build_auth_frame
2582 (local, msg->mac.addr_2, OPEN_AUTH_RESPONSE)) { 2571 (local, msg->mac.addr_2, OPEN_AUTH_RESPONSE)) {
2583 local->authentication_state = NEED_TO_AUTH; 2572 local->authentication_state = NEED_TO_AUTH;
@@ -2591,13 +2580,13 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
2591 /* Verify authentication sequence #2 and success */ 2580 /* Verify authentication sequence #2 and success */
2592 if (msg->var[2] == 2) { 2581 if (msg->var[2] == 2) {
2593 if ((msg->var[3] | msg->var[4]) == 0) { 2582 if ((msg->var[3] | msg->var[4]) == 0) {
2594 DEBUG(1, "Authentication successful\n"); 2583 pr_debug("Authentication successful\n");
2595 local->card_status = CARD_AUTH_COMPLETE; 2584 local->card_status = CARD_AUTH_COMPLETE;
2596 associate(local); 2585 associate(local);
2597 local->authentication_state = 2586 local->authentication_state =
2598 AUTHENTICATED; 2587 AUTHENTICATED;
2599 } else { 2588 } else {
2600 DEBUG(0, "Authentication refused\n"); 2589 pr_debug("Authentication refused\n");
2601 local->card_status = CARD_AUTH_REFUSED; 2590 local->card_status = CARD_AUTH_REFUSED;
2602 join_net((u_long) local); 2591 join_net((u_long) local);
2603 local->authentication_state = 2592 local->authentication_state =
@@ -2617,22 +2606,22 @@ static void associate(ray_dev_t *local)
2617 struct net_device *dev = link->priv; 2606 struct net_device *dev = link->priv;
2618 int ccsindex; 2607 int ccsindex;
2619 if (!(pcmcia_dev_present(link))) { 2608 if (!(pcmcia_dev_present(link))) {
2620 DEBUG(2, "ray_cs associate - device not present\n"); 2609 dev_dbg(&link->dev, "ray_cs associate - device not present\n");
2621 return; 2610 return;
2622 } 2611 }
2623 /* If no tx buffers available, return */ 2612 /* If no tx buffers available, return */
2624 if ((ccsindex = get_free_ccs(local)) < 0) { 2613 if ((ccsindex = get_free_ccs(local)) < 0) {
2625/* TBD should never be here but... what if we are? */ 2614/* TBD should never be here but... what if we are? */
2626 DEBUG(1, "ray_cs associate - No free ccs\n"); 2615 dev_dbg(&link->dev, "ray_cs associate - No free ccs\n");
2627 return; 2616 return;
2628 } 2617 }
2629 DEBUG(1, "ray_cs Starting association with access point\n"); 2618 dev_dbg(&link->dev, "ray_cs Starting association with access point\n");
2630 pccs = ccs_base(local) + ccsindex; 2619 pccs = ccs_base(local) + ccsindex;
2631 /* fill in the CCS */ 2620 /* fill in the CCS */
2632 writeb(CCS_START_ASSOCIATION, &pccs->cmd); 2621 writeb(CCS_START_ASSOCIATION, &pccs->cmd);
2633 /* Interrupt the firmware to process the command */ 2622 /* Interrupt the firmware to process the command */
2634 if (interrupt_ecf(local, ccsindex)) { 2623 if (interrupt_ecf(local, ccsindex)) {
2635 DEBUG(1, "ray_cs associate failed - ECF not ready for intr\n"); 2624 dev_dbg(&link->dev, "ray_cs associate failed - ECF not ready for intr\n");
2636 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 2625 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
2637 2626
2638 del_timer(&local->timer); 2627 del_timer(&local->timer);
@@ -2655,7 +2644,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
2655/* UCHAR buff[256]; 2644/* UCHAR buff[256];
2656 struct rx_msg *msg = (struct rx_msg *)buff; 2645 struct rx_msg *msg = (struct rx_msg *)buff;
2657*/ 2646*/
2658 DEBUG(0, "Deauthentication frame received\n"); 2647 pr_debug("Deauthentication frame received\n");
2659 local->authentication_state = UNAUTHENTICATED; 2648 local->authentication_state = UNAUTHENTICATED;
2660 /* Need to reauthenticate or rejoin depending on reason code */ 2649 /* Need to reauthenticate or rejoin depending on reason code */
2661/* copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff); 2650/* copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff);
@@ -2823,7 +2812,7 @@ static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type)
2823 2812
2824 /* If no tx buffers available, return */ 2813 /* If no tx buffers available, return */
2825 if ((ccsindex = get_free_tx_ccs(local)) < 0) { 2814 if ((ccsindex = get_free_tx_ccs(local)) < 0) {
2826 DEBUG(1, "ray_cs send authenticate - No free tx ccs\n"); 2815 pr_debug("ray_cs send authenticate - No free tx ccs\n");
2827 return -1; 2816 return -1;
2828 } 2817 }
2829 2818
@@ -2855,7 +2844,7 @@ static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type)
2855 2844
2856 /* Interrupt the firmware to process the command */ 2845 /* Interrupt the firmware to process the command */
2857 if (interrupt_ecf(local, ccsindex)) { 2846 if (interrupt_ecf(local, ccsindex)) {
2858 DEBUG(1, 2847 pr_debug(
2859 "ray_cs send authentication request failed - ECF not ready for intr\n"); 2848 "ray_cs send authentication request failed - ECF not ready for intr\n");
2860 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status); 2849 writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
2861 return -1; 2850 return -1;
@@ -2942,9 +2931,9 @@ static int __init init_ray_cs(void)
2942{ 2931{
2943 int rc; 2932 int rc;
2944 2933
2945 DEBUG(1, "%s\n", rcsid); 2934 pr_debug("%s\n", rcsid);
2946 rc = pcmcia_register_driver(&ray_driver); 2935 rc = pcmcia_register_driver(&ray_driver);
2947 DEBUG(1, "raylink init_module register_pcmcia_driver returns 0x%x\n", 2936 pr_debug("raylink init_module register_pcmcia_driver returns 0x%x\n",
2948 rc); 2937 rc);
2949 2938
2950#ifdef CONFIG_PROC_FS 2939#ifdef CONFIG_PROC_FS
@@ -2964,7 +2953,7 @@ static int __init init_ray_cs(void)
2964 2953
2965static void __exit exit_ray_cs(void) 2954static void __exit exit_ray_cs(void)
2966{ 2955{
2967 DEBUG(0, "ray_cs: cleanup_module\n"); 2956 pr_debug("ray_cs: cleanup_module\n");
2968 2957
2969#ifdef CONFIG_PROC_FS 2958#ifdef CONFIG_PROC_FS
2970 remove_proc_entry("driver/ray_cs/ray_cs", NULL); 2959 remove_proc_entry("driver/ray_cs/ray_cs", NULL);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
index 9fab13e4004e..cad8037ab2af 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
@@ -18,6 +18,7 @@
18#include <net/mac80211.h> 18#include <net/mac80211.h>
19 19
20#include "rtl8187.h" 20#include "rtl8187.h"
21#include "rtl8187_rfkill.h"
21 22
22static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv) 23static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv)
23{ 24{
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 431a20ec6db6..33918fd5b231 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -3656,10 +3656,7 @@ wv_pcmcia_reset(struct net_device * dev)
3656 3656
3657 i = pcmcia_access_configuration_register(link, &reg); 3657 i = pcmcia_access_configuration_register(link, &reg);
3658 if (i != 0) 3658 if (i != 0)
3659 {
3660 cs_error(link, AccessConfigurationRegister, i);
3661 return FALSE; 3659 return FALSE;
3662 }
3663 3660
3664#ifdef DEBUG_CONFIG_INFO 3661#ifdef DEBUG_CONFIG_INFO
3665 printk(KERN_DEBUG "%s: wavelan_pcmcia_reset(): Config reg is 0x%x\n", 3662 printk(KERN_DEBUG "%s: wavelan_pcmcia_reset(): Config reg is 0x%x\n",
@@ -3670,19 +3667,13 @@ wv_pcmcia_reset(struct net_device * dev)
3670 reg.Value = reg.Value | COR_SW_RESET; 3667 reg.Value = reg.Value | COR_SW_RESET;
3671 i = pcmcia_access_configuration_register(link, &reg); 3668 i = pcmcia_access_configuration_register(link, &reg);
3672 if (i != 0) 3669 if (i != 0)
3673 {
3674 cs_error(link, AccessConfigurationRegister, i);
3675 return FALSE; 3670 return FALSE;
3676 }
3677 3671
3678 reg.Action = CS_WRITE; 3672 reg.Action = CS_WRITE;
3679 reg.Value = COR_LEVEL_IRQ | COR_CONFIG; 3673 reg.Value = COR_LEVEL_IRQ | COR_CONFIG;
3680 i = pcmcia_access_configuration_register(link, &reg); 3674 i = pcmcia_access_configuration_register(link, &reg);
3681 if (i != 0) 3675 if (i != 0)
3682 {
3683 cs_error(link, AccessConfigurationRegister, i);
3684 return FALSE; 3676 return FALSE;
3685 }
3686 3677
3687#ifdef DEBUG_CONFIG_TRACE 3678#ifdef DEBUG_CONFIG_TRACE
3688 printk(KERN_DEBUG "%s: <-wv_pcmcia_reset()\n", dev->name); 3679 printk(KERN_DEBUG "%s: <-wv_pcmcia_reset()\n", dev->name);
@@ -3857,10 +3848,7 @@ wv_pcmcia_config(struct pcmcia_device * link)
3857 { 3848 {
3858 i = pcmcia_request_io(link, &link->io); 3849 i = pcmcia_request_io(link, &link->io);
3859 if (i != 0) 3850 if (i != 0)
3860 {
3861 cs_error(link, RequestIO, i);
3862 break; 3851 break;
3863 }
3864 3852
3865 /* 3853 /*
3866 * Now allocate an interrupt line. Note that this does not 3854 * Now allocate an interrupt line. Note that this does not
@@ -3868,10 +3856,7 @@ wv_pcmcia_config(struct pcmcia_device * link)
3868 */ 3856 */
3869 i = pcmcia_request_irq(link, &link->irq); 3857 i = pcmcia_request_irq(link, &link->irq);
3870 if (i != 0) 3858 if (i != 0)
3871 {
3872 cs_error(link, RequestIRQ, i);
3873 break; 3859 break;
3874 }
3875 3860
3876 /* 3861 /*
3877 * This actually configures the PCMCIA socket -- setting up 3862 * This actually configures the PCMCIA socket -- setting up
@@ -3880,10 +3865,7 @@ wv_pcmcia_config(struct pcmcia_device * link)
3880 link->conf.ConfigIndex = 1; 3865 link->conf.ConfigIndex = 1;
3881 i = pcmcia_request_configuration(link, &link->conf); 3866 i = pcmcia_request_configuration(link, &link->conf);
3882 if (i != 0) 3867 if (i != 0)
3883 {
3884 cs_error(link, RequestConfiguration, i);
3885 break; 3868 break;
3886 }
3887 3869
3888 /* 3870 /*
3889 * Allocate a small memory window. Note that the struct pcmcia_device 3871 * Allocate a small memory window. Note that the struct pcmcia_device
@@ -3894,24 +3876,18 @@ wv_pcmcia_config(struct pcmcia_device * link)
3894 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 3876 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
3895 req.Base = req.Size = 0; 3877 req.Base = req.Size = 0;
3896 req.AccessSpeed = mem_speed; 3878 req.AccessSpeed = mem_speed;
3897 i = pcmcia_request_window(&link, &req, &link->win); 3879 i = pcmcia_request_window(link, &req, &link->win);
3898 if (i != 0) 3880 if (i != 0)
3899 {
3900 cs_error(link, RequestWindow, i);
3901 break; 3881 break;
3902 }
3903 3882
3904 lp->mem = ioremap(req.Base, req.Size); 3883 lp->mem = ioremap(req.Base, req.Size);
3905 dev->mem_start = (u_long)lp->mem; 3884 dev->mem_start = (u_long)lp->mem;
3906 dev->mem_end = dev->mem_start + req.Size; 3885 dev->mem_end = dev->mem_start + req.Size;
3907 3886
3908 mem.CardOffset = 0; mem.Page = 0; 3887 mem.CardOffset = 0; mem.Page = 0;
3909 i = pcmcia_map_mem_page(link->win, &mem); 3888 i = pcmcia_map_mem_page(link, link->win, &mem);
3910 if (i != 0) 3889 if (i != 0)
3911 {
3912 cs_error(link, MapMemPage, i);
3913 break; 3890 break;
3914 }
3915 3891
3916 /* Feed device with this info... */ 3892 /* Feed device with this info... */
3917 dev->irq = link->irq.AssignedIRQ; 3893 dev->irq = link->irq.AssignedIRQ;
@@ -3923,7 +3899,7 @@ wv_pcmcia_config(struct pcmcia_device * link)
3923 lp->mem, dev->irq, (u_int) dev->base_addr); 3899 lp->mem, dev->irq, (u_int) dev->base_addr);
3924#endif 3900#endif
3925 3901
3926 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 3902 SET_NETDEV_DEV(dev, &link->dev);
3927 i = register_netdev(dev); 3903 i = register_netdev(dev);
3928 if(i != 0) 3904 if(i != 0)
3929 { 3905 {
@@ -4462,8 +4438,7 @@ wavelan_probe(struct pcmcia_device *p_dev)
4462 p_dev->io.IOAddrLines = 3; 4438 p_dev->io.IOAddrLines = 3;
4463 4439
4464 /* Interrupt setup */ 4440 /* Interrupt setup */
4465 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 4441 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
4466 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
4467 p_dev->irq.Handler = wavelan_interrupt; 4442 p_dev->irq.Handler = wavelan_interrupt;
4468 4443
4469 /* General socket configuration */ 4444 /* General socket configuration */
@@ -4475,7 +4450,7 @@ wavelan_probe(struct pcmcia_device *p_dev)
4475 if (!dev) 4450 if (!dev)
4476 return -ENOMEM; 4451 return -ENOMEM;
4477 4452
4478 p_dev->priv = p_dev->irq.Instance = dev; 4453 p_dev->priv = dev;
4479 4454
4480 lp = netdev_priv(dev); 4455 lp = netdev_priv(dev);
4481 4456
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 4f1e0cfe609b..5f0401a52cff 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -67,23 +67,7 @@
67/* For rough constant delay */ 67/* For rough constant delay */
68#define WL3501_NOPLOOP(n) { int x = 0; while (x++ < n) slow_down_io(); } 68#define WL3501_NOPLOOP(n) { int x = 0; while (x++ < n) slow_down_io(); }
69 69
70/* 70
71 * All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If you do not
72 * define PCMCIA_DEBUG at all, all the debug code will be left out. If you
73 * compile with PCMCIA_DEBUG=0, the debug code will be present but disabled --
74 * but it can then be enabled for specific modules at load time with a
75 * 'pc_debug=#' option to insmod.
76 */
77#define PCMCIA_DEBUG 0
78#ifdef PCMCIA_DEBUG
79static int pc_debug = PCMCIA_DEBUG;
80module_param(pc_debug, int, 0);
81#define dprintk(n, format, args...) \
82 { if (pc_debug > (n)) \
83 printk(KERN_INFO "%s: " format "\n", __func__ , ##args); }
84#else
85#define dprintk(n, format, args...)
86#endif
87 71
88#define wl3501_outb(a, b) { outb(a, b); slow_down_io(); } 72#define wl3501_outb(a, b) { outb(a, b); slow_down_io(); }
89#define wl3501_outb_p(a, b) { outb_p(a, b); slow_down_io(); } 73#define wl3501_outb_p(a, b) { outb_p(a, b); slow_down_io(); }
@@ -684,10 +668,10 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
684 int matchflag = 0; 668 int matchflag = 0;
685 struct wl3501_scan_confirm sig; 669 struct wl3501_scan_confirm sig;
686 670
687 dprintk(3, "entry"); 671 pr_debug("entry");
688 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 672 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
689 if (sig.status == WL3501_STATUS_SUCCESS) { 673 if (sig.status == WL3501_STATUS_SUCCESS) {
690 dprintk(3, "success"); 674 pr_debug("success");
691 if ((this->net_type == IW_MODE_INFRA && 675 if ((this->net_type == IW_MODE_INFRA &&
692 (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || 676 (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
693 (this->net_type == IW_MODE_ADHOC && 677 (this->net_type == IW_MODE_ADHOC &&
@@ -722,7 +706,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
722 } 706 }
723 } 707 }
724 } else if (sig.status == WL3501_STATUS_TIMEOUT) { 708 } else if (sig.status == WL3501_STATUS_TIMEOUT) {
725 dprintk(3, "timeout"); 709 pr_debug("timeout");
726 this->join_sta_bss = 0; 710 this->join_sta_bss = 0;
727 for (i = this->join_sta_bss; i < this->bss_cnt; i++) 711 for (i = this->join_sta_bss; i < this->bss_cnt; i++)
728 if (!wl3501_mgmt_join(this, i)) 712 if (!wl3501_mgmt_join(this, i))
@@ -879,7 +863,7 @@ static int wl3501_mgmt_auth(struct wl3501_card *this)
879 .timeout = 1000, 863 .timeout = 1000,
880 }; 864 };
881 865
882 dprintk(3, "entry"); 866 pr_debug("entry");
883 memcpy(sig.mac_addr, this->bssid, ETH_ALEN); 867 memcpy(sig.mac_addr, this->bssid, ETH_ALEN);
884 return wl3501_esbq_exec(this, &sig, sizeof(sig)); 868 return wl3501_esbq_exec(this, &sig, sizeof(sig));
885} 869}
@@ -893,7 +877,7 @@ static int wl3501_mgmt_association(struct wl3501_card *this)
893 .cap_info = this->cap_info, 877 .cap_info = this->cap_info,
894 }; 878 };
895 879
896 dprintk(3, "entry"); 880 pr_debug("entry");
897 memcpy(sig.mac_addr, this->bssid, ETH_ALEN); 881 memcpy(sig.mac_addr, this->bssid, ETH_ALEN);
898 return wl3501_esbq_exec(this, &sig, sizeof(sig)); 882 return wl3501_esbq_exec(this, &sig, sizeof(sig));
899} 883}
@@ -903,7 +887,7 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
903 struct wl3501_card *this = netdev_priv(dev); 887 struct wl3501_card *this = netdev_priv(dev);
904 struct wl3501_join_confirm sig; 888 struct wl3501_join_confirm sig;
905 889
906 dprintk(3, "entry"); 890 pr_debug("entry");
907 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 891 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
908 if (sig.status == WL3501_STATUS_SUCCESS) { 892 if (sig.status == WL3501_STATUS_SUCCESS) {
909 if (this->net_type == IW_MODE_INFRA) { 893 if (this->net_type == IW_MODE_INFRA) {
@@ -962,7 +946,7 @@ static inline void wl3501_md_confirm_interrupt(struct net_device *dev,
962{ 946{
963 struct wl3501_md_confirm sig; 947 struct wl3501_md_confirm sig;
964 948
965 dprintk(3, "entry"); 949 pr_debug("entry");
966 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 950 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
967 wl3501_free_tx_buffer(this, sig.data); 951 wl3501_free_tx_buffer(this, sig.data);
968 if (netif_queue_stopped(dev)) 952 if (netif_queue_stopped(dev))
@@ -1017,7 +1001,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
1017static inline void wl3501_get_confirm_interrupt(struct wl3501_card *this, 1001static inline void wl3501_get_confirm_interrupt(struct wl3501_card *this,
1018 u16 addr, void *sig, int size) 1002 u16 addr, void *sig, int size)
1019{ 1003{
1020 dprintk(3, "entry"); 1004 pr_debug("entry");
1021 wl3501_get_from_wla(this, addr, &this->sig_get_confirm, 1005 wl3501_get_from_wla(this, addr, &this->sig_get_confirm,
1022 sizeof(this->sig_get_confirm)); 1006 sizeof(this->sig_get_confirm));
1023 wake_up(&this->wait); 1007 wake_up(&this->wait);
@@ -1029,7 +1013,7 @@ static inline void wl3501_start_confirm_interrupt(struct net_device *dev,
1029{ 1013{
1030 struct wl3501_start_confirm sig; 1014 struct wl3501_start_confirm sig;
1031 1015
1032 dprintk(3, "entry"); 1016 pr_debug("entry");
1033 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 1017 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
1034 if (sig.status == WL3501_STATUS_SUCCESS) 1018 if (sig.status == WL3501_STATUS_SUCCESS)
1035 netif_wake_queue(dev); 1019 netif_wake_queue(dev);
@@ -1041,7 +1025,7 @@ static inline void wl3501_assoc_confirm_interrupt(struct net_device *dev,
1041 struct wl3501_card *this = netdev_priv(dev); 1025 struct wl3501_card *this = netdev_priv(dev);
1042 struct wl3501_assoc_confirm sig; 1026 struct wl3501_assoc_confirm sig;
1043 1027
1044 dprintk(3, "entry"); 1028 pr_debug("entry");
1045 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 1029 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
1046 1030
1047 if (sig.status == WL3501_STATUS_SUCCESS) 1031 if (sig.status == WL3501_STATUS_SUCCESS)
@@ -1053,7 +1037,7 @@ static inline void wl3501_auth_confirm_interrupt(struct wl3501_card *this,
1053{ 1037{
1054 struct wl3501_auth_confirm sig; 1038 struct wl3501_auth_confirm sig;
1055 1039
1056 dprintk(3, "entry"); 1040 pr_debug("entry");
1057 wl3501_get_from_wla(this, addr, &sig, sizeof(sig)); 1041 wl3501_get_from_wla(this, addr, &sig, sizeof(sig));
1058 1042
1059 if (sig.status == WL3501_STATUS_SUCCESS) 1043 if (sig.status == WL3501_STATUS_SUCCESS)
@@ -1069,7 +1053,7 @@ static inline void wl3501_rx_interrupt(struct net_device *dev)
1069 u8 sig_id; 1053 u8 sig_id;
1070 struct wl3501_card *this = netdev_priv(dev); 1054 struct wl3501_card *this = netdev_priv(dev);
1071 1055
1072 dprintk(3, "entry"); 1056 pr_debug("entry");
1073loop: 1057loop:
1074 morepkts = 0; 1058 morepkts = 0;
1075 if (!wl3501_esbq_confirm(this)) 1059 if (!wl3501_esbq_confirm(this))
@@ -1302,7 +1286,7 @@ static int wl3501_reset(struct net_device *dev)
1302 wl3501_ack_interrupt(this); 1286 wl3501_ack_interrupt(this);
1303 wl3501_unblock_interrupt(this); 1287 wl3501_unblock_interrupt(this);
1304 wl3501_mgmt_scan(this, 100); 1288 wl3501_mgmt_scan(this, 100);
1305 dprintk(1, "%s: device reset", dev->name); 1289 pr_debug("%s: device reset", dev->name);
1306 rc = 0; 1290 rc = 0;
1307out: 1291out:
1308 return rc; 1292 return rc;
@@ -1376,7 +1360,7 @@ static int wl3501_open(struct net_device *dev)
1376 link->open++; 1360 link->open++;
1377 1361
1378 /* Initial WL3501 firmware */ 1362 /* Initial WL3501 firmware */
1379 dprintk(1, "%s: Initialize WL3501 firmware...", dev->name); 1363 pr_debug("%s: Initialize WL3501 firmware...", dev->name);
1380 if (wl3501_init_firmware(this)) 1364 if (wl3501_init_firmware(this))
1381 goto fail; 1365 goto fail;
1382 /* Initial device variables */ 1366 /* Initial device variables */
@@ -1388,7 +1372,7 @@ static int wl3501_open(struct net_device *dev)
1388 wl3501_unblock_interrupt(this); 1372 wl3501_unblock_interrupt(this);
1389 wl3501_mgmt_scan(this, 100); 1373 wl3501_mgmt_scan(this, 100);
1390 rc = 0; 1374 rc = 0;
1391 dprintk(1, "%s: WL3501 opened", dev->name); 1375 pr_debug("%s: WL3501 opened", dev->name);
1392 printk(KERN_INFO "%s: Card Name: %s\n" 1376 printk(KERN_INFO "%s: Card Name: %s\n"
1393 "%s: Firmware Date: %s\n", 1377 "%s: Firmware Date: %s\n",
1394 dev->name, this->card_name, 1378 dev->name, this->card_name,
@@ -1914,8 +1898,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1914 p_dev->io.IOAddrLines = 5; 1898 p_dev->io.IOAddrLines = 5;
1915 1899
1916 /* Interrupt setup */ 1900 /* Interrupt setup */
1917 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 1901 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
1918 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
1919 p_dev->irq.Handler = wl3501_interrupt; 1902 p_dev->irq.Handler = wl3501_interrupt;
1920 1903
1921 /* General socket configuration */ 1904 /* General socket configuration */
@@ -1938,16 +1921,13 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1938 dev->wireless_handlers = &wl3501_handler_def; 1921 dev->wireless_handlers = &wl3501_handler_def;
1939 SET_ETHTOOL_OPS(dev, &ops); 1922 SET_ETHTOOL_OPS(dev, &ops);
1940 netif_stop_queue(dev); 1923 netif_stop_queue(dev);
1941 p_dev->priv = p_dev->irq.Instance = dev; 1924 p_dev->priv = dev;
1942 1925
1943 return wl3501_config(p_dev); 1926 return wl3501_config(p_dev);
1944out_link: 1927out_link:
1945 return -ENOMEM; 1928 return -ENOMEM;
1946} 1929}
1947 1930
1948#define CS_CHECK(fn, ret) \
1949do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
1950
1951/** 1931/**
1952 * wl3501_config - configure the PCMCIA socket and make eth device available 1932 * wl3501_config - configure the PCMCIA socket and make eth device available
1953 * @link - FILL_IN 1933 * @link - FILL_IN
@@ -1959,7 +1939,7 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
1959static int wl3501_config(struct pcmcia_device *link) 1939static int wl3501_config(struct pcmcia_device *link)
1960{ 1940{
1961 struct net_device *dev = link->priv; 1941 struct net_device *dev = link->priv;
1962 int i = 0, j, last_fn, last_ret; 1942 int i = 0, j, ret;
1963 struct wl3501_card *this; 1943 struct wl3501_card *this;
1964 1944
1965 /* Try allocating IO ports. This tries a few fixed addresses. If you 1945 /* Try allocating IO ports. This tries a few fixed addresses. If you
@@ -1975,24 +1955,26 @@ static int wl3501_config(struct pcmcia_device *link)
1975 if (i == 0) 1955 if (i == 0)
1976 break; 1956 break;
1977 } 1957 }
1978 if (i != 0) { 1958 if (i != 0)
1979 cs_error(link, RequestIO, i);
1980 goto failed; 1959 goto failed;
1981 }
1982 1960
1983 /* Now allocate an interrupt line. Note that this does not actually 1961 /* Now allocate an interrupt line. Note that this does not actually
1984 * assign a handler to the interrupt. */ 1962 * assign a handler to the interrupt. */
1985 1963
1986 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 1964 ret = pcmcia_request_irq(link, &link->irq);
1965 if (ret)
1966 goto failed;
1987 1967
1988 /* This actually configures the PCMCIA socket -- setting up the I/O 1968 /* This actually configures the PCMCIA socket -- setting up the I/O
1989 * windows and the interrupt mapping. */ 1969 * windows and the interrupt mapping. */
1990 1970
1991 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 1971 ret = pcmcia_request_configuration(link, &link->conf);
1972 if (ret)
1973 goto failed;
1992 1974
1993 dev->irq = link->irq.AssignedIRQ; 1975 dev->irq = link->irq.AssignedIRQ;
1994 dev->base_addr = link->io.BasePort1; 1976 dev->base_addr = link->io.BasePort1;
1995 SET_NETDEV_DEV(dev, &handle_to_dev(link)); 1977 SET_NETDEV_DEV(dev, &link->dev);
1996 if (register_netdev(dev)) { 1978 if (register_netdev(dev)) {
1997 printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n"); 1979 printk(KERN_NOTICE "wl3501_cs: register_netdev() failed\n");
1998 goto failed; 1980 goto failed;
@@ -2041,8 +2023,6 @@ static int wl3501_config(struct pcmcia_device *link)
2041 netif_start_queue(dev); 2023 netif_start_queue(dev);
2042 return 0; 2024 return 0;
2043 2025
2044cs_failed:
2045 cs_error(link, last_fn, last_ret);
2046failed: 2026failed:
2047 wl3501_release(link); 2027 wl3501_release(link);
2048 return -ENODEV; 2028 return -ENODEV;
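
The hunk above is part of a tree-wide PCMCIA cleanup: the driver-private CS_CHECK() macro and cs_error() reporting are dropped in favour of checking each pcmcia_request_*() call directly and jumping to the driver's own cleanup label. A minimal sketch of the resulting pattern follows; example_config() and its failed: cleanup are illustrative placeholders, not code from the patch.

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ds.h>

static int example_config(struct pcmcia_device *link)
{
	int ret;

	/* each request is checked on its own; any failure falls through
	 * to the driver's existing cleanup label */
	ret = pcmcia_request_irq(link, &link->irq);
	if (ret)
		goto failed;

	ret = pcmcia_request_configuration(link, &link->conf);
	if (ret)
		goto failed;

	return 0;

failed:
	/* driver-specific release, e.g. wl3501_release(link) */
	return -ENODEV;
}
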
diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c
index 8fdfa4f537a6..7dd370fa3439 100644
--- a/drivers/parport/parport_cs.c
+++ b/drivers/parport/parport_cs.c
@@ -67,14 +67,6 @@ MODULE_LICENSE("Dual MPL/GPL");
67 67
68INT_MODULE_PARM(epp_mode, 1); 68INT_MODULE_PARM(epp_mode, 1);
69 69
70#ifdef PCMCIA_DEBUG
71INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
72#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
73static char *version =
74"parport_cs.c 1.29 2002/10/11 06:57:41 (David Hinds)";
75#else
76#define DEBUG(n, args...)
77#endif
78 70
79/*====================================================================*/ 71/*====================================================================*/
80 72
@@ -103,7 +95,7 @@ static int parport_probe(struct pcmcia_device *link)
103{ 95{
104 parport_info_t *info; 96 parport_info_t *info;
105 97
106 DEBUG(0, "parport_attach()\n"); 98 dev_dbg(&link->dev, "parport_attach()\n");
107 99
108 /* Create new parport device */ 100 /* Create new parport device */
109 info = kzalloc(sizeof(*info), GFP_KERNEL); 101 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -114,7 +106,6 @@ static int parport_probe(struct pcmcia_device *link)
114 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 106 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
115 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 107 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
116 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 108 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
117 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
118 link->conf.Attributes = CONF_ENABLE_IRQ; 109 link->conf.Attributes = CONF_ENABLE_IRQ;
119 link->conf.IntType = INT_MEMORY_AND_IO; 110 link->conf.IntType = INT_MEMORY_AND_IO;
120 111
@@ -132,7 +123,7 @@ static int parport_probe(struct pcmcia_device *link)
132 123
133static void parport_detach(struct pcmcia_device *link) 124static void parport_detach(struct pcmcia_device *link)
134{ 125{
135 DEBUG(0, "parport_detach(0x%p)\n", link); 126 dev_dbg(&link->dev, "parport_detach\n");
136 127
137 parport_cs_release(link); 128 parport_cs_release(link);
138 129
@@ -147,9 +138,6 @@ static void parport_detach(struct pcmcia_device *link)
147 138
148======================================================================*/ 139======================================================================*/
149 140
150#define CS_CHECK(fn, ret) \
151do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
152
153static int parport_config_check(struct pcmcia_device *p_dev, 141static int parport_config_check(struct pcmcia_device *p_dev,
154 cistpl_cftable_entry_t *cfg, 142 cistpl_cftable_entry_t *cfg,
155 cistpl_cftable_entry_t *dflt, 143 cistpl_cftable_entry_t *dflt,
@@ -178,18 +166,20 @@ static int parport_config(struct pcmcia_device *link)
178{ 166{
179 parport_info_t *info = link->priv; 167 parport_info_t *info = link->priv;
180 struct parport *p; 168 struct parport *p;
181 int last_ret, last_fn; 169 int ret;
182 170
183 DEBUG(0, "parport_config(0x%p)\n", link); 171 dev_dbg(&link->dev, "parport_config\n");
184 172
185 last_ret = pcmcia_loop_config(link, parport_config_check, NULL); 173 ret = pcmcia_loop_config(link, parport_config_check, NULL);
186 if (last_ret) { 174 if (ret)
187 cs_error(link, RequestIO, last_ret);
188 goto failed; 175 goto failed;
189 }
190 176
191 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 177 ret = pcmcia_request_irq(link, &link->irq);
192 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 178 if (ret)
179 goto failed;
180 ret = pcmcia_request_configuration(link, &link->conf);
181 if (ret)
182 goto failed;
193 183
194 p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2, 184 p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2,
195 link->irq.AssignedIRQ, PARPORT_DMA_NONE, 185 link->irq.AssignedIRQ, PARPORT_DMA_NONE,
@@ -213,8 +203,6 @@ static int parport_config(struct pcmcia_device *link)
213 203
214 return 0; 204 return 0;
215 205
216cs_failed:
217 cs_error(link, last_fn, last_ret);
218failed: 206failed:
219 parport_cs_release(link); 207 parport_cs_release(link);
220 return -ENODEV; 208 return -ENODEV;
@@ -232,7 +220,7 @@ static void parport_cs_release(struct pcmcia_device *link)
232{ 220{
233 parport_info_t *info = link->priv; 221 parport_info_t *info = link->priv;
234 222
235 DEBUG(0, "parport_release(0x%p)\n", link); 223 dev_dbg(&link->dev, "parport_release\n");
236 224
237 if (info->ndev) { 225 if (info->ndev) {
238 struct parport *p = info->port; 226 struct parport *p = info->port;
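
parport_cs receives the same two conversions as the rest of the series: the private DEBUG()/pc_debug wrapper is replaced by dev_dbg()/pr_debug(), and CS_CHECK() error handling becomes explicit return-code checks. A small hedged sketch of the debug side (example_attach() is a made-up name):

#include <linux/device.h>
#include <linux/kernel.h>

static void example_attach(struct device *dev)
{
	/* with a struct device available, the message is prefixed with the
	 * device name and controlled by dynamic debug */
	dev_dbg(dev, "example_attach()\n");

	/* without one, pr_debug() is used; it only prints when DEBUG is
	 * defined or CONFIG_DYNAMIC_DEBUG is enabled for this file */
	pr_debug("entry\n");
}
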
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 22b02c6df854..416f6ac65b76 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -175,15 +175,6 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
175 int ret = 0; 175 int ret = 0;
176 176
177 drhd = (struct acpi_dmar_hardware_unit *)header; 177 drhd = (struct acpi_dmar_hardware_unit *)header;
178 if (!drhd->address) {
179 /* Promote an attitude of violence to a BIOS engineer today */
180 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
181 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
182 dmi_get_system_info(DMI_BIOS_VENDOR),
183 dmi_get_system_info(DMI_BIOS_VERSION),
184 dmi_get_system_info(DMI_PRODUCT_VERSION));
185 return -ENODEV;
186 }
187 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); 178 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
188 if (!dmaru) 179 if (!dmaru)
189 return -ENOMEM; 180 return -ENOMEM;
@@ -591,12 +582,53 @@ int __init dmar_table_init(void)
591 return 0; 582 return 0;
592} 583}
593 584
585int __init check_zero_address(void)
586{
587 struct acpi_table_dmar *dmar;
588 struct acpi_dmar_header *entry_header;
589 struct acpi_dmar_hardware_unit *drhd;
590
591 dmar = (struct acpi_table_dmar *)dmar_tbl;
592 entry_header = (struct acpi_dmar_header *)(dmar + 1);
593
594 while (((unsigned long)entry_header) <
595 (((unsigned long)dmar) + dmar_tbl->length)) {
596 /* Avoid looping forever on bad ACPI tables */
597 if (entry_header->length == 0) {
598 printk(KERN_WARNING PREFIX
599 "Invalid 0-length structure\n");
600 return 0;
601 }
602
603 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
604 drhd = (void *)entry_header;
605 if (!drhd->address) {
606 /* Promote an attitude of violence to a BIOS engineer today */
607 WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
608 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
609 dmi_get_system_info(DMI_BIOS_VENDOR),
610 dmi_get_system_info(DMI_BIOS_VERSION),
611 dmi_get_system_info(DMI_PRODUCT_VERSION));
612#ifdef CONFIG_DMAR
613 dmar_disabled = 1;
614#endif
615 return 0;
616 }
617 break;
618 }
619
620 entry_header = ((void *)entry_header + entry_header->length);
621 }
622 return 1;
623}
624
594void __init detect_intel_iommu(void) 625void __init detect_intel_iommu(void)
595{ 626{
596 int ret; 627 int ret;
597 628
598 ret = dmar_table_detect(); 629 ret = dmar_table_detect();
599 630 if (ret)
631 ret = check_zero_address();
600 { 632 {
601#ifdef CONFIG_INTR_REMAP 633#ifdef CONFIG_INTR_REMAP
602 struct acpi_table_dmar *dmar; 634 struct acpi_table_dmar *dmar;
@@ -613,10 +645,13 @@ void __init detect_intel_iommu(void)
613 "x2apic and Intr-remapping.\n"); 645 "x2apic and Intr-remapping.\n");
614#endif 646#endif
615#ifdef CONFIG_DMAR 647#ifdef CONFIG_DMAR
616 if (ret && !no_iommu && !iommu_detected && !swiotlb && 648 if (ret && !no_iommu && !iommu_detected && !dmar_disabled)
617 !dmar_disabled)
618 iommu_detected = 1; 649 iommu_detected = 1;
619#endif 650#endif
651#ifdef CONFIG_X86
652 if (ret)
653 x86_init.iommu.iommu_init = intel_iommu_init;
654#endif
620 } 655 }
621 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); 656 early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
622 dmar_tbl = NULL; 657 dmar_tbl = NULL;
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index b1e97e682500..9261327b49f3 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2767,7 +2767,15 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
2767 2767
2768 size = PAGE_ALIGN(size); 2768 size = PAGE_ALIGN(size);
2769 order = get_order(size); 2769 order = get_order(size);
2770 flags &= ~(GFP_DMA | GFP_DMA32); 2770
2771 if (!iommu_no_mapping(hwdev))
2772 flags &= ~(GFP_DMA | GFP_DMA32);
2773 else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
2774 if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
2775 flags |= GFP_DMA;
2776 else
2777 flags |= GFP_DMA32;
2778 }
2771 2779
2772 vaddr = (void *)__get_free_pages(flags, order); 2780 vaddr = (void *)__get_free_pages(flags, order);
2773 if (!vaddr) 2781 if (!vaddr)
@@ -3207,6 +3215,33 @@ static int __init init_iommu_sysfs(void)
3207} 3215}
3208#endif /* CONFIG_PM */ 3216#endif /* CONFIG_PM */
3209 3217
3218/*
 3219 * Here we only respond to a device being unbound from its driver.
 3220 *
 3221 * A newly added device is not attached to its DMAR domain here yet; that
 3222 * happens when the device is first mapped to an iova.
3223 */
3224static int device_notifier(struct notifier_block *nb,
3225 unsigned long action, void *data)
3226{
3227 struct device *dev = data;
3228 struct pci_dev *pdev = to_pci_dev(dev);
3229 struct dmar_domain *domain;
3230
3231 domain = find_domain(pdev);
3232 if (!domain)
3233 return 0;
3234
3235 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through)
3236 domain_remove_one_dev_info(domain, pdev);
3237
3238 return 0;
3239}
3240
3241static struct notifier_block device_nb = {
3242 .notifier_call = device_notifier,
3243};
3244
3210int __init intel_iommu_init(void) 3245int __init intel_iommu_init(void)
3211{ 3246{
3212 int ret = 0; 3247 int ret = 0;
@@ -3231,7 +3266,7 @@ int __init intel_iommu_init(void)
3231 * Check the need for DMA-remapping initialization now. 3266 * Check the need for DMA-remapping initialization now.
3232 * Above initialization will also be used by Interrupt-remapping. 3267 * Above initialization will also be used by Interrupt-remapping.
3233 */ 3268 */
3234 if (no_iommu || swiotlb || dmar_disabled) 3269 if (no_iommu || dmar_disabled)
3235 return -ENODEV; 3270 return -ENODEV;
3236 3271
3237 iommu_init_mempool(); 3272 iommu_init_mempool();
@@ -3252,13 +3287,17 @@ int __init intel_iommu_init(void)
3252 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); 3287 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
3253 3288
3254 init_timer(&unmap_timer); 3289 init_timer(&unmap_timer);
3255 force_iommu = 1; 3290#ifdef CONFIG_SWIOTLB
3291 swiotlb = 0;
3292#endif
3256 dma_ops = &intel_dma_ops; 3293 dma_ops = &intel_dma_ops;
3257 3294
3258 init_iommu_sysfs(); 3295 init_iommu_sysfs();
3259 3296
3260 register_iommu(&intel_iommu_ops); 3297 register_iommu(&intel_iommu_ops);
3261 3298
3299 bus_register_notifier(&pci_bus_type, &device_nb);
3300
3262 return 0; 3301 return 0;
3263} 3302}
3264 3303
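
For reference, the new GFP selection in intel_alloc_coherent() can be read in isolation as below; example_coherent_gfp() and the identity_mapped flag are illustrative stand-ins for the iommu_no_mapping(hwdev) test in the patch.

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static gfp_t example_coherent_gfp(struct device *hwdev, gfp_t flags,
				  bool identity_mapped)
{
	if (!identity_mapped) {
		/* the IOMMU remaps the buffer, so any physical page is fine */
		flags &= ~(GFP_DMA | GFP_DMA32);
	} else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
		/* identity-mapped device: the allocation itself must satisfy
		 * the device's coherent DMA mask */
		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
			flags |= GFP_DMA;
		else
			flags |= GFP_DMA32;
	}
	return flags;
}
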
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 17f38a781d47..f3ccbccf5f21 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -17,24 +17,6 @@ menuconfig PCCARD
17 17
18if PCCARD 18if PCCARD
19 19
20config PCMCIA_DEBUG
21 bool "Enable PCCARD debugging"
22 help
23 Say Y here to enable PCMCIA subsystem debugging. You
24 will need to choose the debugging level either via the
25 kernel command line, or module options depending whether
26 you build the PCMCIA as modules.
27
28 The kernel command line options are:
29 pcmcia_core.pc_debug=N
30 pcmcia.pc_debug=N
31 sa11xx_core.pc_debug=N
32
33 The module option is called pc_debug=N
34
35 In all the above examples, N is the debugging verbosity
36 level.
37
38config PCMCIA 20config PCMCIA
39 tristate "16-bit PCMCIA support" 21 tristate "16-bit PCMCIA support"
40 select CRC32 22 select CRC32
@@ -196,9 +178,13 @@ config PCMCIA_BCM63XX
196 tristate "bcm63xx pcmcia support" 178 tristate "bcm63xx pcmcia support"
197 depends on BCM63XX && PCMCIA 179 depends on BCM63XX && PCMCIA
198 180
181config PCMCIA_SOC_COMMON
182 bool
183
199config PCMCIA_SA1100 184config PCMCIA_SA1100
200 tristate "SA1100 support" 185 tristate "SA1100 support"
201 depends on ARM && ARCH_SA1100 && PCMCIA 186 depends on ARM && ARCH_SA1100 && PCMCIA
187 select PCMCIA_SOC_COMMON
202 help 188 help
203 Say Y here to include support for SA11x0-based PCMCIA or CF 189 Say Y here to include support for SA11x0-based PCMCIA or CF
204 sockets, found on HP iPAQs, Yopy, and other StrongARM(R)/ 190 sockets, found on HP iPAQs, Yopy, and other StrongARM(R)/
@@ -209,6 +195,7 @@ config PCMCIA_SA1100
209config PCMCIA_SA1111 195config PCMCIA_SA1111
210 tristate "SA1111 support" 196 tristate "SA1111 support"
211 depends on ARM && ARCH_SA1100 && SA1111 && PCMCIA 197 depends on ARM && ARCH_SA1100 && SA1111 && PCMCIA
198 select PCMCIA_SOC_COMMON
212 help 199 help
213 Say Y here to include support for SA1111-based PCMCIA or CF 200 Say Y here to include support for SA1111-based PCMCIA or CF
214 sockets, found on the Jornada 720, Graphicsmaster and other 201 sockets, found on the Jornada 720, Graphicsmaster and other
@@ -222,9 +209,28 @@ config PCMCIA_PXA2XX
222 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ 209 depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
223 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ 210 || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
224 || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2) 211 || ARCH_VIPER || ARCH_PXA_ESERIES || MACH_STARGATE2)
212 select PCMCIA_SOC_COMMON
225 help 213 help
226 Say Y here to include support for the PXA2xx PCMCIA controller 214 Say Y here to include support for the PXA2xx PCMCIA controller
227 215
216config PCMCIA_DEBUG
217 bool "Enable debugging"
218 depends on (PCMCIA_SA1111 || PCMCIA_SA1100 || PCMCIA_PXA2XX)
219 help
220 Say Y here to enable debugging for the SoC PCMCIA layer.
221 You will need to choose the debugging level either via the
222 kernel command line, or module options depending whether
223 you build the drivers as modules.
224
225 The kernel command line options are:
226 sa11xx_core.pc_debug=N
227 pxa2xx_core.pc_debug=N
228
229 The module option is called pc_debug=N
230
231 In all the above examples, N is the debugging verbosity
232 level.
233
228config PCMCIA_PROBE 234config PCMCIA_PROBE
229 bool 235 bool
230 default y if ISA && !ARCH_SA1100 && !ARCH_CLPS711X && !PARISC 236 default y if ISA && !ARCH_SA1100 && !ARCH_CLPS711X && !PARISC
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index a03a38acd77d..382938313991 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -22,8 +22,9 @@ obj-$(CONFIG_I82365) += i82365.o
22obj-$(CONFIG_I82092) += i82092.o 22obj-$(CONFIG_I82092) += i82092.o
23obj-$(CONFIG_TCIC) += tcic.o 23obj-$(CONFIG_TCIC) += tcic.o
24obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o 24obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o
25obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_core.o sa1100_cs.o 25obj-$(CONFIG_PCMCIA_SOC_COMMON) += soc_common.o
26obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o 26obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_base.o sa1100_cs.o
27obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_base.o sa1111_cs.o
27obj-$(CONFIG_M32R_PCC) += m32r_pcc.o 28obj-$(CONFIG_M32R_PCC) += m32r_pcc.o
28obj-$(CONFIG_M32R_CFC) += m32r_cfc.o 29obj-$(CONFIG_M32R_CFC) += m32r_cfc.o
29obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o 30obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
@@ -35,9 +36,6 @@ obj-$(CONFIG_BFIN_CFPCMCIA) += bfin_cf_pcmcia.o
35obj-$(CONFIG_AT91_CF) += at91_cf.o 36obj-$(CONFIG_AT91_CF) += at91_cf.o
36obj-$(CONFIG_ELECTRA_CF) += electra_cf.o 37obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
37 38
38sa11xx_core-y += soc_common.o sa11xx_base.o
39pxa2xx_core-y += soc_common.o pxa2xx_base.o
40
41au1x00_ss-y += au1000_generic.o 39au1x00_ss-y += au1000_generic.o
42au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o 40au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o
43au1x00_ss-$(CONFIG_MIPS_PB1100) += au1000_pb1x00.o 41au1x00_ss-$(CONFIG_MIPS_PB1100) += au1000_pb1x00.o
@@ -77,4 +75,4 @@ pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
77pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o 75pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
78pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o 76pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
79 77
80obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_core.o $(pxa2xx-obj-y) 78obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index db77e1f3309a..4cd70d056810 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -91,7 +91,7 @@ static u_int xlate_rom_addr(void __iomem *b, u_int addr)
91static void cb_release_cis_mem(struct pcmcia_socket * s) 91static void cb_release_cis_mem(struct pcmcia_socket * s)
92{ 92{
93 if (s->cb_cis_virt) { 93 if (s->cb_cis_virt) {
94 cs_dbg(s, 1, "cb_release_cis_mem()\n"); 94 dev_dbg(&s->dev, "cb_release_cis_mem()\n");
95 iounmap(s->cb_cis_virt); 95 iounmap(s->cb_cis_virt);
96 s->cb_cis_virt = NULL; 96 s->cb_cis_virt = NULL;
97 s->cb_cis_res = NULL; 97 s->cb_cis_res = NULL;
@@ -132,7 +132,7 @@ int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void
132 struct pci_dev *dev; 132 struct pci_dev *dev;
133 struct resource *res; 133 struct resource *res;
134 134
135 cs_dbg(s, 3, "read_cb_mem(%d, %#x, %u)\n", space, addr, len); 135 dev_dbg(&s->dev, "read_cb_mem(%d, %#x, %u)\n", space, addr, len);
136 136
137 dev = pci_get_slot(s->cb_dev->subordinate, 0); 137 dev = pci_get_slot(s->cb_dev->subordinate, 0);
138 if (!dev) 138 if (!dev)
diff --git a/drivers/pcmcia/cirrus.h b/drivers/pcmcia/cirrus.h
index ecd4fc7f666f..446a4576e73e 100644
--- a/drivers/pcmcia/cirrus.h
+++ b/drivers/pcmcia/cirrus.h
@@ -30,16 +30,6 @@
30#ifndef _LINUX_CIRRUS_H 30#ifndef _LINUX_CIRRUS_H
31#define _LINUX_CIRRUS_H 31#define _LINUX_CIRRUS_H
32 32
33#ifndef PCI_VENDOR_ID_CIRRUS
34#define PCI_VENDOR_ID_CIRRUS 0x1013
35#endif
36#ifndef PCI_DEVICE_ID_CIRRUS_6729
37#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
38#endif
39#ifndef PCI_DEVICE_ID_CIRRUS_6832
40#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
41#endif
42
43#define PD67_MISC_CTL_1 0x16 /* Misc control 1 */ 33#define PD67_MISC_CTL_1 0x16 /* Misc control 1 */
44#define PD67_FIFO_CTL 0x17 /* FIFO control */ 34#define PD67_FIFO_CTL 0x17 /* FIFO control */
45#define PD67_MISC_CTL_2 0x1E /* Misc control 2 */ 35#define PD67_MISC_CTL_2 0x1E /* Misc control 2 */
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 6c4a4fc83630..8c1b73cf021b 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -138,7 +138,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
138 void __iomem *sys, *end; 138 void __iomem *sys, *end;
139 unsigned char *buf = ptr; 139 unsigned char *buf = ptr;
140 140
141 cs_dbg(s, 3, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len); 141 dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
142 142
143 if (attr & IS_INDIRECT) { 143 if (attr & IS_INDIRECT) {
144 /* Indirect accesses use a bunch of special registers at fixed 144 /* Indirect accesses use a bunch of special registers at fixed
@@ -190,7 +190,7 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
190 addr = 0; 190 addr = 0;
191 } 191 }
192 } 192 }
193 cs_dbg(s, 3, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", 193 dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
194 *(u_char *)(ptr+0), *(u_char *)(ptr+1), 194 *(u_char *)(ptr+0), *(u_char *)(ptr+1),
195 *(u_char *)(ptr+2), *(u_char *)(ptr+3)); 195 *(u_char *)(ptr+2), *(u_char *)(ptr+3));
196 return 0; 196 return 0;
@@ -204,7 +204,7 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
204 void __iomem *sys, *end; 204 void __iomem *sys, *end;
205 unsigned char *buf = ptr; 205 unsigned char *buf = ptr;
206 206
207 cs_dbg(s, 3, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len); 207 dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
208 208
209 if (attr & IS_INDIRECT) { 209 if (attr & IS_INDIRECT) {
210 /* Indirect accesses use a bunch of special registers at fixed 210 /* Indirect accesses use a bunch of special registers at fixed
@@ -584,7 +584,7 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
584 ofs += link[1] + 2; 584 ofs += link[1] + 2;
585 } 585 }
586 if (i == MAX_TUPLES) { 586 if (i == MAX_TUPLES) {
587 cs_dbg(s, 1, "cs: overrun in pcmcia_get_next_tuple\n"); 587 dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n");
588 return -ENOSPC; 588 return -ENOSPC;
589 } 589 }
590 590
@@ -1440,7 +1440,7 @@ int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse)
1440 break; 1440 break;
1441 } 1441 }
1442 if (ret) 1442 if (ret)
1443 __cs_dbg(0, "parse_tuple failed %d\n", ret); 1443 pr_debug("parse_tuple failed %d\n", ret);
1444 return ret; 1444 return ret;
1445} 1445}
1446EXPORT_SYMBOL(pcmcia_parse_tuple); 1446EXPORT_SYMBOL(pcmcia_parse_tuple);
@@ -1482,6 +1482,67 @@ done:
1482} 1482}
1483EXPORT_SYMBOL(pccard_read_tuple); 1483EXPORT_SYMBOL(pccard_read_tuple);
1484 1484
1485
1486/**
1487 * pccard_loop_tuple() - loop over tuples in the CIS
1488 * @s: the struct pcmcia_socket where the card is inserted
1489 * @function: the device function we loop for
1490 * @code: which CIS code shall we look for?
1491 * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
1492 * @priv_data: private data to be passed to the loop_tuple function.
 1493 * @loop_tuple: function to call for each CIS entry of type @function. It
 1494 * gets passed the raw tuple, the parsed tuple (if @parse is
 1495 * set) and @priv_data.
1496 *
1497 * pccard_loop_tuple() loops over all CIS entries of type @function, and
1498 * calls the @loop_tuple function for each entry. If the call to @loop_tuple
 1499 * returns 0, the loop exits. Returns 0 on success or error code otherwise.
1500 */
1501int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
1502 cisdata_t code, cisparse_t *parse, void *priv_data,
1503 int (*loop_tuple) (tuple_t *tuple,
1504 cisparse_t *parse,
1505 void *priv_data))
1506{
1507 tuple_t tuple;
1508 cisdata_t *buf;
1509 int ret;
1510
1511 buf = kzalloc(256, GFP_KERNEL);
1512 if (buf == NULL) {
1513 dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
1514 return -ENOMEM;
1515 }
1516
1517 tuple.TupleData = buf;
1518 tuple.TupleDataMax = 255;
1519 tuple.TupleOffset = 0;
1520 tuple.DesiredTuple = code;
1521 tuple.Attributes = 0;
1522
1523 ret = pccard_get_first_tuple(s, function, &tuple);
1524 while (!ret) {
1525 if (pccard_get_tuple_data(s, &tuple))
1526 goto next_entry;
1527
1528 if (parse)
1529 if (pcmcia_parse_tuple(&tuple, parse))
1530 goto next_entry;
1531
1532 ret = loop_tuple(&tuple, parse, priv_data);
1533 if (!ret)
1534 break;
1535
1536next_entry:
1537 ret = pccard_get_next_tuple(s, function, &tuple);
1538 }
1539
1540 kfree(buf);
1541 return ret;
1542}
1543EXPORT_SYMBOL(pccard_loop_tuple);
1544
1545
1485/*====================================================================== 1546/*======================================================================
1486 1547
1487 This tries to determine if a card has a sensible CIS. It returns 1548 This tries to determine if a card has a sensible CIS. It returns
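
pccard_loop_tuple() is new core-internal infrastructure (its prototype is added to cs_internal.h further down). A hedged usage sketch, with made-up names, showing how a caller inside drivers/pcmcia/ could walk CISTPL_VERS_1 entries; such a caller would also include "cs_internal.h" for the prototype.

#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>

/* callback: returning 0 stops the walk and makes pccard_loop_tuple()
 * report success; any non-zero value moves on to the next tuple */
static int example_vers1_cb(tuple_t *tuple, cisparse_t *parse, void *priv_data)
{
	unsigned int *seen = priv_data;

	(*seen)++;
	return 0;
}

static int example_find_vers1(struct pcmcia_socket *s, unsigned int function)
{
	cisparse_t parse;
	unsigned int seen = 0;

	return pccard_loop_tuple(s, function, CISTPL_VERS_1, &parse,
				 &seen, example_vers1_cb);
}
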
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 698d75cda084..790af87a922f 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -61,17 +61,6 @@ INT_MODULE_PARM(unreset_limit, 30); /* unreset_check's */
61/* Access speed for attribute memory windows */ 61/* Access speed for attribute memory windows */
62INT_MODULE_PARM(cis_speed, 300); /* ns */ 62INT_MODULE_PARM(cis_speed, 300); /* ns */
63 63
64#ifdef CONFIG_PCMCIA_DEBUG
65static int pc_debug;
66
67module_param(pc_debug, int, 0644);
68
69int cs_debug_level(int level)
70{
71 return pc_debug > level;
72}
73#endif
74
75 64
76socket_state_t dead_socket = { 65socket_state_t dead_socket = {
77 .csc_mask = SS_DETECT, 66 .csc_mask = SS_DETECT,
@@ -190,7 +179,7 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
190 if (!socket || !socket->ops || !socket->dev.parent || !socket->resource_ops) 179 if (!socket || !socket->ops || !socket->dev.parent || !socket->resource_ops)
191 return -EINVAL; 180 return -EINVAL;
192 181
193 cs_dbg(socket, 0, "pcmcia_register_socket(0x%p)\n", socket->ops); 182 dev_dbg(&socket->dev, "pcmcia_register_socket(0x%p)\n", socket->ops);
194 183
195 spin_lock_init(&socket->lock); 184 spin_lock_init(&socket->lock);
196 185
@@ -262,6 +251,13 @@ int pcmcia_register_socket(struct pcmcia_socket *socket)
262 251
263 pcmcia_parse_events(socket, SS_DETECT); 252 pcmcia_parse_events(socket, SS_DETECT);
264 253
254 /*
255 * Let's try to get the PCMCIA module for 16-bit PCMCIA support.
256 * If it fails, it doesn't matter -- we still have 32-bit CardBus
257 * support to offer, so this is not a failure mode.
258 */
259 request_module_nowait("pcmcia");
260
265 return 0; 261 return 0;
266 262
267 err: 263 err:
@@ -282,7 +278,7 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket)
282 if (!socket) 278 if (!socket)
283 return; 279 return;
284 280
285 cs_dbg(socket, 0, "pcmcia_unregister_socket(0x%p)\n", socket->ops); 281 dev_dbg(&socket->dev, "pcmcia_unregister_socket(0x%p)\n", socket->ops);
286 282
287 if (socket->thread) 283 if (socket->thread)
288 kthread_stop(socket->thread); 284 kthread_stop(socket->thread);
@@ -335,7 +331,7 @@ static int send_event(struct pcmcia_socket *s, event_t event, int priority)
335 if (s->state & SOCKET_CARDBUS) 331 if (s->state & SOCKET_CARDBUS)
336 return 0; 332 return 0;
337 333
338 cs_dbg(s, 1, "send_event(event %d, pri %d, callback 0x%p)\n", 334 dev_dbg(&s->dev, "send_event(event %d, pri %d, callback 0x%p)\n",
339 event, priority, s->callback); 335 event, priority, s->callback);
340 336
341 if (!s->callback) 337 if (!s->callback)
@@ -352,7 +348,7 @@ static int send_event(struct pcmcia_socket *s, event_t event, int priority)
352 348
353static void socket_remove_drivers(struct pcmcia_socket *skt) 349static void socket_remove_drivers(struct pcmcia_socket *skt)
354{ 350{
355 cs_dbg(skt, 4, "remove_drivers\n"); 351 dev_dbg(&skt->dev, "remove_drivers\n");
356 352
357 send_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH); 353 send_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
358} 354}
@@ -361,7 +357,7 @@ static int socket_reset(struct pcmcia_socket *skt)
361{ 357{
362 int status, i; 358 int status, i;
363 359
364 cs_dbg(skt, 4, "reset\n"); 360 dev_dbg(&skt->dev, "reset\n");
365 361
366 skt->socket.flags |= SS_OUTPUT_ENA | SS_RESET; 362 skt->socket.flags |= SS_OUTPUT_ENA | SS_RESET;
367 skt->ops->set_socket(skt, &skt->socket); 363 skt->ops->set_socket(skt, &skt->socket);
@@ -383,7 +379,7 @@ static int socket_reset(struct pcmcia_socket *skt)
383 msleep(unreset_check * 10); 379 msleep(unreset_check * 10);
384 } 380 }
385 381
386 cs_err(skt, "time out after reset.\n"); 382 dev_printk(KERN_ERR, &skt->dev, "time out after reset.\n");
387 return -ETIMEDOUT; 383 return -ETIMEDOUT;
388} 384}
389 385
@@ -397,7 +393,7 @@ static void socket_shutdown(struct pcmcia_socket *s)
397{ 393{
398 int status; 394 int status;
399 395
400 cs_dbg(s, 4, "shutdown\n"); 396 dev_dbg(&s->dev, "shutdown\n");
401 397
402 socket_remove_drivers(s); 398 socket_remove_drivers(s);
403 s->state &= SOCKET_INUSE | SOCKET_PRESENT; 399 s->state &= SOCKET_INUSE | SOCKET_PRESENT;
@@ -432,7 +428,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
432{ 428{
433 int status, i; 429 int status, i;
434 430
435 cs_dbg(skt, 4, "setup\n"); 431 dev_dbg(&skt->dev, "setup\n");
436 432
437 skt->ops->get_status(skt, &status); 433 skt->ops->get_status(skt, &status);
438 if (!(status & SS_DETECT)) 434 if (!(status & SS_DETECT))
@@ -452,13 +448,15 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
452 } 448 }
453 449
454 if (status & SS_PENDING) { 450 if (status & SS_PENDING) {
455 cs_err(skt, "voltage interrogation timed out.\n"); 451 dev_printk(KERN_ERR, &skt->dev,
452 "voltage interrogation timed out.\n");
456 return -ETIMEDOUT; 453 return -ETIMEDOUT;
457 } 454 }
458 455
459 if (status & SS_CARDBUS) { 456 if (status & SS_CARDBUS) {
460 if (!(skt->features & SS_CAP_CARDBUS)) { 457 if (!(skt->features & SS_CAP_CARDBUS)) {
461 cs_err(skt, "cardbus cards are not supported.\n"); 458 dev_printk(KERN_ERR, &skt->dev,
459 "cardbus cards are not supported.\n");
462 return -EINVAL; 460 return -EINVAL;
463 } 461 }
464 skt->state |= SOCKET_CARDBUS; 462 skt->state |= SOCKET_CARDBUS;
@@ -472,7 +470,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
472 else if (!(status & SS_XVCARD)) 470 else if (!(status & SS_XVCARD))
473 skt->socket.Vcc = skt->socket.Vpp = 50; 471 skt->socket.Vcc = skt->socket.Vpp = 50;
474 else { 472 else {
475 cs_err(skt, "unsupported voltage key.\n"); 473 dev_printk(KERN_ERR, &skt->dev, "unsupported voltage key.\n");
476 return -EIO; 474 return -EIO;
477 } 475 }
478 476
@@ -489,7 +487,7 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
489 487
490 skt->ops->get_status(skt, &status); 488 skt->ops->get_status(skt, &status);
491 if (!(status & SS_POWERON)) { 489 if (!(status & SS_POWERON)) {
492 cs_err(skt, "unable to apply power.\n"); 490 dev_printk(KERN_ERR, &skt->dev, "unable to apply power.\n");
493 return -EIO; 491 return -EIO;
494 } 492 }
495 493
@@ -509,7 +507,7 @@ static int socket_insert(struct pcmcia_socket *skt)
509{ 507{
510 int ret; 508 int ret;
511 509
512 cs_dbg(skt, 4, "insert\n"); 510 dev_dbg(&skt->dev, "insert\n");
513 511
514 if (!cs_socket_get(skt)) 512 if (!cs_socket_get(skt))
515 return -ENODEV; 513 return -ENODEV;
@@ -529,7 +527,7 @@ static int socket_insert(struct pcmcia_socket *skt)
529 skt->state |= SOCKET_CARDBUS_CONFIG; 527 skt->state |= SOCKET_CARDBUS_CONFIG;
530 } 528 }
531#endif 529#endif
532 cs_dbg(skt, 4, "insert done\n"); 530 dev_dbg(&skt->dev, "insert done\n");
533 531
534 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW); 532 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
535 } else { 533 } else {
@@ -576,7 +574,7 @@ static int socket_late_resume(struct pcmcia_socket *skt)
576 * FIXME: need a better check here for cardbus cards. 574 * FIXME: need a better check here for cardbus cards.
577 */ 575 */
578 if (verify_cis_cache(skt) != 0) { 576 if (verify_cis_cache(skt) != 0) {
579 cs_dbg(skt, 4, "cis mismatch - different card\n"); 577 dev_dbg(&skt->dev, "cis mismatch - different card\n");
580 socket_remove_drivers(skt); 578 socket_remove_drivers(skt);
581 destroy_cis_cache(skt); 579 destroy_cis_cache(skt);
582 /* 580 /*
@@ -587,7 +585,7 @@ static int socket_late_resume(struct pcmcia_socket *skt)
587 msleep(200); 585 msleep(200);
588 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW); 586 send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
589 } else { 587 } else {
590 cs_dbg(skt, 4, "cis matches cache\n"); 588 dev_dbg(&skt->dev, "cis matches cache\n");
591 send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW); 589 send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW);
592 } 590 }
593 } else { 591 } else {
@@ -723,7 +721,7 @@ static int pccardd(void *__skt)
723void pcmcia_parse_events(struct pcmcia_socket *s, u_int events) 721void pcmcia_parse_events(struct pcmcia_socket *s, u_int events)
724{ 722{
725 unsigned long flags; 723 unsigned long flags;
726 cs_dbg(s, 4, "parse_events: events %08x\n", events); 724 dev_dbg(&s->dev, "parse_events: events %08x\n", events);
727 if (s->thread) { 725 if (s->thread) {
728 spin_lock_irqsave(&s->thread_lock, flags); 726 spin_lock_irqsave(&s->thread_lock, flags);
729 s->thread_events |= events; 727 s->thread_events |= events;
@@ -773,19 +771,22 @@ int pcmcia_reset_card(struct pcmcia_socket *skt)
773{ 771{
774 int ret; 772 int ret;
775 773
776 cs_dbg(skt, 1, "resetting socket\n"); 774 dev_dbg(&skt->dev, "resetting socket\n");
777 775
778 mutex_lock(&skt->skt_mutex); 776 mutex_lock(&skt->skt_mutex);
779 do { 777 do {
780 if (!(skt->state & SOCKET_PRESENT)) { 778 if (!(skt->state & SOCKET_PRESENT)) {
779 dev_dbg(&skt->dev, "can't reset, not present\n");
781 ret = -ENODEV; 780 ret = -ENODEV;
782 break; 781 break;
783 } 782 }
784 if (skt->state & SOCKET_SUSPEND) { 783 if (skt->state & SOCKET_SUSPEND) {
784 dev_dbg(&skt->dev, "can't reset, suspended\n");
785 ret = -EBUSY; 785 ret = -EBUSY;
786 break; 786 break;
787 } 787 }
788 if (skt->state & SOCKET_CARDBUS) { 788 if (skt->state & SOCKET_CARDBUS) {
789 dev_dbg(&skt->dev, "can't reset, is cardbus\n");
789 ret = -EPERM; 790 ret = -EPERM;
790 break; 791 break;
791 } 792 }
@@ -818,7 +819,7 @@ int pcmcia_suspend_card(struct pcmcia_socket *skt)
818{ 819{
819 int ret; 820 int ret;
820 821
821 cs_dbg(skt, 1, "suspending socket\n"); 822 dev_dbg(&skt->dev, "suspending socket\n");
822 823
823 mutex_lock(&skt->skt_mutex); 824 mutex_lock(&skt->skt_mutex);
824 do { 825 do {
@@ -848,7 +849,7 @@ int pcmcia_resume_card(struct pcmcia_socket *skt)
848{ 849{
849 int ret; 850 int ret;
850 851
851 cs_dbg(skt, 1, "waking up socket\n"); 852 dev_dbg(&skt->dev, "waking up socket\n");
852 853
853 mutex_lock(&skt->skt_mutex); 854 mutex_lock(&skt->skt_mutex);
854 do { 855 do {
@@ -876,7 +877,7 @@ int pcmcia_eject_card(struct pcmcia_socket *skt)
876{ 877{
877 int ret; 878 int ret;
878 879
879 cs_dbg(skt, 1, "user eject request\n"); 880 dev_dbg(&skt->dev, "user eject request\n");
880 881
881 mutex_lock(&skt->skt_mutex); 882 mutex_lock(&skt->skt_mutex);
882 do { 883 do {
@@ -905,7 +906,7 @@ int pcmcia_insert_card(struct pcmcia_socket *skt)
905{ 906{
906 int ret; 907 int ret;
907 908
908 cs_dbg(skt, 1, "user insert request\n"); 909 dev_dbg(&skt->dev, "user insert request\n");
909 910
910 mutex_lock(&skt->skt_mutex); 911 mutex_lock(&skt->skt_mutex);
911 do { 912 do {
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 1f4098f1354d..3bc02d53a3a3 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -107,28 +107,6 @@ static inline void cs_socket_put(struct pcmcia_socket *skt)
107 } 107 }
108} 108}
109 109
110#ifdef CONFIG_PCMCIA_DEBUG
111extern int cs_debug_level(int);
112
113#define cs_dbg(skt, lvl, fmt, arg...) do { \
114 if (cs_debug_level(lvl)) \
115 dev_printk(KERN_DEBUG, &skt->dev, \
116 "cs: " fmt, ## arg); \
117} while (0)
118#define __cs_dbg(lvl, fmt, arg...) do { \
119 if (cs_debug_level(lvl)) \
120 printk(KERN_DEBUG \
121 "cs: " fmt, ## arg); \
122} while (0)
123
124#else
125#define cs_dbg(skt, lvl, fmt, arg...) do { } while (0)
126#define __cs_dbg(lvl, fmt, arg...) do { } while (0)
127#endif
128
129#define cs_err(skt, fmt, arg...) \
130 dev_printk(KERN_ERR, &skt->dev, "cs: " fmt, ## arg)
131
132 110
133/* 111/*
134 * Stuff internal to module "pcmcia_core": 112 * Stuff internal to module "pcmcia_core":
@@ -170,10 +148,6 @@ extern struct rw_semaphore pcmcia_socket_list_rwsem;
170extern struct list_head pcmcia_socket_list; 148extern struct list_head pcmcia_socket_list;
171extern struct class pcmcia_socket_class; 149extern struct class pcmcia_socket_class;
172 150
173int pcmcia_get_window(struct pcmcia_socket *s,
174 window_handle_t *handle,
175 int idx,
176 win_req_t *req);
177int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c); 151int pccard_register_pcmcia(struct pcmcia_socket *s, struct pcmcia_callback *c);
178struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr); 152struct pcmcia_socket *pcmcia_get_socket_by_nr(unsigned int nr);
179 153
@@ -199,6 +173,22 @@ int pcmcia_replace_cis(struct pcmcia_socket *s,
199 const u8 *data, const size_t len); 173 const u8 *data, const size_t len);
200int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count); 174int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count);
201 175
176/* loop over CIS entries */
177int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
178 cisdata_t code, cisparse_t *parse, void *priv_data,
179 int (*loop_tuple) (tuple_t *tuple,
180 cisparse_t *parse,
181 void *priv_data));
182
183int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function,
184 tuple_t *tuple);
185
186int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function,
187 tuple_t *tuple);
188
189int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple);
190
191
202/* rsrc_mgr.c */ 192/* rsrc_mgr.c */
203int pcmcia_validate_mem(struct pcmcia_socket *s); 193int pcmcia_validate_mem(struct pcmcia_socket *s);
204struct resource *pcmcia_find_io_region(unsigned long base, 194struct resource *pcmcia_find_io_region(unsigned long base,
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index f5b7079f13d3..05893d41dd41 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -41,129 +41,11 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
41MODULE_DESCRIPTION("PCMCIA Driver Services"); 41MODULE_DESCRIPTION("PCMCIA Driver Services");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43 43
44#ifdef CONFIG_PCMCIA_DEBUG
45int ds_pc_debug;
46
47module_param_named(pc_debug, ds_pc_debug, int, 0644);
48
49#define ds_dbg(lvl, fmt, arg...) do { \
50 if (ds_pc_debug > (lvl)) \
51 printk(KERN_DEBUG "ds: " fmt , ## arg); \
52} while (0)
53#define ds_dev_dbg(lvl, dev, fmt, arg...) do { \
54 if (ds_pc_debug > (lvl)) \
55 dev_printk(KERN_DEBUG, dev, "ds: " fmt , ## arg); \
56} while (0)
57#else
58#define ds_dbg(lvl, fmt, arg...) do { } while (0)
59#define ds_dev_dbg(lvl, dev, fmt, arg...) do { } while (0)
60#endif
61 44
62spinlock_t pcmcia_dev_list_lock; 45spinlock_t pcmcia_dev_list_lock;
63 46
64/*====================================================================*/ 47/*====================================================================*/
65 48
66/* code which was in cs.c before */
67
68/* String tables for error messages */
69
70typedef struct lookup_t {
71 const int key;
72 const char *msg;
73} lookup_t;
74
75static const lookup_t error_table[] = {
76 { 0, "Operation succeeded" },
77 { -EIO, "Input/Output error" },
78 { -ENODEV, "No card present" },
79 { -EINVAL, "Bad parameter" },
80 { -EACCES, "Configuration locked" },
81 { -EBUSY, "Resource in use" },
82 { -ENOSPC, "No more items" },
83 { -ENOMEM, "Out of resource" },
84};
85
86
87static const lookup_t service_table[] = {
88 { AccessConfigurationRegister, "AccessConfigurationRegister" },
89 { AddSocketServices, "AddSocketServices" },
90 { AdjustResourceInfo, "AdjustResourceInfo" },
91 { CheckEraseQueue, "CheckEraseQueue" },
92 { CloseMemory, "CloseMemory" },
93 { DeregisterClient, "DeregisterClient" },
94 { DeregisterEraseQueue, "DeregisterEraseQueue" },
95 { GetCardServicesInfo, "GetCardServicesInfo" },
96 { GetClientInfo, "GetClientInfo" },
97 { GetConfigurationInfo, "GetConfigurationInfo" },
98 { GetEventMask, "GetEventMask" },
99 { GetFirstClient, "GetFirstClient" },
100 { GetFirstRegion, "GetFirstRegion" },
101 { GetFirstTuple, "GetFirstTuple" },
102 { GetNextClient, "GetNextClient" },
103 { GetNextRegion, "GetNextRegion" },
104 { GetNextTuple, "GetNextTuple" },
105 { GetStatus, "GetStatus" },
106 { GetTupleData, "GetTupleData" },
107 { MapMemPage, "MapMemPage" },
108 { ModifyConfiguration, "ModifyConfiguration" },
109 { ModifyWindow, "ModifyWindow" },
110 { OpenMemory, "OpenMemory" },
111 { ParseTuple, "ParseTuple" },
112 { ReadMemory, "ReadMemory" },
113 { RegisterClient, "RegisterClient" },
114 { RegisterEraseQueue, "RegisterEraseQueue" },
115 { RegisterMTD, "RegisterMTD" },
116 { ReleaseConfiguration, "ReleaseConfiguration" },
117 { ReleaseIO, "ReleaseIO" },
118 { ReleaseIRQ, "ReleaseIRQ" },
119 { ReleaseWindow, "ReleaseWindow" },
120 { RequestConfiguration, "RequestConfiguration" },
121 { RequestIO, "RequestIO" },
122 { RequestIRQ, "RequestIRQ" },
123 { RequestSocketMask, "RequestSocketMask" },
124 { RequestWindow, "RequestWindow" },
125 { ResetCard, "ResetCard" },
126 { SetEventMask, "SetEventMask" },
127 { ValidateCIS, "ValidateCIS" },
128 { WriteMemory, "WriteMemory" },
129 { BindDevice, "BindDevice" },
130 { BindMTD, "BindMTD" },
131 { ReportError, "ReportError" },
132 { SuspendCard, "SuspendCard" },
133 { ResumeCard, "ResumeCard" },
134 { EjectCard, "EjectCard" },
135 { InsertCard, "InsertCard" },
136 { ReplaceCIS, "ReplaceCIS" }
137};
138
139const char *pcmcia_error_func(int func)
140{
141 int i;
142
143 for (i = 0; i < ARRAY_SIZE(service_table); i++)
144 if (service_table[i].key == func)
145 return service_table[i].msg;
146
147 return "Unknown service number";
148}
149EXPORT_SYMBOL(pcmcia_error_func);
150
151const char *pcmcia_error_ret(int ret)
152{
153 int i;
154
155 for (i = 0; i < ARRAY_SIZE(error_table); i++)
156 if (error_table[i].key == ret)
157 return error_table[i].msg;
158
159 return "unknown";
160}
161EXPORT_SYMBOL(pcmcia_error_ret);
162
163/*======================================================================*/
164
165
166
167static void pcmcia_check_driver(struct pcmcia_driver *p_drv) 49static void pcmcia_check_driver(struct pcmcia_driver *p_drv)
168{ 50{
169 struct pcmcia_device_id *did = p_drv->id_table; 51 struct pcmcia_device_id *did = p_drv->id_table;
@@ -303,7 +185,7 @@ int pcmcia_register_driver(struct pcmcia_driver *driver)
303 spin_lock_init(&driver->dynids.lock); 185 spin_lock_init(&driver->dynids.lock);
304 INIT_LIST_HEAD(&driver->dynids.list); 186 INIT_LIST_HEAD(&driver->dynids.list);
305 187
306 ds_dbg(3, "registering driver %s\n", driver->drv.name); 188 pr_debug("registering driver %s\n", driver->drv.name);
307 189
308 error = driver_register(&driver->drv); 190 error = driver_register(&driver->drv);
309 if (error < 0) 191 if (error < 0)
@@ -323,7 +205,7 @@ EXPORT_SYMBOL(pcmcia_register_driver);
323 */ 205 */
324void pcmcia_unregister_driver(struct pcmcia_driver *driver) 206void pcmcia_unregister_driver(struct pcmcia_driver *driver)
325{ 207{
326 ds_dbg(3, "unregistering driver %s\n", driver->drv.name); 208 pr_debug("unregistering driver %s\n", driver->drv.name);
327 driver_unregister(&driver->drv); 209 driver_unregister(&driver->drv);
328 pcmcia_free_dynids(driver); 210 pcmcia_free_dynids(driver);
329} 211}
@@ -350,14 +232,14 @@ void pcmcia_put_dev(struct pcmcia_device *p_dev)
350static void pcmcia_release_function(struct kref *ref) 232static void pcmcia_release_function(struct kref *ref)
351{ 233{
352 struct config_t *c = container_of(ref, struct config_t, ref); 234 struct config_t *c = container_of(ref, struct config_t, ref);
353 ds_dbg(1, "releasing config_t\n"); 235 pr_debug("releasing config_t\n");
354 kfree(c); 236 kfree(c);
355} 237}
356 238
357static void pcmcia_release_dev(struct device *dev) 239static void pcmcia_release_dev(struct device *dev)
358{ 240{
359 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 241 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
360 ds_dev_dbg(1, dev, "releasing device\n"); 242 dev_dbg(dev, "releasing device\n");
361 pcmcia_put_socket(p_dev->socket); 243 pcmcia_put_socket(p_dev->socket);
362 kfree(p_dev->devname); 244 kfree(p_dev->devname);
363 kref_put(&p_dev->function_config->ref, pcmcia_release_function); 245 kref_put(&p_dev->function_config->ref, pcmcia_release_function);
@@ -367,7 +249,7 @@ static void pcmcia_release_dev(struct device *dev)
367static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc) 249static void pcmcia_add_device_later(struct pcmcia_socket *s, int mfc)
368{ 250{
369 if (!s->pcmcia_state.device_add_pending) { 251 if (!s->pcmcia_state.device_add_pending) {
370 ds_dev_dbg(1, &s->dev, "scheduling to add %s secondary" 252 dev_dbg(&s->dev, "scheduling to add %s secondary"
371 " device to %d\n", mfc ? "mfc" : "pfc", s->sock); 253 " device to %d\n", mfc ? "mfc" : "pfc", s->sock);
372 s->pcmcia_state.device_add_pending = 1; 254 s->pcmcia_state.device_add_pending = 1;
373 s->pcmcia_state.mfc_pfc = mfc; 255 s->pcmcia_state.mfc_pfc = mfc;
@@ -405,7 +287,7 @@ static int pcmcia_device_probe(struct device * dev)
405 */ 287 */
406 did = dev_get_drvdata(&p_dev->dev); 288 did = dev_get_drvdata(&p_dev->dev);
407 289
408 ds_dev_dbg(1, dev, "trying to bind to %s\n", p_drv->drv.name); 290 dev_dbg(dev, "trying to bind to %s\n", p_drv->drv.name);
409 291
410 if ((!p_drv->probe) || (!p_dev->function_config) || 292 if ((!p_drv->probe) || (!p_dev->function_config) ||
411 (!try_module_get(p_drv->owner))) { 293 (!try_module_get(p_drv->owner))) {
@@ -428,7 +310,7 @@ static int pcmcia_device_probe(struct device * dev)
428 310
429 ret = p_drv->probe(p_dev); 311 ret = p_drv->probe(p_dev);
430 if (ret) { 312 if (ret) {
431 ds_dev_dbg(1, dev, "binding to %s failed with %d\n", 313 dev_dbg(dev, "binding to %s failed with %d\n",
432 p_drv->drv.name, ret); 314 p_drv->drv.name, ret);
433 goto put_module; 315 goto put_module;
434 } 316 }
@@ -456,7 +338,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
456 struct pcmcia_device *tmp; 338 struct pcmcia_device *tmp;
457 unsigned long flags; 339 unsigned long flags;
458 340
459 ds_dev_dbg(2, leftover ? &leftover->dev : &s->dev, 341 dev_dbg(leftover ? &leftover->dev : &s->dev,
460 "pcmcia_card_remove(%d) %s\n", s->sock, 342 "pcmcia_card_remove(%d) %s\n", s->sock,
461 leftover ? leftover->devname : ""); 343 leftover ? leftover->devname : "");
462 344
@@ -475,7 +357,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
475 p_dev->_removed=1; 357 p_dev->_removed=1;
476 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 358 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
477 359
478 ds_dev_dbg(2, &p_dev->dev, "unregistering device\n"); 360 dev_dbg(&p_dev->dev, "unregistering device\n");
479 device_unregister(&p_dev->dev); 361 device_unregister(&p_dev->dev);
480 } 362 }
481 363
@@ -492,7 +374,7 @@ static int pcmcia_device_remove(struct device * dev)
492 p_dev = to_pcmcia_dev(dev); 374 p_dev = to_pcmcia_dev(dev);
493 p_drv = to_pcmcia_drv(dev->driver); 375 p_drv = to_pcmcia_drv(dev->driver);
494 376
495 ds_dev_dbg(1, dev, "removing device\n"); 377 dev_dbg(dev, "removing device\n");
496 378
497 /* If we're removing the primary module driving a 379 /* If we're removing the primary module driving a
498 * pseudo multi-function card, we need to unbind 380 * pseudo multi-function card, we need to unbind
@@ -572,7 +454,7 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev)
572 } 454 }
573 if (!pccard_read_tuple(p_dev->socket, p_dev->func, 455 if (!pccard_read_tuple(p_dev->socket, p_dev->func,
574 CISTPL_DEVICE_GEO, devgeo)) { 456 CISTPL_DEVICE_GEO, devgeo)) {
575 ds_dev_dbg(0, &p_dev->dev, 457 dev_dbg(&p_dev->dev,
576 "mem device geometry probably means " 458 "mem device geometry probably means "
577 "FUNCID_MEMORY\n"); 459 "FUNCID_MEMORY\n");
578 p_dev->func_id = CISTPL_FUNCID_MEMORY; 460 p_dev->func_id = CISTPL_FUNCID_MEMORY;
@@ -628,7 +510,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
628 510
629 mutex_lock(&device_add_lock); 511 mutex_lock(&device_add_lock);
630 512
631 ds_dbg(3, "adding device to %d, function %d\n", s->sock, function); 513 pr_debug("adding device to %d, function %d\n", s->sock, function);
632 514
633 /* max of 4 devices per card */ 515 /* max of 4 devices per card */
634 if (s->device_count == 4) 516 if (s->device_count == 4)
@@ -654,7 +536,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
654 p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev)); 536 p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
655 if (!p_dev->devname) 537 if (!p_dev->devname)
656 goto err_free; 538 goto err_free;
657 ds_dev_dbg(3, &p_dev->dev, "devname is %s\n", p_dev->devname); 539 dev_dbg(&p_dev->dev, "devname is %s\n", p_dev->devname);
658 540
659 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 541 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
660 542
@@ -677,7 +559,7 @@ struct pcmcia_device * pcmcia_device_add(struct pcmcia_socket *s, unsigned int f
677 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags); 559 spin_unlock_irqrestore(&pcmcia_dev_list_lock, flags);
678 560
679 if (!p_dev->function_config) { 561 if (!p_dev->function_config) {
680 ds_dev_dbg(3, &p_dev->dev, "creating config_t\n"); 562 dev_dbg(&p_dev->dev, "creating config_t\n");
681 p_dev->function_config = kzalloc(sizeof(struct config_t), 563 p_dev->function_config = kzalloc(sizeof(struct config_t),
682 GFP_KERNEL); 564 GFP_KERNEL);
683 if (!p_dev->function_config) 565 if (!p_dev->function_config)
@@ -722,20 +604,20 @@ static int pcmcia_card_add(struct pcmcia_socket *s)
722 int ret = 0; 604 int ret = 0;
723 605
724 if (!(s->resource_setup_done)) { 606 if (!(s->resource_setup_done)) {
725 ds_dev_dbg(3, &s->dev, 607 dev_dbg(&s->dev,
726 "no resources available, delaying card_add\n"); 608 "no resources available, delaying card_add\n");
727 return -EAGAIN; /* try again, but later... */ 609 return -EAGAIN; /* try again, but later... */
728 } 610 }
729 611
730 if (pcmcia_validate_mem(s)) { 612 if (pcmcia_validate_mem(s)) {
731 ds_dev_dbg(3, &s->dev, "validating mem resources failed, " 613 dev_dbg(&s->dev, "validating mem resources failed, "
732 "delaying card_add\n"); 614 "delaying card_add\n");
733 return -EAGAIN; /* try again, but later... */ 615 return -EAGAIN; /* try again, but later... */
734 } 616 }
735 617
736 ret = pccard_validate_cis(s, &no_chains); 618 ret = pccard_validate_cis(s, &no_chains);
737 if (ret || !no_chains) { 619 if (ret || !no_chains) {
738 ds_dev_dbg(0, &s->dev, "invalid CIS or invalid resources\n"); 620 dev_dbg(&s->dev, "invalid CIS or invalid resources\n");
739 return -ENODEV; 621 return -ENODEV;
740 } 622 }
741 623
@@ -756,7 +638,7 @@ static void pcmcia_delayed_add_device(struct work_struct *work)
756{ 638{
757 struct pcmcia_socket *s = 639 struct pcmcia_socket *s =
758 container_of(work, struct pcmcia_socket, device_add); 640 container_of(work, struct pcmcia_socket, device_add);
759 ds_dev_dbg(1, &s->dev, "adding additional device to %d\n", s->sock); 641 dev_dbg(&s->dev, "adding additional device to %d\n", s->sock);
760 pcmcia_device_add(s, s->pcmcia_state.mfc_pfc); 642 pcmcia_device_add(s, s->pcmcia_state.mfc_pfc);
761 s->pcmcia_state.device_add_pending = 0; 643 s->pcmcia_state.device_add_pending = 0;
762 s->pcmcia_state.mfc_pfc = 0; 644 s->pcmcia_state.mfc_pfc = 0;
@@ -766,7 +648,7 @@ static int pcmcia_requery(struct device *dev, void * _data)
766{ 648{
767 struct pcmcia_device *p_dev = to_pcmcia_dev(dev); 649 struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
768 if (!p_dev->dev.driver) { 650 if (!p_dev->dev.driver) {
769 ds_dev_dbg(1, dev, "update device information\n"); 651 dev_dbg(dev, "update device information\n");
770 pcmcia_device_query(p_dev); 652 pcmcia_device_query(p_dev);
771 } 653 }
772 654
@@ -780,7 +662,7 @@ static void pcmcia_bus_rescan(struct pcmcia_socket *skt, int new_cis)
780 unsigned long flags; 662 unsigned long flags;
781 663
782 /* must be called with skt_mutex held */ 664 /* must be called with skt_mutex held */
783 ds_dev_dbg(0, &skt->dev, "re-scanning socket %d\n", skt->sock); 665 dev_dbg(&skt->dev, "re-scanning socket %d\n", skt->sock);
784 666
785 spin_lock_irqsave(&pcmcia_dev_list_lock, flags); 667 spin_lock_irqsave(&pcmcia_dev_list_lock, flags);
786 if (list_empty(&skt->devices_list)) 668 if (list_empty(&skt->devices_list))
@@ -835,7 +717,7 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
835 if (!filename) 717 if (!filename)
836 return -EINVAL; 718 return -EINVAL;
837 719
838 ds_dev_dbg(1, &dev->dev, "trying to load CIS file %s\n", filename); 720 dev_dbg(&dev->dev, "trying to load CIS file %s\n", filename);
839 721
840 if (request_firmware(&fw, filename, &dev->dev) == 0) { 722 if (request_firmware(&fw, filename, &dev->dev) == 0) {
841 if (fw->size >= CISTPL_MAX_CIS_SIZE) { 723 if (fw->size >= CISTPL_MAX_CIS_SIZE) {
@@ -953,14 +835,14 @@ static inline int pcmcia_devmatch(struct pcmcia_device *dev,
953 * after it has re-checked that there is no possible module 835 * after it has re-checked that there is no possible module
954 * with a prod_id/manf_id/card_id match. 836 * with a prod_id/manf_id/card_id match.
955 */ 837 */
956 ds_dev_dbg(0, &dev->dev, 838 dev_dbg(&dev->dev,
957 "skipping FUNC_ID match until userspace interaction\n"); 839 "skipping FUNC_ID match until userspace interaction\n");
958 if (!dev->allow_func_id_match) 840 if (!dev->allow_func_id_match)
959 return 0; 841 return 0;
960 } 842 }
961 843
962 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) { 844 if (did->match_flags & PCMCIA_DEV_ID_MATCH_FAKE_CIS) {
963 ds_dev_dbg(0, &dev->dev, "device needs a fake CIS\n"); 845 dev_dbg(&dev->dev, "device needs a fake CIS\n");
964 if (!dev->socket->fake_cis) 846 if (!dev->socket->fake_cis)
965 pcmcia_load_firmware(dev, did->cisfile); 847 pcmcia_load_firmware(dev, did->cisfile);
966 848
@@ -992,9 +874,9 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
992 /* match dynamic devices first */ 874 /* match dynamic devices first */
993 spin_lock(&p_drv->dynids.lock); 875 spin_lock(&p_drv->dynids.lock);
994 list_for_each_entry(dynid, &p_drv->dynids.list, node) { 876 list_for_each_entry(dynid, &p_drv->dynids.list, node) {
995 ds_dev_dbg(3, dev, "trying to match to %s\n", drv->name); 877 dev_dbg(dev, "trying to match to %s\n", drv->name);
996 if (pcmcia_devmatch(p_dev, &dynid->id)) { 878 if (pcmcia_devmatch(p_dev, &dynid->id)) {
997 ds_dev_dbg(0, dev, "matched to %s\n", drv->name); 879 dev_dbg(dev, "matched to %s\n", drv->name);
998 spin_unlock(&p_drv->dynids.lock); 880 spin_unlock(&p_drv->dynids.lock);
999 return 1; 881 return 1;
1000 } 882 }
@@ -1004,15 +886,15 @@ static int pcmcia_bus_match(struct device * dev, struct device_driver * drv) {
1004#ifdef CONFIG_PCMCIA_IOCTL 886#ifdef CONFIG_PCMCIA_IOCTL
1005 /* matching by cardmgr */ 887 /* matching by cardmgr */
1006 if (p_dev->cardmgr == p_drv) { 888 if (p_dev->cardmgr == p_drv) {
1007 ds_dev_dbg(0, dev, "cardmgr matched to %s\n", drv->name); 889 dev_dbg(dev, "cardmgr matched to %s\n", drv->name);
1008 return 1; 890 return 1;
1009 } 891 }
1010#endif 892#endif
1011 893
1012 while (did && did->match_flags) { 894 while (did && did->match_flags) {
1013 ds_dev_dbg(3, dev, "trying to match to %s\n", drv->name); 895 dev_dbg(dev, "trying to match to %s\n", drv->name);
1014 if (pcmcia_devmatch(p_dev, did)) { 896 if (pcmcia_devmatch(p_dev, did)) {
1015 ds_dev_dbg(0, dev, "matched to %s\n", drv->name); 897 dev_dbg(dev, "matched to %s\n", drv->name);
1016 return 1; 898 return 1;
1017 } 899 }
1018 did++; 900 did++;
@@ -1218,7 +1100,7 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
1218 if (p_dev->suspended) 1100 if (p_dev->suspended)
1219 return 0; 1101 return 0;
1220 1102
1221 ds_dev_dbg(2, dev, "suspending\n"); 1103 dev_dbg(dev, "suspending\n");
1222 1104
1223 if (dev->driver) 1105 if (dev->driver)
1224 p_drv = to_pcmcia_drv(dev->driver); 1106 p_drv = to_pcmcia_drv(dev->driver);
@@ -1238,7 +1120,7 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
1238 } 1120 }
1239 1121
1240 if (p_dev->device_no == p_dev->func) { 1122 if (p_dev->device_no == p_dev->func) {
1241 ds_dev_dbg(2, dev, "releasing configuration\n"); 1123 dev_dbg(dev, "releasing configuration\n");
1242 pcmcia_release_configuration(p_dev); 1124 pcmcia_release_configuration(p_dev);
1243 } 1125 }
1244 1126
@@ -1258,7 +1140,7 @@ static int pcmcia_dev_resume(struct device * dev)
1258 if (!p_dev->suspended) 1140 if (!p_dev->suspended)
1259 return 0; 1141 return 0;
1260 1142
1261 ds_dev_dbg(2, dev, "resuming\n"); 1143 dev_dbg(dev, "resuming\n");
1262 1144
1263 if (dev->driver) 1145 if (dev->driver)
1264 p_drv = to_pcmcia_drv(dev->driver); 1146 p_drv = to_pcmcia_drv(dev->driver);
@@ -1267,7 +1149,7 @@ static int pcmcia_dev_resume(struct device * dev)
1267 goto out; 1149 goto out;
1268 1150
1269 if (p_dev->device_no == p_dev->func) { 1151 if (p_dev->device_no == p_dev->func) {
1270 ds_dev_dbg(2, dev, "requesting configuration\n"); 1152 dev_dbg(dev, "requesting configuration\n");
1271 ret = pcmcia_request_configuration(p_dev, &p_dev->conf); 1153 ret = pcmcia_request_configuration(p_dev, &p_dev->conf);
1272 if (ret) 1154 if (ret)
1273 goto out; 1155 goto out;
@@ -1309,14 +1191,14 @@ static int pcmcia_bus_resume_callback(struct device *dev, void * _data)
1309 1191
1310static int pcmcia_bus_resume(struct pcmcia_socket *skt) 1192static int pcmcia_bus_resume(struct pcmcia_socket *skt)
1311{ 1193{
1312 ds_dev_dbg(2, &skt->dev, "resuming socket %d\n", skt->sock); 1194 dev_dbg(&skt->dev, "resuming socket %d\n", skt->sock);
1313 bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback); 1195 bus_for_each_dev(&pcmcia_bus_type, NULL, skt, pcmcia_bus_resume_callback);
1314 return 0; 1196 return 0;
1315} 1197}
1316 1198
1317static int pcmcia_bus_suspend(struct pcmcia_socket *skt) 1199static int pcmcia_bus_suspend(struct pcmcia_socket *skt)
1318{ 1200{
1319 ds_dev_dbg(2, &skt->dev, "suspending socket %d\n", skt->sock); 1201 dev_dbg(&skt->dev, "suspending socket %d\n", skt->sock);
1320 if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt, 1202 if (bus_for_each_dev(&pcmcia_bus_type, NULL, skt,
1321 pcmcia_bus_suspend_callback)) { 1203 pcmcia_bus_suspend_callback)) {
1322 pcmcia_bus_resume(skt); 1204 pcmcia_bus_resume(skt);
@@ -1348,7 +1230,7 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
1348 return -ENODEV; 1230 return -ENODEV;
1349 } 1231 }
1350 1232
1351 ds_dev_dbg(1, &skt->dev, "ds_event(0x%06x, %d, 0x%p)\n", 1233 dev_dbg(&skt->dev, "ds_event(0x%06x, %d, 0x%p)\n",
1352 event, priority, skt); 1234 event, priority, skt);
1353 1235
1354 switch (event) { 1236 switch (event) {
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index a4aacb830b80..c13fd9360511 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -63,21 +63,6 @@
63#include "vg468.h" 63#include "vg468.h"
64#include "ricoh.h" 64#include "ricoh.h"
65 65
66#ifdef CONFIG_PCMCIA_DEBUG
67static const char version[] =
68"i82365.c 1.265 1999/11/10 18:36:21 (David Hinds)";
69
70static int pc_debug;
71
72module_param(pc_debug, int, 0644);
73
74#define debug(lvl, fmt, arg...) do { \
75 if (pc_debug > (lvl)) \
76 printk(KERN_DEBUG "i82365: " fmt , ## arg); \
77} while (0)
78#else
79#define debug(lvl, fmt, arg...) do { } while (0)
80#endif
81 66
82static irqreturn_t i365_count_irq(int, void *); 67static irqreturn_t i365_count_irq(int, void *);
83static inline int _check_irq(int irq, int flags) 68static inline int _check_irq(int irq, int flags)
@@ -501,13 +486,13 @@ static irqreturn_t i365_count_irq(int irq, void *dev)
501{ 486{
502 i365_get(irq_sock, I365_CSC); 487 i365_get(irq_sock, I365_CSC);
503 irq_hits++; 488 irq_hits++;
504 debug(2, "-> hit on irq %d\n", irq); 489 pr_debug("i82365: -> hit on irq %d\n", irq);
505 return IRQ_HANDLED; 490 return IRQ_HANDLED;
506} 491}
507 492
508static u_int __init test_irq(u_short sock, int irq) 493static u_int __init test_irq(u_short sock, int irq)
509{ 494{
510 debug(2, " testing ISA irq %d\n", irq); 495 pr_debug("i82365: testing ISA irq %d\n", irq);
511 if (request_irq(irq, i365_count_irq, IRQF_PROBE_SHARED, "scan", 496 if (request_irq(irq, i365_count_irq, IRQF_PROBE_SHARED, "scan",
512 i365_count_irq) != 0) 497 i365_count_irq) != 0)
513 return 1; 498 return 1;
@@ -515,7 +500,7 @@ static u_int __init test_irq(u_short sock, int irq)
515 msleep(10); 500 msleep(10);
516 if (irq_hits) { 501 if (irq_hits) {
517 free_irq(irq, i365_count_irq); 502 free_irq(irq, i365_count_irq);
518 debug(2, " spurious hit!\n"); 503 pr_debug("i82365: spurious hit!\n");
519 return 1; 504 return 1;
520 } 505 }
521 506
@@ -528,7 +513,7 @@ static u_int __init test_irq(u_short sock, int irq)
528 513
529 /* mask all interrupts */ 514 /* mask all interrupts */
530 i365_set(sock, I365_CSCINT, 0); 515 i365_set(sock, I365_CSCINT, 0);
531 debug(2, " hits = %d\n", irq_hits); 516 pr_debug("i82365: hits = %d\n", irq_hits);
532 517
533 return (irq_hits != 1); 518 return (irq_hits != 1);
534} 519}
@@ -854,7 +839,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev)
854 u_long flags = 0; 839 u_long flags = 0;
855 int handled = 0; 840 int handled = 0;
856 841
857 debug(4, "pcic_interrupt(%d)\n", irq); 842 pr_debug("pcic_interrupt(%d)\n", irq);
858 843
859 for (j = 0; j < 20; j++) { 844 for (j = 0; j < 20; j++) {
860 active = 0; 845 active = 0;
@@ -878,7 +863,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev)
878 events |= (csc & I365_CSC_READY) ? SS_READY : 0; 863 events |= (csc & I365_CSC_READY) ? SS_READY : 0;
879 } 864 }
880 ISA_UNLOCK(i, flags); 865 ISA_UNLOCK(i, flags);
881 debug(2, "socket %d event 0x%02x\n", i, events); 866 pr_debug("socket %d event 0x%02x\n", i, events);
882 867
883 if (events) 868 if (events)
884 pcmcia_parse_events(&socket[i].socket, events); 869 pcmcia_parse_events(&socket[i].socket, events);
@@ -890,7 +875,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev)
890 if (j == 20) 875 if (j == 20)
891 printk(KERN_NOTICE "i82365: infinite loop in interrupt handler\n"); 876 printk(KERN_NOTICE "i82365: infinite loop in interrupt handler\n");
892 877
893 debug(4, "interrupt done\n"); 878 pr_debug("pcic_interrupt done\n");
894 return IRQ_RETVAL(handled); 879 return IRQ_RETVAL(handled);
895} /* pcic_interrupt */ 880} /* pcic_interrupt */
896 881
@@ -932,7 +917,7 @@ static int i365_get_status(u_short sock, u_int *value)
932 } 917 }
933 } 918 }
934 919
935 debug(1, "GetStatus(%d) = %#4.4x\n", sock, *value); 920 pr_debug("GetStatus(%d) = %#4.4x\n", sock, *value);
936 return 0; 921 return 0;
937} /* i365_get_status */ 922} /* i365_get_status */
938 923
@@ -943,7 +928,7 @@ static int i365_set_socket(u_short sock, socket_state_t *state)
943 struct i82365_socket *t = &socket[sock]; 928 struct i82365_socket *t = &socket[sock];
944 u_char reg; 929 u_char reg;
945 930
946 debug(1, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 931 pr_debug("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
947 "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags, 932 "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags,
948 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 933 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
949 934
@@ -1052,7 +1037,7 @@ static int i365_set_io_map(u_short sock, struct pccard_io_map *io)
1052{ 1037{
1053 u_char map, ioctl; 1038 u_char map, ioctl;
1054 1039
1055 debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " 1040 pr_debug("SetIOMap(%d, %d, %#2.2x, %d ns, "
1056 "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed, 1041 "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed,
1057 (unsigned long long)io->start, (unsigned long long)io->stop); 1042 (unsigned long long)io->start, (unsigned long long)io->stop);
1058 map = io->map; 1043 map = io->map;
@@ -1082,7 +1067,7 @@ static int i365_set_mem_map(u_short sock, struct pccard_mem_map *mem)
1082 u_short base, i; 1067 u_short base, i;
1083 u_char map; 1068 u_char map;
1084 1069
1085 debug(1, "SetMemMap(%d, %d, %#2.2x, %d ns, %#llx-%#llx, " 1070 pr_debug("SetMemMap(%d, %d, %#2.2x, %d ns, %#llx-%#llx, "
1086 "%#x)\n", sock, mem->map, mem->flags, mem->speed, 1071 "%#x)\n", sock, mem->map, mem->flags, mem->speed,
1087 (unsigned long long)mem->res->start, 1072 (unsigned long long)mem->res->start,
1088 (unsigned long long)mem->res->end, mem->card_start); 1073 (unsigned long long)mem->res->end, mem->card_start);
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 7dfbee1dcd76..26a621c9e2fc 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -38,17 +38,6 @@
38 38
39#include "m32r_cfc.h" 39#include "m32r_cfc.h"
40 40
41#ifdef CONFIG_PCMCIA_DEBUG
42static int m32r_cfc_debug;
43module_param(m32r_cfc_debug, int, 0644);
44#define debug(lvl, fmt, arg...) do { \
45 if (m32r_cfc_debug > (lvl)) \
46 printk(KERN_DEBUG "m32r_cfc: " fmt , ## arg); \
47} while (0)
48#else
49#define debug(n, args...) do { } while (0)
50#endif
51
52/* Poll status interval -- 0 means default to interrupt */ 41/* Poll status interval -- 0 means default to interrupt */
53static int poll_interval = 0; 42static int poll_interval = 0;
54 43
@@ -123,7 +112,7 @@ void pcc_ioread_byte(int sock, unsigned long port, void *buf, size_t size,
123 unsigned char *bp = (unsigned char *)buf; 112 unsigned char *bp = (unsigned char *)buf;
124 unsigned long flags; 113 unsigned long flags;
125 114
126 debug(3, "m32r_cfc: pcc_ioread_byte: sock=%d, port=%#lx, buf=%p, " 115 pr_debug("m32r_cfc: pcc_ioread_byte: sock=%d, port=%#lx, buf=%p, "
127 "size=%u, nmemb=%d, flag=%d\n", 116 "size=%u, nmemb=%d, flag=%d\n",
128 sock, port, buf, size, nmemb, flag); 117 sock, port, buf, size, nmemb, flag);
129 118
@@ -132,7 +121,7 @@ void pcc_ioread_byte(int sock, unsigned long port, void *buf, size_t size,
132 printk("m32r_cfc:ioread_byte null port :%#lx\n",port); 121 printk("m32r_cfc:ioread_byte null port :%#lx\n",port);
133 return; 122 return;
134 } 123 }
135 debug(3, "m32r_cfc: pcc_ioread_byte: addr=%#lx\n", addr); 124 pr_debug("m32r_cfc: pcc_ioread_byte: addr=%#lx\n", addr);
136 125
137 spin_lock_irqsave(&pcc_lock, flags); 126 spin_lock_irqsave(&pcc_lock, flags);
138 /* read Byte */ 127 /* read Byte */
@@ -148,7 +137,7 @@ void pcc_ioread_word(int sock, unsigned long port, void *buf, size_t size,
148 unsigned short *bp = (unsigned short *)buf; 137 unsigned short *bp = (unsigned short *)buf;
149 unsigned long flags; 138 unsigned long flags;
150 139
151 debug(3, "m32r_cfc: pcc_ioread_word: sock=%d, port=%#lx, " 140 pr_debug("m32r_cfc: pcc_ioread_word: sock=%d, port=%#lx, "
152 "buf=%p, size=%u, nmemb=%d, flag=%d\n", 141 "buf=%p, size=%u, nmemb=%d, flag=%d\n",
153 sock, port, buf, size, nmemb, flag); 142 sock, port, buf, size, nmemb, flag);
154 143
@@ -163,7 +152,7 @@ void pcc_ioread_word(int sock, unsigned long port, void *buf, size_t size,
163 printk("m32r_cfc:ioread_word null port :%#lx\n",port); 152 printk("m32r_cfc:ioread_word null port :%#lx\n",port);
164 return; 153 return;
165 } 154 }
166 debug(3, "m32r_cfc: pcc_ioread_word: addr=%#lx\n", addr); 155 pr_debug("m32r_cfc: pcc_ioread_word: addr=%#lx\n", addr);
167 156
168 spin_lock_irqsave(&pcc_lock, flags); 157 spin_lock_irqsave(&pcc_lock, flags);
169 /* read Word */ 158 /* read Word */
@@ -179,7 +168,7 @@ void pcc_iowrite_byte(int sock, unsigned long port, void *buf, size_t size,
179 unsigned char *bp = (unsigned char *)buf; 168 unsigned char *bp = (unsigned char *)buf;
180 unsigned long flags; 169 unsigned long flags;
181 170
182 debug(3, "m32r_cfc: pcc_iowrite_byte: sock=%d, port=%#lx, " 171 pr_debug("m32r_cfc: pcc_iowrite_byte: sock=%d, port=%#lx, "
183 "buf=%p, size=%u, nmemb=%d, flag=%d\n", 172 "buf=%p, size=%u, nmemb=%d, flag=%d\n",
184 sock, port, buf, size, nmemb, flag); 173 sock, port, buf, size, nmemb, flag);
185 174
@@ -189,7 +178,7 @@ void pcc_iowrite_byte(int sock, unsigned long port, void *buf, size_t size,
189 printk("m32r_cfc:iowrite_byte null port:%#lx\n",port); 178 printk("m32r_cfc:iowrite_byte null port:%#lx\n",port);
190 return; 179 return;
191 } 180 }
192 debug(3, "m32r_cfc: pcc_iowrite_byte: addr=%#lx\n", addr); 181 pr_debug("m32r_cfc: pcc_iowrite_byte: addr=%#lx\n", addr);
193 182
194 spin_lock_irqsave(&pcc_lock, flags); 183 spin_lock_irqsave(&pcc_lock, flags);
195 while (nmemb--) 184 while (nmemb--)
@@ -204,7 +193,7 @@ void pcc_iowrite_word(int sock, unsigned long port, void *buf, size_t size,
204 unsigned short *bp = (unsigned short *)buf; 193 unsigned short *bp = (unsigned short *)buf;
205 unsigned long flags; 194 unsigned long flags;
206 195
207 debug(3, "m32r_cfc: pcc_iowrite_word: sock=%d, port=%#lx, " 196 pr_debug("m32r_cfc: pcc_iowrite_word: sock=%d, port=%#lx, "
208 "buf=%p, size=%u, nmemb=%d, flag=%d\n", 197 "buf=%p, size=%u, nmemb=%d, flag=%d\n",
209 sock, port, buf, size, nmemb, flag); 198 sock, port, buf, size, nmemb, flag);
210 199
@@ -226,7 +215,7 @@ void pcc_iowrite_word(int sock, unsigned long port, void *buf, size_t size,
226 return; 215 return;
227 } 216 }
228#endif 217#endif
229 debug(3, "m32r_cfc: pcc_iowrite_word: addr=%#lx\n", addr); 218 pr_debug("m32r_cfc: pcc_iowrite_word: addr=%#lx\n", addr);
230 219
231 spin_lock_irqsave(&pcc_lock, flags); 220 spin_lock_irqsave(&pcc_lock, flags);
232 while (nmemb--) 221 while (nmemb--)
@@ -262,7 +251,7 @@ static struct timer_list poll_timer;
262static unsigned int pcc_get(u_short sock, unsigned int reg) 251static unsigned int pcc_get(u_short sock, unsigned int reg)
263{ 252{
264 unsigned int val = inw(reg); 253 unsigned int val = inw(reg);
265 debug(3, "m32r_cfc: pcc_get: reg(0x%08x)=0x%04x\n", reg, val); 254 pr_debug("m32r_cfc: pcc_get: reg(0x%08x)=0x%04x\n", reg, val);
266 return val; 255 return val;
267} 256}
268 257
@@ -270,7 +259,7 @@ static unsigned int pcc_get(u_short sock, unsigned int reg)
270static void pcc_set(u_short sock, unsigned int reg, unsigned int data) 259static void pcc_set(u_short sock, unsigned int reg, unsigned int data)
271{ 260{
272 outw(data, reg); 261 outw(data, reg);
273 debug(3, "m32r_cfc: pcc_set: reg(0x%08x)=0x%04x\n", reg, data); 262 pr_debug("m32r_cfc: pcc_set: reg(0x%08x)=0x%04x\n", reg, data);
274} 263}
275 264
276/*====================================================================== 265/*======================================================================
@@ -286,14 +275,14 @@ static int __init is_alive(u_short sock)
286{ 275{
287 unsigned int stat; 276 unsigned int stat;
288 277
289 debug(3, "m32r_cfc: is_alive:\n"); 278 pr_debug("m32r_cfc: is_alive:\n");
290 279
291 printk("CF: "); 280 printk("CF: ");
292 stat = pcc_get(sock, (unsigned int)PLD_CFSTS); 281 stat = pcc_get(sock, (unsigned int)PLD_CFSTS);
293 if (!stat) 282 if (!stat)
294 printk("No "); 283 printk("No ");
295 printk("Card is detected at socket %d : stat = 0x%08x\n", sock, stat); 284 printk("Card is detected at socket %d : stat = 0x%08x\n", sock, stat);
296 debug(3, "m32r_cfc: is_alive: sock stat is 0x%04x\n", stat); 285 pr_debug("m32r_cfc: is_alive: sock stat is 0x%04x\n", stat);
297 286
298 return 0; 287 return 0;
299} 288}
@@ -303,7 +292,7 @@ static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
303{ 292{
304 pcc_socket_t *t = &socket[pcc_sockets]; 293 pcc_socket_t *t = &socket[pcc_sockets];
305 294
306 debug(3, "m32r_cfc: add_pcc_socket: base=%#lx, irq=%d, " 295 pr_debug("m32r_cfc: add_pcc_socket: base=%#lx, irq=%d, "
307 "mapaddr=%#lx, ioaddr=%08x\n", 296 "mapaddr=%#lx, ioaddr=%08x\n",
308 base, irq, mapaddr, ioaddr); 297 base, irq, mapaddr, ioaddr);
309 298
@@ -358,7 +347,7 @@ static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
358 /* eject interrupt */ 347 /* eject interrupt */
359 request_irq(irq+1, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt); 348 request_irq(irq+1, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt);
360#endif 349#endif
361 debug(3, "m32r_cfc: enable CFMSK, RDYSEL\n"); 350 pr_debug("m32r_cfc: enable CFMSK, RDYSEL\n");
362 pcc_set(pcc_sockets, (unsigned int)PLD_CFIMASK, 0x01); 351 pcc_set(pcc_sockets, (unsigned int)PLD_CFIMASK, 0x01);
363#endif /* CONFIG_PLAT_USRV */ 352#endif /* CONFIG_PLAT_USRV */
364#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) 353#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT)
@@ -378,26 +367,26 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
378 u_int events = 0; 367 u_int events = 0;
379 int handled = 0; 368 int handled = 0;
380 369
381 debug(3, "m32r_cfc: pcc_interrupt: irq=%d, dev=%p\n", irq, dev); 370 pr_debug("m32r_cfc: pcc_interrupt: irq=%d, dev=%p\n", irq, dev);
382 for (i = 0; i < pcc_sockets; i++) { 371 for (i = 0; i < pcc_sockets; i++) {
383 if (socket[i].cs_irq1 != irq && socket[i].cs_irq2 != irq) 372 if (socket[i].cs_irq1 != irq && socket[i].cs_irq2 != irq)
384 continue; 373 continue;
385 374
386 handled = 1; 375 handled = 1;
387 debug(3, "m32r_cfc: pcc_interrupt: socket %d irq 0x%02x ", 376 pr_debug("m32r_cfc: pcc_interrupt: socket %d irq 0x%02x ",
388 i, irq); 377 i, irq);
389 events |= SS_DETECT; /* insert or eject */ 378 events |= SS_DETECT; /* insert or eject */
390 if (events) 379 if (events)
391 pcmcia_parse_events(&socket[i].socket, events); 380 pcmcia_parse_events(&socket[i].socket, events);
392 } 381 }
393 debug(3, "m32r_cfc: pcc_interrupt: done\n"); 382 pr_debug("m32r_cfc: pcc_interrupt: done\n");
394 383
395 return IRQ_RETVAL(handled); 384 return IRQ_RETVAL(handled);
396} /* pcc_interrupt */ 385} /* pcc_interrupt */
397 386
398static void pcc_interrupt_wrapper(u_long data) 387static void pcc_interrupt_wrapper(u_long data)
399{ 388{
400 debug(3, "m32r_cfc: pcc_interrupt_wrapper:\n"); 389 pr_debug("m32r_cfc: pcc_interrupt_wrapper:\n");
401 pcc_interrupt(0, NULL); 390 pcc_interrupt(0, NULL);
402 init_timer(&poll_timer); 391 init_timer(&poll_timer);
403 poll_timer.expires = jiffies + poll_interval; 392 poll_timer.expires = jiffies + poll_interval;
@@ -410,17 +399,17 @@ static int _pcc_get_status(u_short sock, u_int *value)
410{ 399{
411 u_int status; 400 u_int status;
412 401
413 debug(3, "m32r_cfc: _pcc_get_status:\n"); 402 pr_debug("m32r_cfc: _pcc_get_status:\n");
414 status = pcc_get(sock, (unsigned int)PLD_CFSTS); 403 status = pcc_get(sock, (unsigned int)PLD_CFSTS);
415 *value = (status) ? SS_DETECT : 0; 404 *value = (status) ? SS_DETECT : 0;
416 debug(3, "m32r_cfc: _pcc_get_status: status=0x%08x\n", status); 405 pr_debug("m32r_cfc: _pcc_get_status: status=0x%08x\n", status);
417 406
418#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT) 407#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV) || defined(CONFIG_PLAT_OPSPUT)
419 if ( status ) { 408 if ( status ) {
420 /* enable CF power */ 409 /* enable CF power */
421 status = inw((unsigned int)PLD_CPCR); 410 status = inw((unsigned int)PLD_CPCR);
422 if (!(status & PLD_CPCR_CF)) { 411 if (!(status & PLD_CPCR_CF)) {
423 debug(3, "m32r_cfc: _pcc_get_status: " 412 pr_debug("m32r_cfc: _pcc_get_status: "
424 "power on (CPCR=0x%08x)\n", status); 413 "power on (CPCR=0x%08x)\n", status);
425 status |= PLD_CPCR_CF; 414 status |= PLD_CPCR_CF;
426 outw(status, (unsigned int)PLD_CPCR); 415 outw(status, (unsigned int)PLD_CPCR);
@@ -439,7 +428,7 @@ static int _pcc_get_status(u_short sock, u_int *value)
439 status &= ~PLD_CPCR_CF; 428 status &= ~PLD_CPCR_CF;
440 outw(status, (unsigned int)PLD_CPCR); 429 outw(status, (unsigned int)PLD_CPCR);
441 udelay(100); 430 udelay(100);
442 debug(3, "m32r_cfc: _pcc_get_status: " 431 pr_debug("m32r_cfc: _pcc_get_status: "
443 "power off (CPCR=0x%08x)\n", status); 432 "power off (CPCR=0x%08x)\n", status);
444 } 433 }
445#elif defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3) 434#elif defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3)
@@ -465,13 +454,13 @@ static int _pcc_get_status(u_short sock, u_int *value)
465 /* disable CF power */ 454 /* disable CF power */
466 pcc_set(sock, (unsigned int)PLD_CPCR, 0); 455 pcc_set(sock, (unsigned int)PLD_CPCR, 0);
467 udelay(100); 456 udelay(100);
468 debug(3, "m32r_cfc: _pcc_get_status: " 457 pr_debug("m32r_cfc: _pcc_get_status: "
469 "power off (CPCR=0x%08x)\n", status); 458 "power off (CPCR=0x%08x)\n", status);
470 } 459 }
471#else 460#else
472#error no platform configuration 461#error no platform configuration
473#endif 462#endif
474 debug(3, "m32r_cfc: _pcc_get_status: GetStatus(%d) = %#4.4x\n", 463 pr_debug("m32r_cfc: _pcc_get_status: GetStatus(%d) = %#4.4x\n",
475 sock, *value); 464 sock, *value);
476 return 0; 465 return 0;
477} /* _get_status */ 466} /* _get_status */
@@ -480,7 +469,7 @@ static int _pcc_get_status(u_short sock, u_int *value)
480 469
481static int _pcc_set_socket(u_short sock, socket_state_t *state) 470static int _pcc_set_socket(u_short sock, socket_state_t *state)
482{ 471{
483 debug(3, "m32r_cfc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 472 pr_debug("m32r_cfc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
484 "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags, 473 "io_irq %d, csc_mask %#2.2x)\n", sock, state->flags,
485 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 474 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
486 475
@@ -492,41 +481,39 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state)
492 } 481 }
493#endif 482#endif
494 if (state->flags & SS_RESET) { 483 if (state->flags & SS_RESET) {
495 debug(3, ":RESET\n"); 484 pr_debug(":RESET\n");
496 pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x101); 485 pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x101);
497 }else{ 486 }else{
498 pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x100); 487 pcc_set(sock,(unsigned int)PLD_CFRSTCR,0x100);
499 } 488 }
500 if (state->flags & SS_OUTPUT_ENA){ 489 if (state->flags & SS_OUTPUT_ENA){
501 debug(3, ":OUTPUT_ENA\n"); 490 pr_debug(":OUTPUT_ENA\n");
502 /* bit clear */ 491 /* bit clear */
503 pcc_set(sock,(unsigned int)PLD_CFBUFCR,0); 492 pcc_set(sock,(unsigned int)PLD_CFBUFCR,0);
504 } else { 493 } else {
505 pcc_set(sock,(unsigned int)PLD_CFBUFCR,1); 494 pcc_set(sock,(unsigned int)PLD_CFBUFCR,1);
506 } 495 }
507 496
508#ifdef CONFIG_PCMCIA_DEBUG
509 if(state->flags & SS_IOCARD){ 497 if(state->flags & SS_IOCARD){
510 debug(3, ":IOCARD"); 498 pr_debug(":IOCARD");
511 } 499 }
512 if (state->flags & SS_PWR_AUTO) { 500 if (state->flags & SS_PWR_AUTO) {
513 debug(3, ":PWR_AUTO"); 501 pr_debug(":PWR_AUTO");
514 } 502 }
515 if (state->csc_mask & SS_DETECT) 503 if (state->csc_mask & SS_DETECT)
516 debug(3, ":csc-SS_DETECT"); 504 pr_debug(":csc-SS_DETECT");
517 if (state->flags & SS_IOCARD) { 505 if (state->flags & SS_IOCARD) {
518 if (state->csc_mask & SS_STSCHG) 506 if (state->csc_mask & SS_STSCHG)
519 debug(3, ":STSCHG"); 507 pr_debug(":STSCHG");
520 } else { 508 } else {
521 if (state->csc_mask & SS_BATDEAD) 509 if (state->csc_mask & SS_BATDEAD)
522 debug(3, ":BATDEAD"); 510 pr_debug(":BATDEAD");
523 if (state->csc_mask & SS_BATWARN) 511 if (state->csc_mask & SS_BATWARN)
524 debug(3, ":BATWARN"); 512 pr_debug(":BATWARN");
525 if (state->csc_mask & SS_READY) 513 if (state->csc_mask & SS_READY)
526 debug(3, ":READY"); 514 pr_debug(":READY");
527 } 515 }
528 debug(3, "\n"); 516 pr_debug("\n");
529#endif
530 return 0; 517 return 0;
531} /* _set_socket */ 518} /* _set_socket */
532 519
@@ -536,7 +523,7 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io)
536{ 523{
537 u_char map; 524 u_char map;
538 525
539 debug(3, "m32r_cfc: SetIOMap(%d, %d, %#2.2x, %d ns, " 526 pr_debug("m32r_cfc: SetIOMap(%d, %d, %#2.2x, %d ns, "
540 "%#llx-%#llx)\n", sock, io->map, io->flags, 527 "%#llx-%#llx)\n", sock, io->map, io->flags,
541 io->speed, (unsigned long long)io->start, 528 io->speed, (unsigned long long)io->start,
542 (unsigned long long)io->stop); 529 (unsigned long long)io->stop);
@@ -554,7 +541,7 @@ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem)
554 u_long addr; 541 u_long addr;
555 pcc_socket_t *t = &socket[sock]; 542 pcc_socket_t *t = &socket[sock];
556 543
557 debug(3, "m32r_cfc: SetMemMap(%d, %d, %#2.2x, %d ns, " 544 pr_debug("m32r_cfc: SetMemMap(%d, %d, %#2.2x, %d ns, "
558 "%#llx, %#x)\n", sock, map, mem->flags, 545 "%#llx, %#x)\n", sock, map, mem->flags,
559 mem->speed, (unsigned long long)mem->static_start, 546 mem->speed, (unsigned long long)mem->static_start,
560 mem->card_start); 547 mem->card_start);
@@ -640,11 +627,11 @@ static int pcc_get_status(struct pcmcia_socket *s, u_int *value)
640 unsigned int sock = container_of(s, struct pcc_socket, socket)->number; 627 unsigned int sock = container_of(s, struct pcc_socket, socket)->number;
641 628
642 if (socket[sock].flags & IS_ALIVE) { 629 if (socket[sock].flags & IS_ALIVE) {
643 debug(3, "m32r_cfc: pcc_get_status: sock(%d) -EINVAL\n", sock); 630 dev_dbg(&s->dev, "pcc_get_status: sock(%d) -EINVAL\n", sock);
644 *value = 0; 631 *value = 0;
645 return -EINVAL; 632 return -EINVAL;
646 } 633 }
647 debug(3, "m32r_cfc: pcc_get_status: sock(%d)\n", sock); 634 dev_dbg(&s->dev, "pcc_get_status: sock(%d)\n", sock);
648 LOCKED(_pcc_get_status(sock, value)); 635 LOCKED(_pcc_get_status(sock, value));
649} 636}
650 637
@@ -653,10 +640,10 @@ static int pcc_set_socket(struct pcmcia_socket *s, socket_state_t *state)
653 unsigned int sock = container_of(s, struct pcc_socket, socket)->number; 640 unsigned int sock = container_of(s, struct pcc_socket, socket)->number;
654 641
655 if (socket[sock].flags & IS_ALIVE) { 642 if (socket[sock].flags & IS_ALIVE) {
656 debug(3, "m32r_cfc: pcc_set_socket: sock(%d) -EINVAL\n", sock); 643 dev_dbg(&s->dev, "pcc_set_socket: sock(%d) -EINVAL\n", sock);
657 return -EINVAL; 644 return -EINVAL;
658 } 645 }
659 debug(3, "m32r_cfc: pcc_set_socket: sock(%d)\n", sock); 646 dev_dbg(&s->dev, "pcc_set_socket: sock(%d)\n", sock);
660 LOCKED(_pcc_set_socket(sock, state)); 647 LOCKED(_pcc_set_socket(sock, state));
661} 648}
662 649
@@ -665,10 +652,10 @@ static int pcc_set_io_map(struct pcmcia_socket *s, struct pccard_io_map *io)
665 unsigned int sock = container_of(s, struct pcc_socket, socket)->number; 652 unsigned int sock = container_of(s, struct pcc_socket, socket)->number;
666 653
667 if (socket[sock].flags & IS_ALIVE) { 654 if (socket[sock].flags & IS_ALIVE) {
668 debug(3, "m32r_cfc: pcc_set_io_map: sock(%d) -EINVAL\n", sock); 655 dev_dbg(&s->dev, "pcc_set_io_map: sock(%d) -EINVAL\n", sock);
669 return -EINVAL; 656 return -EINVAL;
670 } 657 }
671 debug(3, "m32r_cfc: pcc_set_io_map: sock(%d)\n", sock); 658 dev_dbg(&s->dev, "pcc_set_io_map: sock(%d)\n", sock);
672 LOCKED(_pcc_set_io_map(sock, io)); 659 LOCKED(_pcc_set_io_map(sock, io));
673} 660}
674 661
@@ -677,16 +664,16 @@ static int pcc_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *mem)
677 unsigned int sock = container_of(s, struct pcc_socket, socket)->number; 664 unsigned int sock = container_of(s, struct pcc_socket, socket)->number;
678 665
679 if (socket[sock].flags & IS_ALIVE) { 666 if (socket[sock].flags & IS_ALIVE) {
680 debug(3, "m32r_cfc: pcc_set_mem_map: sock(%d) -EINVAL\n", sock); 667 dev_dbg(&s->dev, "pcc_set_mem_map: sock(%d) -EINVAL\n", sock);
681 return -EINVAL; 668 return -EINVAL;
682 } 669 }
683 debug(3, "m32r_cfc: pcc_set_mem_map: sock(%d)\n", sock); 670 dev_dbg(&s->dev, "pcc_set_mem_map: sock(%d)\n", sock);
684 LOCKED(_pcc_set_mem_map(sock, mem)); 671 LOCKED(_pcc_set_mem_map(sock, mem));
685} 672}
686 673
687static int pcc_init(struct pcmcia_socket *s) 674static int pcc_init(struct pcmcia_socket *s)
688{ 675{
689 debug(3, "m32r_cfc: pcc_init()\n"); 676 dev_dbg(&s->dev, "pcc_init()\n");
690 return 0; 677 return 0;
691} 678}
692 679
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index c6524f99ccc3..72844c5a6d05 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -45,16 +45,6 @@
45 45
46#define PCC_DEBUG_DBEX 46#define PCC_DEBUG_DBEX
47 47
48#ifdef CONFIG_PCMCIA_DEBUG
49static int m32r_pcc_debug;
50module_param(m32r_pcc_debug, int, 0644);
51#define debug(lvl, fmt, arg...) do { \
52 if (m32r_pcc_debug > (lvl)) \
53 printk(KERN_DEBUG "m32r_pcc: " fmt , ## arg); \
54} while (0)
55#else
56#define debug(n, args...) do { } while (0)
57#endif
58 48
59/* Poll status interval -- 0 means default to interrupt */ 49/* Poll status interval -- 0 means default to interrupt */
60static int poll_interval = 0; 50static int poll_interval = 0;
@@ -358,7 +348,7 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
358 u_int events, active; 348 u_int events, active;
359 int handled = 0; 349 int handled = 0;
360 350
361 debug(4, "m32r: pcc_interrupt(%d)\n", irq); 351 pr_debug("m32r_pcc: pcc_interrupt(%d)\n", irq);
362 352
363 for (j = 0; j < 20; j++) { 353 for (j = 0; j < 20; j++) {
364 active = 0; 354 active = 0;
@@ -369,13 +359,14 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
369 handled = 1; 359 handled = 1;
370 irc = pcc_get(i, PCIRC); 360 irc = pcc_get(i, PCIRC);
371 irc >>=16; 361 irc >>=16;
372 debug(2, "m32r-pcc:interrupt: socket %d pcirc 0x%02x ", i, irc); 362 pr_debug("m32r_pcc: interrupt: socket %d pcirc 0x%02x ",
363 i, irc);
373 if (!irc) 364 if (!irc)
374 continue; 365 continue;
375 366
376 events = (irc) ? SS_DETECT : 0; 367 events = (irc) ? SS_DETECT : 0;
377 events |= (pcc_get(i,PCCR) & PCCR_PCEN) ? SS_READY : 0; 368 events |= (pcc_get(i,PCCR) & PCCR_PCEN) ? SS_READY : 0;
378 debug(2, " event 0x%02x\n", events); 369 pr_debug("m32r_pcc: event 0x%02x\n", events);
379 370
380 if (events) 371 if (events)
381 pcmcia_parse_events(&socket[i].socket, events); 372 pcmcia_parse_events(&socket[i].socket, events);
@@ -388,7 +379,7 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
388 if (j == 20) 379 if (j == 20)
389 printk(KERN_NOTICE "m32r-pcc: infinite loop in interrupt handler\n"); 380 printk(KERN_NOTICE "m32r-pcc: infinite loop in interrupt handler\n");
390 381
391 debug(4, "m32r-pcc: interrupt done\n"); 382 pr_debug("m32r_pcc: interrupt done\n");
392 383
393 return IRQ_RETVAL(handled); 384 return IRQ_RETVAL(handled);
394} /* pcc_interrupt */ 385} /* pcc_interrupt */
@@ -422,7 +413,7 @@ static int _pcc_get_status(u_short sock, u_int *value)
422 status = pcc_get(sock,PCCSIGCR); 413 status = pcc_get(sock,PCCSIGCR);
423 *value |= (status & PCCSIGCR_VEN) ? SS_POWERON : 0; 414 *value |= (status & PCCSIGCR_VEN) ? SS_POWERON : 0;
424 415
425 debug(3, "m32r-pcc: GetStatus(%d) = %#4.4x\n", sock, *value); 416 pr_debug("m32r_pcc: GetStatus(%d) = %#4.4x\n", sock, *value);
426 return 0; 417 return 0;
427} /* _get_status */ 418} /* _get_status */
428 419
@@ -432,7 +423,7 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state)
432{ 423{
433 u_long reg = 0; 424 u_long reg = 0;
434 425
435 debug(3, "m32r-pcc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 426 pr_debug("m32r_pcc: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
436 "io_irq %d, csc_mask %#2.2x)", sock, state->flags, 427 "io_irq %d, csc_mask %#2.2x)", sock, state->flags,
437 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 428 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
438 429
@@ -448,11 +439,11 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state)
448 } 439 }
449 440
450 if (state->flags & SS_RESET) { 441 if (state->flags & SS_RESET) {
451 debug(3, ":RESET\n"); 442 pr_debug("m32r_pcc: :RESET\n");
452 reg |= PCCSIGCR_CRST; 443 reg |= PCCSIGCR_CRST;
453 } 444 }
454 if (state->flags & SS_OUTPUT_ENA){ 445 if (state->flags & SS_OUTPUT_ENA){
455 debug(3, ":OUTPUT_ENA\n"); 446 pr_debug("m32r_pcc: :OUTPUT_ENA\n");
456 /* bit clear */ 447 /* bit clear */
457 } else { 448 } else {
458 reg |= PCCSIGCR_SEN; 449 reg |= PCCSIGCR_SEN;
@@ -460,28 +451,26 @@ static int _pcc_set_socket(u_short sock, socket_state_t *state)
460 451
461 pcc_set(sock,PCCSIGCR,reg); 452 pcc_set(sock,PCCSIGCR,reg);
462 453
463#ifdef CONFIG_PCMCIA_DEBUG
464 if(state->flags & SS_IOCARD){ 454 if(state->flags & SS_IOCARD){
465 debug(3, ":IOCARD"); 455 pr_debug("m32r_pcc: :IOCARD");
466 } 456 }
467 if (state->flags & SS_PWR_AUTO) { 457 if (state->flags & SS_PWR_AUTO) {
468 debug(3, ":PWR_AUTO"); 458 pr_debug("m32r_pcc: :PWR_AUTO");
469 } 459 }
470 if (state->csc_mask & SS_DETECT) 460 if (state->csc_mask & SS_DETECT)
471 debug(3, ":csc-SS_DETECT"); 461 pr_debug("m32r_pcc: :csc-SS_DETECT");
472 if (state->flags & SS_IOCARD) { 462 if (state->flags & SS_IOCARD) {
473 if (state->csc_mask & SS_STSCHG) 463 if (state->csc_mask & SS_STSCHG)
474 debug(3, ":STSCHG"); 464 pr_debug("m32r_pcc: :STSCHG");
475 } else { 465 } else {
476 if (state->csc_mask & SS_BATDEAD) 466 if (state->csc_mask & SS_BATDEAD)
477 debug(3, ":BATDEAD"); 467 pr_debug("m32r_pcc: :BATDEAD");
478 if (state->csc_mask & SS_BATWARN) 468 if (state->csc_mask & SS_BATWARN)
479 debug(3, ":BATWARN"); 469 pr_debug("m32r_pcc: :BATWARN");
480 if (state->csc_mask & SS_READY) 470 if (state->csc_mask & SS_READY)
481 debug(3, ":READY"); 471 pr_debug("m32r_pcc: :READY");
482 } 472 }
483 debug(3, "\n"); 473 pr_debug("m32r_pcc: \n");
484#endif
485 return 0; 474 return 0;
486} /* _set_socket */ 475} /* _set_socket */
487 476
@@ -491,7 +480,7 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io)
491{ 480{
492 u_char map; 481 u_char map;
493 482
494 debug(3, "m32r-pcc: SetIOMap(%d, %d, %#2.2x, %d ns, " 483 pr_debug("m32r_pcc: SetIOMap(%d, %d, %#2.2x, %d ns, "
495 "%#llx-%#llx)\n", sock, io->map, io->flags, 484 "%#llx-%#llx)\n", sock, io->map, io->flags,
496 io->speed, (unsigned long long)io->start, 485 io->speed, (unsigned long long)io->start,
497 (unsigned long long)io->stop); 486 (unsigned long long)io->stop);
@@ -515,7 +504,7 @@ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem)
515#endif 504#endif
516#endif 505#endif
517 506
518 debug(3, "m32r-pcc: SetMemMap(%d, %d, %#2.2x, %d ns, " 507 pr_debug("m32r_pcc: SetMemMap(%d, %d, %#2.2x, %d ns, "
519 "%#llx, %#x)\n", sock, map, mem->flags, 508 "%#llx, %#x)\n", sock, map, mem->flags,
520 mem->speed, (unsigned long long)mem->static_start, 509 mem->speed, (unsigned long long)mem->static_start,
521 mem->card_start); 510 mem->card_start);
@@ -662,7 +651,7 @@ static int pcc_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *mem)
662 651
663static int pcc_init(struct pcmcia_socket *s) 652static int pcc_init(struct pcmcia_socket *s)
664{ 653{
665 debug(4, "m32r-pcc: init call\n"); 654 pr_debug("m32r_pcc: init call\n");
666 return 0; 655 return 0;
667} 656}
668 657
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 403559ba49dd..7f79c4e169ae 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -64,14 +64,6 @@
64#include <pcmcia/cs.h> 64#include <pcmcia/cs.h>
65#include <pcmcia/ss.h> 65#include <pcmcia/ss.h>
66 66
67#ifdef CONFIG_PCMCIA_DEBUG
68static int pc_debug;
69module_param(pc_debug, int, 0);
70#define dprintk(args...) printk(KERN_DEBUG "m8xx_pcmcia: " args);
71#else
72#define dprintk(args...)
73#endif
74
75#define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args) 67#define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args)
76#define pcmcia_error(args...) printk(KERN_ERR "m8xx_pcmcia: "args) 68#define pcmcia_error(args...) printk(KERN_ERR "m8xx_pcmcia: "args)
77 69
@@ -565,7 +557,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
565 unsigned int i, events, pscr, pipr, per; 557 unsigned int i, events, pscr, pipr, per;
566 pcmconf8xx_t *pcmcia = socket[0].pcmcia; 558 pcmconf8xx_t *pcmcia = socket[0].pcmcia;
567 559
568 dprintk("Interrupt!\n"); 560 pr_debug("m8xx_pcmcia: Interrupt!\n");
569 /* get interrupt sources */ 561 /* get interrupt sources */
570 562
571 pscr = in_be32(&pcmcia->pcmc_pscr); 563 pscr = in_be32(&pcmcia->pcmc_pscr);
@@ -614,7 +606,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
614 606
615 /* call the handler */ 607 /* call the handler */
616 608
617 dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, " 609 pr_debug("m8xx_pcmcia: slot %u: events = 0x%02x, pscr = 0x%08x, "
618 "pipr = 0x%08x\n", i, events, pscr, pipr); 610 "pipr = 0x%08x\n", i, events, pscr, pipr);
619 611
620 if (events) { 612 if (events) {
@@ -641,7 +633,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
641 /* clear the interrupt sources */ 633 /* clear the interrupt sources */
642 out_be32(&pcmcia->pcmc_pscr, pscr); 634 out_be32(&pcmcia->pcmc_pscr, pscr);
643 635
644 dprintk("Interrupt done.\n"); 636 pr_debug("m8xx_pcmcia: Interrupt done.\n");
645 637
646 return IRQ_HANDLED; 638 return IRQ_HANDLED;
647} 639}
@@ -815,7 +807,7 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
815 }; 807 };
816 } 808 }
817 809
818 dprintk("GetStatus(%d) = %#2.2x\n", lsock, *value); 810 pr_debug("m8xx_pcmcia: GetStatus(%d) = %#2.2x\n", lsock, *value);
819 return 0; 811 return 0;
820} 812}
821 813
@@ -828,7 +820,7 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state)
828 unsigned long flags; 820 unsigned long flags;
829 pcmconf8xx_t *pcmcia = socket[0].pcmcia; 821 pcmconf8xx_t *pcmcia = socket[0].pcmcia;
830 822
831 dprintk("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 823 pr_debug("m8xx_pcmcia: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
832 "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags, 824 "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
833 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 825 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
834 826
@@ -974,7 +966,7 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
974#define M8XX_SIZE (io->stop - io->start + 1) 966#define M8XX_SIZE (io->stop - io->start + 1)
975#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start) 967#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start)
976 968
977 dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, " 969 pr_debug("m8xx_pcmcia: SetIOMap(%d, %d, %#2.2x, %d ns, "
978 "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags, 970 "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags,
979 io->speed, (unsigned long long)io->start, 971 io->speed, (unsigned long long)io->start,
980 (unsigned long long)io->stop); 972 (unsigned long long)io->stop);
@@ -988,7 +980,7 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
988 980
989 if (io->flags & MAP_ACTIVE) { 981 if (io->flags & MAP_ACTIVE) {
990 982
991 dprintk("io->flags & MAP_ACTIVE\n"); 983 pr_debug("m8xx_pcmcia: io->flags & MAP_ACTIVE\n");
992 984
993 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) 985 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
994 + (lsock * PCMCIA_IO_WIN_NO) + io->map; 986 + (lsock * PCMCIA_IO_WIN_NO) + io->map;
@@ -1018,8 +1010,8 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
1018 1010
1019 out_be32(&w->or, reg); 1011 out_be32(&w->or, reg);
1020 1012
1021 dprintk("Socket %u: Mapped io window %u at %#8.8x, " 1013 pr_debug("m8xx_pcmcia: Socket %u: Mapped io window %u at "
1022 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); 1014 "%#8.8x, OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
1023 } else { 1015 } else {
1024 /* shutdown IO window */ 1016 /* shutdown IO window */
1025 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) 1017 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
@@ -1033,14 +1025,14 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
1033 out_be32(&w->or, 0); /* turn off window */ 1025 out_be32(&w->or, 0); /* turn off window */
1034 out_be32(&w->br, 0); /* turn off base address */ 1026 out_be32(&w->br, 0); /* turn off base address */
1035 1027
1036 dprintk("Socket %u: Unmapped io window %u at %#8.8x, " 1028 pr_debug("m8xx_pcmcia: Socket %u: Unmapped io window %u at "
1037 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); 1029 "%#8.8x, OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
1038 } 1030 }
1039 1031
1040 /* copy the struct and modify the copy */ 1032 /* copy the struct and modify the copy */
1041 s->io_win[io->map] = *io; 1033 s->io_win[io->map] = *io;
1042 s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE); 1034 s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
1043 dprintk("SetIOMap exit\n"); 1035 pr_debug("m8xx_pcmcia: SetIOMap exit\n");
1044 1036
1045 return 0; 1037 return 0;
1046} 1038}
@@ -1055,7 +1047,7 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock,
1055 unsigned int reg, winnr; 1047 unsigned int reg, winnr;
1056 pcmconf8xx_t *pcmcia = s->pcmcia; 1048 pcmconf8xx_t *pcmcia = s->pcmcia;
1057 1049
1058 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " 1050 pr_debug("m8xx_pcmcia: SetMemMap(%d, %d, %#2.2x, %d ns, "
1059 "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, 1051 "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags,
1060 mem->speed, (unsigned long long)mem->static_start, 1052 mem->speed, (unsigned long long)mem->static_start,
1061 mem->card_start); 1053 mem->card_start);
@@ -1098,7 +1090,7 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock,
1098 1090
1099 out_be32(&w->or, reg); 1091 out_be32(&w->or, reg);
1100 1092
1101 dprintk("Socket %u: Mapped memory window %u at %#8.8x, " 1093 pr_debug("m8xx_pcmcia: Socket %u: Mapped memory window %u at %#8.8x, "
1102 "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or); 1094 "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
1103 1095
1104 if (mem->flags & MAP_ACTIVE) { 1096 if (mem->flags & MAP_ACTIVE) {
@@ -1108,7 +1100,7 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock,
1108 + mem->card_start; 1100 + mem->card_start;
1109 } 1101 }
1110 1102
1111 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " 1103 pr_debug("m8xx_pcmcia: SetMemMap(%d, %d, %#2.2x, %d ns, "
1112 "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, 1104 "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags,
1113 mem->speed, (unsigned long long)mem->static_start, 1105 mem->speed, (unsigned long long)mem->static_start,
1114 mem->card_start); 1106 mem->card_start);
@@ -1129,7 +1121,7 @@ static int m8xx_sock_init(struct pcmcia_socket *sock)
1129 pccard_io_map io = { 0, 0, 0, 0, 1 }; 1121 pccard_io_map io = { 0, 0, 0, 0, 1 };
1130 pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 }; 1122 pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
1131 1123
1132 dprintk("sock_init(%d)\n", s); 1124 pr_debug("m8xx_pcmcia: sock_init(%d)\n", s);
1133 1125
1134 m8xx_set_socket(sock, &dead_socket); 1126 m8xx_set_socket(sock, &dead_socket);
1135 for (i = 0; i < PCMCIA_IO_WIN_NO; i++) { 1127 for (i = 0; i < PCMCIA_IO_WIN_NO; i++) {
diff --git a/drivers/pcmcia/o2micro.h b/drivers/pcmcia/o2micro.h
index 72188c462c9c..624442fc0d35 100644
--- a/drivers/pcmcia/o2micro.h
+++ b/drivers/pcmcia/o2micro.h
@@ -30,28 +30,6 @@
30#ifndef _LINUX_O2MICRO_H 30#ifndef _LINUX_O2MICRO_H
31#define _LINUX_O2MICRO_H 31#define _LINUX_O2MICRO_H
32 32
33#ifndef PCI_VENDOR_ID_O2
34#define PCI_VENDOR_ID_O2 0x1217
35#endif
36#ifndef PCI_DEVICE_ID_O2_6729
37#define PCI_DEVICE_ID_O2_6729 0x6729
38#endif
39#ifndef PCI_DEVICE_ID_O2_6730
40#define PCI_DEVICE_ID_O2_6730 0x673a
41#endif
42#ifndef PCI_DEVICE_ID_O2_6832
43#define PCI_DEVICE_ID_O2_6832 0x6832
44#endif
45#ifndef PCI_DEVICE_ID_O2_6836
46#define PCI_DEVICE_ID_O2_6836 0x6836
47#endif
48#ifndef PCI_DEVICE_ID_O2_6812
49#define PCI_DEVICE_ID_O2_6812 0x6872
50#endif
51#ifndef PCI_DEVICE_ID_O2_6933
52#define PCI_DEVICE_ID_O2_6933 0x6933
53#endif
54
55/* Additional PCI configuration registers */ 33/* Additional PCI configuration registers */
56 34
57#define O2_MUX_CONTROL 0x90 /* 32 bit */ 35#define O2_MUX_CONTROL 0x90 /* 32 bit */
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
index 30cf71d2ee23..c4d7908fa37f 100644
--- a/drivers/pcmcia/pcmcia_ioctl.c
+++ b/drivers/pcmcia/pcmcia_ioctl.c
@@ -58,17 +58,6 @@ typedef struct user_info_t {
58} user_info_t; 58} user_info_t;
59 59
60 60
61#ifdef CONFIG_PCMCIA_DEBUG
62extern int ds_pc_debug;
63
64#define ds_dbg(lvl, fmt, arg...) do { \
65 if (ds_pc_debug >= lvl) \
66 printk(KERN_DEBUG "ds: " fmt , ## arg); \
67} while (0)
68#else
69#define ds_dbg(lvl, fmt, arg...) do { } while (0)
70#endif
71
72static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s, 61static struct pcmcia_device *get_pcmcia_device(struct pcmcia_socket *s,
73 unsigned int function) 62 unsigned int function)
74{ 63{
@@ -229,6 +218,61 @@ static int pcmcia_adjust_resource_info(adjust_t *adj)
229 return (ret); 218 return (ret);
230} 219}
231 220
221
222/** pcmcia_get_window
223 */
224static int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *wh_out,
225 window_handle_t wh, win_req_t *req)
226{
227 pccard_mem_map *win;
228 window_handle_t w;
229
230 wh--;
231 if (!s || !(s->state & SOCKET_PRESENT))
232 return -ENODEV;
233 if (wh >= MAX_WIN)
234 return -EINVAL;
235 for (w = wh; w < MAX_WIN; w++)
236 if (s->state & SOCKET_WIN_REQ(w))
237 break;
238 if (w == MAX_WIN)
239 return -EINVAL;
240 win = &s->win[w];
241 req->Base = win->res->start;
242 req->Size = win->res->end - win->res->start + 1;
243 req->AccessSpeed = win->speed;
244 req->Attributes = 0;
245 if (win->flags & MAP_ATTRIB)
246 req->Attributes |= WIN_MEMORY_TYPE_AM;
247 if (win->flags & MAP_ACTIVE)
248 req->Attributes |= WIN_ENABLE;
249 if (win->flags & MAP_16BIT)
250 req->Attributes |= WIN_DATA_WIDTH_16;
251 if (win->flags & MAP_USE_WAIT)
252 req->Attributes |= WIN_USE_WAIT;
253
254 *wh_out = w + 1;
255 return 0;
256} /* pcmcia_get_window */
257
258
259/** pcmcia_get_mem_page
260 *
261 * Change the card address of an already open memory window.
262 */
263static int pcmcia_get_mem_page(struct pcmcia_socket *skt, window_handle_t wh,
264 memreq_t *req)
265{
266 wh--;
267 if (wh >= MAX_WIN)
268 return -EINVAL;
269
270 req->Page = 0;
271 req->CardOffset = skt->win[wh].card_start;
272 return 0;
273} /* pcmcia_get_mem_page */
274
275
232/** pccard_get_status 276/** pccard_get_status
233 * 277 *
234 * Get the current socket state bits. We don't support the latched 278 * Get the current socket state bits. We don't support the latched
@@ -431,7 +475,7 @@ static int bind_request(struct pcmcia_socket *s, bind_info_t *bind_info)
431 if (!s) 475 if (!s)
432 return -EINVAL; 476 return -EINVAL;
433 477
434 ds_dbg(2, "bind_request(%d, '%s')\n", s->sock, 478 pr_debug("bind_request(%d, '%s')\n", s->sock,
435 (char *)bind_info->dev_info); 479 (char *)bind_info->dev_info);
436 480
437 p_drv = get_pcmcia_driver(&bind_info->dev_info); 481 p_drv = get_pcmcia_driver(&bind_info->dev_info);
@@ -623,7 +667,7 @@ static int ds_open(struct inode *inode, struct file *file)
623 static int warning_printed = 0; 667 static int warning_printed = 0;
624 int ret = 0; 668 int ret = 0;
625 669
626 ds_dbg(0, "ds_open(socket %d)\n", i); 670 pr_debug("ds_open(socket %d)\n", i);
627 671
628 lock_kernel(); 672 lock_kernel();
629 s = pcmcia_get_socket_by_nr(i); 673 s = pcmcia_get_socket_by_nr(i);
@@ -685,7 +729,7 @@ static int ds_release(struct inode *inode, struct file *file)
685 struct pcmcia_socket *s; 729 struct pcmcia_socket *s;
686 user_info_t *user, **link; 730 user_info_t *user, **link;
687 731
688 ds_dbg(0, "ds_release(socket %d)\n", iminor(inode)); 732 pr_debug("ds_release(socket %d)\n", iminor(inode));
689 733
690 user = file->private_data; 734 user = file->private_data;
691 if (CHECK_USER(user)) 735 if (CHECK_USER(user))
@@ -719,7 +763,7 @@ static ssize_t ds_read(struct file *file, char __user *buf,
719 user_info_t *user; 763 user_info_t *user;
720 int ret; 764 int ret;
721 765
722 ds_dbg(2, "ds_read(socket %d)\n", iminor(file->f_path.dentry->d_inode)); 766 pr_debug("ds_read(socket %d)\n", iminor(file->f_path.dentry->d_inode));
723 767
724 if (count < 4) 768 if (count < 4)
725 return -EINVAL; 769 return -EINVAL;
@@ -744,7 +788,7 @@ static ssize_t ds_read(struct file *file, char __user *buf,
744static ssize_t ds_write(struct file *file, const char __user *buf, 788static ssize_t ds_write(struct file *file, const char __user *buf,
745 size_t count, loff_t *ppos) 789 size_t count, loff_t *ppos)
746{ 790{
747 ds_dbg(2, "ds_write(socket %d)\n", iminor(file->f_path.dentry->d_inode)); 791 pr_debug("ds_write(socket %d)\n", iminor(file->f_path.dentry->d_inode));
748 792
749 if (count != 4) 793 if (count != 4)
750 return -EINVAL; 794 return -EINVAL;
@@ -762,7 +806,7 @@ static u_int ds_poll(struct file *file, poll_table *wait)
762 struct pcmcia_socket *s; 806 struct pcmcia_socket *s;
763 user_info_t *user; 807 user_info_t *user;
764 808
765 ds_dbg(2, "ds_poll(socket %d)\n", iminor(file->f_path.dentry->d_inode)); 809 pr_debug("ds_poll(socket %d)\n", iminor(file->f_path.dentry->d_inode));
766 810
767 user = file->private_data; 811 user = file->private_data;
768 if (CHECK_USER(user)) 812 if (CHECK_USER(user))
@@ -790,7 +834,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
790 ds_ioctl_arg_t *buf; 834 ds_ioctl_arg_t *buf;
791 user_info_t *user; 835 user_info_t *user;
792 836
793 ds_dbg(2, "ds_ioctl(socket %d, %#x, %#lx)\n", iminor(inode), cmd, arg); 837 pr_debug("ds_ioctl(socket %d, %#x, %#lx)\n", iminor(inode), cmd, arg);
794 838
795 user = file->private_data; 839 user = file->private_data;
796 if (CHECK_USER(user)) 840 if (CHECK_USER(user))
@@ -809,13 +853,13 @@ static int ds_ioctl(struct inode * inode, struct file * file,
809 853
810 if (cmd & IOC_IN) { 854 if (cmd & IOC_IN) {
811 if (!access_ok(VERIFY_READ, uarg, size)) { 855 if (!access_ok(VERIFY_READ, uarg, size)) {
812 ds_dbg(3, "ds_ioctl(): verify_read = %d\n", -EFAULT); 856 pr_debug("ds_ioctl(): verify_read = %d\n", -EFAULT);
813 return -EFAULT; 857 return -EFAULT;
814 } 858 }
815 } 859 }
816 if (cmd & IOC_OUT) { 860 if (cmd & IOC_OUT) {
817 if (!access_ok(VERIFY_WRITE, uarg, size)) { 861 if (!access_ok(VERIFY_WRITE, uarg, size)) {
818 ds_dbg(3, "ds_ioctl(): verify_write = %d\n", -EFAULT); 862 pr_debug("ds_ioctl(): verify_write = %d\n", -EFAULT);
819 return -EFAULT; 863 return -EFAULT;
820 } 864 }
821 } 865 }
@@ -927,15 +971,15 @@ static int ds_ioctl(struct inode * inode, struct file * file,
927 goto free_out; 971 goto free_out;
928 break; 972 break;
929 case DS_GET_FIRST_WINDOW: 973 case DS_GET_FIRST_WINDOW:
930 ret = pcmcia_get_window(s, &buf->win_info.handle, 0, 974 ret = pcmcia_get_window(s, &buf->win_info.handle, 1,
931 &buf->win_info.window); 975 &buf->win_info.window);
932 break; 976 break;
933 case DS_GET_NEXT_WINDOW: 977 case DS_GET_NEXT_WINDOW:
934 ret = pcmcia_get_window(s, &buf->win_info.handle, 978 ret = pcmcia_get_window(s, &buf->win_info.handle,
935 buf->win_info.handle->index + 1, &buf->win_info.window); 979 buf->win_info.handle + 1, &buf->win_info.window);
936 break; 980 break;
937 case DS_GET_MEM_PAGE: 981 case DS_GET_MEM_PAGE:
938 ret = pcmcia_get_mem_page(buf->win_info.handle, 982 ret = pcmcia_get_mem_page(s, buf->win_info.handle,
939 &buf->win_info.map); 983 &buf->win_info.map);
940 break; 984 break;
941 case DS_REPLACE_CIS: 985 case DS_REPLACE_CIS:
@@ -962,7 +1006,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
962 } 1006 }
963 1007
964 if ((err == 0) && (ret != 0)) { 1008 if ((err == 0) && (ret != 0)) {
965 ds_dbg(2, "ds_ioctl: ret = %d\n", ret); 1009 pr_debug("ds_ioctl: ret = %d\n", ret);
966 switch (ret) { 1010 switch (ret) {
967 case -ENODEV: 1011 case -ENODEV:
968 case -EINVAL: 1012 case -EINVAL:
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index d919e96c0afd..a8bf8c1b45ed 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -20,6 +20,7 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/netdevice.h>
23 24
24#include <pcmcia/cs_types.h> 25#include <pcmcia/cs_types.h>
25#include <pcmcia/ss.h> 26#include <pcmcia/ss.h>
@@ -43,21 +44,6 @@ static u8 pcmcia_used_irq[NR_IRQS];
43#endif 44#endif
44 45
45 46
46#ifdef CONFIG_PCMCIA_DEBUG
47extern int ds_pc_debug;
48
49#define ds_dbg(skt, lvl, fmt, arg...) do { \
50 if (ds_pc_debug >= lvl) \
51 dev_printk(KERN_DEBUG, &skt->dev, \
52 "pcmcia_resource: " fmt, \
53 ## arg); \
54} while (0)
55#else
56#define ds_dbg(skt, lvl, fmt, arg...) do { } while (0)
57#endif
58
59
60
61/** alloc_io_space 47/** alloc_io_space
62 * 48 *
63 * Special stuff for managing IO windows, because they are scarce 49 * Special stuff for managing IO windows, because they are scarce
@@ -72,14 +58,14 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
72 align = (*base) ? (lines ? 1<<lines : 0) : 1; 58 align = (*base) ? (lines ? 1<<lines : 0) : 1;
73 if (align && (align < num)) { 59 if (align && (align < num)) {
74 if (*base) { 60 if (*base) {
75 ds_dbg(s, 0, "odd IO request: num %#x align %#x\n", 61 dev_dbg(&s->dev, "odd IO request: num %#x align %#x\n",
76 num, align); 62 num, align);
77 align = 0; 63 align = 0;
78 } else 64 } else
79 while (align && (align < num)) align <<= 1; 65 while (align && (align < num)) align <<= 1;
80 } 66 }
81 if (*base & ~(align-1)) { 67 if (*base & ~(align-1)) {
82 ds_dbg(s, 0, "odd IO request: base %#x align %#x\n", 68 dev_dbg(&s->dev, "odd IO request: base %#x align %#x\n",
83 *base, align); 69 *base, align);
84 align = 0; 70 align = 0;
85 } 71 }
@@ -173,8 +159,10 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
173 s = p_dev->socket; 159 s = p_dev->socket;
174 c = p_dev->function_config; 160 c = p_dev->function_config;
175 161
176 if (!(c->state & CONFIG_LOCKED)) 162 if (!(c->state & CONFIG_LOCKED)) {
 163 dev_dbg(&s->dev, "Configuration isn't locked\n");
177 return -EACCES; 164 return -EACCES;
165 }
178 166
179 addr = (c->ConfigBase + reg->Offset) >> 1; 167 addr = (c->ConfigBase + reg->Offset) >> 1;
180 168
@@ -188,6 +176,7 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
188 pcmcia_write_cis_mem(s, 1, addr, 1, &val); 176 pcmcia_write_cis_mem(s, 1, addr, 1, &val);
189 break; 177 break;
190 default: 178 default:
179 dev_dbg(&s->dev, "Invalid conf register request\n");
191 return -EINVAL; 180 return -EINVAL;
192 break; 181 break;
193 } 182 }
@@ -196,68 +185,21 @@ int pcmcia_access_configuration_register(struct pcmcia_device *p_dev,
196EXPORT_SYMBOL(pcmcia_access_configuration_register); 185EXPORT_SYMBOL(pcmcia_access_configuration_register);
197 186
198 187
199/** pcmcia_get_window 188int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh,
200 */ 189 memreq_t *req)
201int pcmcia_get_window(struct pcmcia_socket *s, window_handle_t *handle,
202 int idx, win_req_t *req)
203{
204 window_t *win;
205 int w;
206
207 if (!s || !(s->state & SOCKET_PRESENT))
208 return -ENODEV;
209 for (w = idx; w < MAX_WIN; w++)
210 if (s->state & SOCKET_WIN_REQ(w))
211 break;
212 if (w == MAX_WIN)
213 return -EINVAL;
214 win = &s->win[w];
215 req->Base = win->ctl.res->start;
216 req->Size = win->ctl.res->end - win->ctl.res->start + 1;
217 req->AccessSpeed = win->ctl.speed;
218 req->Attributes = 0;
219 if (win->ctl.flags & MAP_ATTRIB)
220 req->Attributes |= WIN_MEMORY_TYPE_AM;
221 if (win->ctl.flags & MAP_ACTIVE)
222 req->Attributes |= WIN_ENABLE;
223 if (win->ctl.flags & MAP_16BIT)
224 req->Attributes |= WIN_DATA_WIDTH_16;
225 if (win->ctl.flags & MAP_USE_WAIT)
226 req->Attributes |= WIN_USE_WAIT;
227 *handle = win;
228 return 0;
229} /* pcmcia_get_window */
230EXPORT_SYMBOL(pcmcia_get_window);
231
232
233/** pcmcia_get_mem_page
234 *
235 * Change the card address of an already open memory window.
236 */
237int pcmcia_get_mem_page(window_handle_t win, memreq_t *req)
238{ 190{
239 if ((win == NULL) || (win->magic != WINDOW_MAGIC)) 191 struct pcmcia_socket *s = p_dev->socket;
240 return -EINVAL;
241 req->Page = 0;
242 req->CardOffset = win->ctl.card_start;
243 return 0;
244} /* pcmcia_get_mem_page */
245EXPORT_SYMBOL(pcmcia_get_mem_page);
246
247 192
248int pcmcia_map_mem_page(window_handle_t win, memreq_t *req) 193 wh--;
249{ 194 if (wh >= MAX_WIN)
250 struct pcmcia_socket *s;
251 if ((win == NULL) || (win->magic != WINDOW_MAGIC))
252 return -EINVAL; 195 return -EINVAL;
253 s = win->sock;
254 if (req->Page != 0) { 196 if (req->Page != 0) {
 255 ds_dbg(s, 0, "failure: requested page is zero\n"); 197 dev_dbg(&s->dev, "failure: requested page is nonzero\n");
256 return -EINVAL; 198 return -EINVAL;
257 } 199 }
258 win->ctl.card_start = req->CardOffset; 200 s->win[wh].card_start = req->CardOffset;
259 if (s->ops->set_mem_map(s, &win->ctl) != 0) { 201 if (s->ops->set_mem_map(s, &s->win[wh]) != 0) {
260 ds_dbg(s, 0, "failed to set_mem_map\n"); 202 dev_dbg(&s->dev, "failed to set_mem_map\n");
261 return -EIO; 203 return -EIO;
262 } 204 }
263 return 0; 205 return 0;
@@ -278,10 +220,14 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
278 s = p_dev->socket; 220 s = p_dev->socket;
279 c = p_dev->function_config; 221 c = p_dev->function_config;
280 222
281 if (!(s->state & SOCKET_PRESENT)) 223 if (!(s->state & SOCKET_PRESENT)) {
224 dev_dbg(&s->dev, "No card present\n");
282 return -ENODEV; 225 return -ENODEV;
283 if (!(c->state & CONFIG_LOCKED)) 226 }
227 if (!(c->state & CONFIG_LOCKED)) {
 228 dev_dbg(&s->dev, "Configuration isn't locked\n");
284 return -EACCES; 229 return -EACCES;
230 }
285 231
286 if (mod->Attributes & CONF_IRQ_CHANGE_VALID) { 232 if (mod->Attributes & CONF_IRQ_CHANGE_VALID) {
287 if (mod->Attributes & CONF_ENABLE_IRQ) { 233 if (mod->Attributes & CONF_ENABLE_IRQ) {
@@ -295,7 +241,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
295 } 241 }
296 242
297 if (mod->Attributes & CONF_VCC_CHANGE_VALID) { 243 if (mod->Attributes & CONF_VCC_CHANGE_VALID) {
298 ds_dbg(s, 0, "changing Vcc is not allowed at this time\n"); 244 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
299 return -EINVAL; 245 return -EINVAL;
300 } 246 }
301 247
@@ -303,7 +249,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
303 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && 249 if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) &&
304 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 250 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
305 if (mod->Vpp1 != mod->Vpp2) { 251 if (mod->Vpp1 != mod->Vpp2) {
306 ds_dbg(s, 0, "Vpp1 and Vpp2 must be the same\n"); 252 dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
307 return -EINVAL; 253 return -EINVAL;
308 } 254 }
309 s->socket.Vpp = mod->Vpp1; 255 s->socket.Vpp = mod->Vpp1;
@@ -314,7 +260,7 @@ int pcmcia_modify_configuration(struct pcmcia_device *p_dev,
314 } 260 }
315 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || 261 } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
316 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { 262 (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
317 ds_dbg(s, 0, "changing Vcc is not allowed at this time\n"); 263 dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
318 return -EINVAL; 264 return -EINVAL;
319 } 265 }
320 266
@@ -425,11 +371,11 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
425 if (c->state & CONFIG_LOCKED) 371 if (c->state & CONFIG_LOCKED)
426 return -EACCES; 372 return -EACCES;
427 if (c->irq.Attributes != req->Attributes) { 373 if (c->irq.Attributes != req->Attributes) {
428 ds_dbg(s, 0, "IRQ attributes must match assigned ones\n"); 374 dev_dbg(&s->dev, "IRQ attributes must match assigned ones\n");
429 return -EINVAL; 375 return -EINVAL;
430 } 376 }
431 if (s->irq.AssignedIRQ != req->AssignedIRQ) { 377 if (s->irq.AssignedIRQ != req->AssignedIRQ) {
432 ds_dbg(s, 0, "IRQ must match assigned one\n"); 378 dev_dbg(&s->dev, "IRQ must match assigned one\n");
433 return -EINVAL; 379 return -EINVAL;
434 } 380 }
435 if (--s->irq.Config == 0) { 381 if (--s->irq.Config == 0) {
@@ -437,8 +383,8 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
437 s->irq.AssignedIRQ = 0; 383 s->irq.AssignedIRQ = 0;
438 } 384 }
439 385
440 if (req->Attributes & IRQ_HANDLE_PRESENT) { 386 if (req->Handler) {
441 free_irq(req->AssignedIRQ, req->Instance); 387 free_irq(req->AssignedIRQ, p_dev->priv);
442 } 388 }
443 389
444#ifdef CONFIG_PCMCIA_PROBE 390#ifdef CONFIG_PCMCIA_PROBE
@@ -449,30 +395,34 @@ static int pcmcia_release_irq(struct pcmcia_device *p_dev, irq_req_t *req)
449} /* pcmcia_release_irq */ 395} /* pcmcia_release_irq */
450 396
451 397
452int pcmcia_release_window(window_handle_t win) 398int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t wh)
453{ 399{
454 struct pcmcia_socket *s; 400 struct pcmcia_socket *s = p_dev->socket;
401 pccard_mem_map *win;
455 402
456 if ((win == NULL) || (win->magic != WINDOW_MAGIC)) 403 wh--;
404 if (wh >= MAX_WIN)
457 return -EINVAL; 405 return -EINVAL;
458 s = win->sock; 406
459 if (!(win->handle->_win & CLIENT_WIN_REQ(win->index))) 407 win = &s->win[wh];
408
409 if (!(p_dev->_win & CLIENT_WIN_REQ(wh))) {
410 dev_dbg(&s->dev, "not releasing unknown window\n");
460 return -EINVAL; 411 return -EINVAL;
412 }
461 413
462 /* Shut down memory window */ 414 /* Shut down memory window */
463 win->ctl.flags &= ~MAP_ACTIVE; 415 win->flags &= ~MAP_ACTIVE;
464 s->ops->set_mem_map(s, &win->ctl); 416 s->ops->set_mem_map(s, win);
465 s->state &= ~SOCKET_WIN_REQ(win->index); 417 s->state &= ~SOCKET_WIN_REQ(wh);
466 418
467 /* Release system memory */ 419 /* Release system memory */
468 if (win->ctl.res) { 420 if (win->res) {
469 release_resource(win->ctl.res); 421 release_resource(win->res);
470 kfree(win->ctl.res); 422 kfree(win->res);
471 win->ctl.res = NULL; 423 win->res = NULL;
472 } 424 }
473 win->handle->_win &= ~CLIENT_WIN_REQ(win->index); 425 p_dev->_win &= ~CLIENT_WIN_REQ(wh);
474
475 win->magic = 0;
476 426
477 return 0; 427 return 0;
478} /* pcmcia_release_window */ 428} /* pcmcia_release_window */
@@ -492,12 +442,14 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev,
492 return -ENODEV; 442 return -ENODEV;
493 443
494 if (req->IntType & INT_CARDBUS) { 444 if (req->IntType & INT_CARDBUS) {
495 ds_dbg(p_dev->socket, 0, "IntType may not be INT_CARDBUS\n"); 445 dev_dbg(&s->dev, "IntType may not be INT_CARDBUS\n");
496 return -EINVAL; 446 return -EINVAL;
497 } 447 }
498 c = p_dev->function_config; 448 c = p_dev->function_config;
499 if (c->state & CONFIG_LOCKED) 449 if (c->state & CONFIG_LOCKED) {
450 dev_dbg(&s->dev, "Configuration is locked\n");
500 return -EACCES; 451 return -EACCES;
452 }
501 453
502 /* Do power control. We don't allow changes in Vcc. */ 454 /* Do power control. We don't allow changes in Vcc. */
503 s->socket.Vpp = req->Vpp; 455 s->socket.Vpp = req->Vpp;
@@ -609,40 +561,44 @@ int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req)
609 struct pcmcia_socket *s = p_dev->socket; 561 struct pcmcia_socket *s = p_dev->socket;
610 config_t *c; 562 config_t *c;
611 563
612 if (!(s->state & SOCKET_PRESENT)) 564 if (!(s->state & SOCKET_PRESENT)) {
565 dev_dbg(&s->dev, "No card present\n");
613 return -ENODEV; 566 return -ENODEV;
567 }
614 568
615 if (!req) 569 if (!req)
616 return -EINVAL; 570 return -EINVAL;
617 c = p_dev->function_config; 571 c = p_dev->function_config;
618 if (c->state & CONFIG_LOCKED) 572 if (c->state & CONFIG_LOCKED) {
573 dev_dbg(&s->dev, "Configuration is locked\n");
619 return -EACCES; 574 return -EACCES;
575 }
620 if (c->state & CONFIG_IO_REQ) { 576 if (c->state & CONFIG_IO_REQ) {
621 ds_dbg(s, 0, "IO already configured\n"); 577 dev_dbg(&s->dev, "IO already configured\n");
622 return -EBUSY; 578 return -EBUSY;
623 } 579 }
624 if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)) { 580 if (req->Attributes1 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS)) {
625 ds_dbg(s, 0, "bad attribute setting for IO region 1\n"); 581 dev_dbg(&s->dev, "bad attribute setting for IO region 1\n");
626 return -EINVAL; 582 return -EINVAL;
627 } 583 }
628 if ((req->NumPorts2 > 0) && 584 if ((req->NumPorts2 > 0) &&
629 (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))) { 585 (req->Attributes2 & (IO_SHARED | IO_FORCE_ALIAS_ACCESS))) {
630 ds_dbg(s, 0, "bad attribute setting for IO region 2\n"); 586 dev_dbg(&s->dev, "bad attribute setting for IO region 2\n");
631 return -EINVAL; 587 return -EINVAL;
632 } 588 }
633 589
634 ds_dbg(s, 1, "trying to allocate resource 1\n"); 590 dev_dbg(&s->dev, "trying to allocate resource 1\n");
635 if (alloc_io_space(s, req->Attributes1, &req->BasePort1, 591 if (alloc_io_space(s, req->Attributes1, &req->BasePort1,
636 req->NumPorts1, req->IOAddrLines)) { 592 req->NumPorts1, req->IOAddrLines)) {
637 ds_dbg(s, 0, "allocation of resource 1 failed\n"); 593 dev_dbg(&s->dev, "allocation of resource 1 failed\n");
638 return -EBUSY; 594 return -EBUSY;
639 } 595 }
640 596
641 if (req->NumPorts2) { 597 if (req->NumPorts2) {
642 ds_dbg(s, 1, "trying to allocate resource 2\n"); 598 dev_dbg(&s->dev, "trying to allocate resource 2\n");
643 if (alloc_io_space(s, req->Attributes2, &req->BasePort2, 599 if (alloc_io_space(s, req->Attributes2, &req->BasePort2,
644 req->NumPorts2, req->IOAddrLines)) { 600 req->NumPorts2, req->IOAddrLines)) {
645 ds_dbg(s, 0, "allocation of resource 2 failed\n"); 601 dev_dbg(&s->dev, "allocation of resource 2 failed\n");
646 release_io_space(s, req->BasePort1, req->NumPorts1); 602 release_io_space(s, req->BasePort1, req->NumPorts1);
647 return -EBUSY; 603 return -EBUSY;
648 } 604 }
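
The io_req_t interface itself is untouched by this hunk; only its diagnostics move from ds_dbg() to dev_dbg(). For reference, a minimal sketch of how a driver of this era fills that structure before calling pcmcia_request_io(); the field names are taken from the code above, while IO_DATA_PATH_WIDTH_AUTO, the port count and the address-line count are illustrative assumptions:

    #include <pcmcia/cs_types.h>
    #include <pcmcia/cs.h>
    #include <pcmcia/ds.h>

    /* sketch: request one 16-port I/O region for a hypothetical card */
    static int example_request_io(struct pcmcia_device *p_dev)
    {
            p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; /* assumed constant */
            p_dev->io.BasePort1   = 0;      /* 0: let the core pick a base */
            p_dev->io.NumPorts1   = 16;
            p_dev->io.IOAddrLines = 4;      /* card decodes 4 address lines */
            p_dev->io.NumPorts2   = 0;      /* no second region */

            /* 0 on success, -EBUSY/-EINVAL as in the checks above */
            return pcmcia_request_io(p_dev, &p_dev->io);
    }
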
@@ -680,13 +636,17 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
680 int ret = -EINVAL, irq = 0; 636 int ret = -EINVAL, irq = 0;
681 int type; 637 int type;
682 638
683 if (!(s->state & SOCKET_PRESENT)) 639 if (!(s->state & SOCKET_PRESENT)) {
640 dev_dbg(&s->dev, "No card present\n");
684 return -ENODEV; 641 return -ENODEV;
642 }
685 c = p_dev->function_config; 643 c = p_dev->function_config;
686 if (c->state & CONFIG_LOCKED) 644 if (c->state & CONFIG_LOCKED) {
645 dev_dbg(&s->dev, "Configuration is locked\n");
687 return -EACCES; 646 return -EACCES;
647 }
688 if (c->state & CONFIG_IRQ_REQ) { 648 if (c->state & CONFIG_IRQ_REQ) {
689 ds_dbg(s, 0, "IRQ already configured\n"); 649 dev_dbg(&s->dev, "IRQ already configured\n");
690 return -EBUSY; 650 return -EBUSY;
691 } 651 }
692 652
@@ -704,7 +664,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
704 /* if the underlying IRQ infrastructure allows for it, only allocate 664 /* if the underlying IRQ infrastructure allows for it, only allocate
705 * the IRQ, but do not enable it 665 * the IRQ, but do not enable it
706 */ 666 */
707 if (!(req->Attributes & IRQ_HANDLE_PRESENT)) 667 if (!(req->Handler))
708 type |= IRQ_NOAUTOEN; 668 type |= IRQ_NOAUTOEN;
709#endif /* IRQ_NOAUTOEN */ 669#endif /* IRQ_NOAUTOEN */
710 670
@@ -714,7 +674,7 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
714 } else { 674 } else {
715 int try; 675 int try;
716 u32 mask = s->irq_mask; 676 u32 mask = s->irq_mask;
717 void *data = &p_dev->dev.driver; /* something unique to this device */ 677 void *data = p_dev; /* something unique to this device */
718 678
719 for (try = 0; try < 64; try++) { 679 for (try = 0; try < 64; try++) {
720 irq = try % 32; 680 irq = try % 32;
@@ -731,12 +691,12 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
 731 * registering a dummy handler works, i.e. if the IRQ isn't 691 * registering a dummy handler works, i.e. if the IRQ isn't
732 * marked as used by the kernel resource management core */ 692 * marked as used by the kernel resource management core */
733 ret = request_irq(irq, 693 ret = request_irq(irq,
734 (req->Attributes & IRQ_HANDLE_PRESENT) ? req->Handler : test_action, 694 (req->Handler) ? req->Handler : test_action,
735 type, 695 type,
736 p_dev->devname, 696 p_dev->devname,
737 (req->Attributes & IRQ_HANDLE_PRESENT) ? req->Instance : data); 697 (req->Handler) ? p_dev->priv : data);
738 if (!ret) { 698 if (!ret) {
739 if (!(req->Attributes & IRQ_HANDLE_PRESENT)) 699 if (!req->Handler)
740 free_irq(irq, data); 700 free_irq(irq, data);
741 break; 701 break;
742 } 702 }
@@ -745,17 +705,22 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req)
745#endif 705#endif
746 /* only assign PCI irq if no IRQ already assigned */ 706 /* only assign PCI irq if no IRQ already assigned */
747 if (ret && !s->irq.AssignedIRQ) { 707 if (ret && !s->irq.AssignedIRQ) {
748 if (!s->pci_irq) 708 if (!s->pci_irq) {
709 dev_printk(KERN_INFO, &s->dev, "no IRQ found\n");
749 return ret; 710 return ret;
711 }
750 type = IRQF_SHARED; 712 type = IRQF_SHARED;
751 irq = s->pci_irq; 713 irq = s->pci_irq;
752 } 714 }
753 715
754 if (ret && (req->Attributes & IRQ_HANDLE_PRESENT)) { 716 if (ret && req->Handler) {
755 ret = request_irq(irq, req->Handler, type, 717 ret = request_irq(irq, req->Handler, type,
756 p_dev->devname, req->Instance); 718 p_dev->devname, p_dev->priv);
757 if (ret) 719 if (ret) {
720 dev_printk(KERN_INFO, &s->dev,
721 "request_irq() failed\n");
758 return ret; 722 return ret;
723 }
759 } 724 }
760 725
761 /* Make sure the fact the request type was overridden is passed back */ 726 /* Make sure the fact the request type was overridden is passed back */
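
With IRQ_HANDLE_PRESENT gone, a non-NULL req->Handler is what now requests that an interrupt handler be installed, and p_dev->priv replaces req->Instance as the dev_id passed to request_irq(). A minimal sketch of the calling side under those rules; the handler body, the driver data and the IRQ_TYPE_DYNAMIC_SHARING attribute are assumptions, not part of this patch:

    #include <linux/interrupt.h>
    #include <pcmcia/cs.h>
    #include <pcmcia/ds.h>

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            /* dev_id is p_dev->priv, as wired up in pcmcia_request_irq() above */
            return IRQ_HANDLED;
    }

    static int example_request_irq(struct pcmcia_device *p_dev, void *drv_data)
    {
            p_dev->priv = drv_data;           /* becomes the dev_id for the ISR */
            p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; /* assumed flag */
            p_dev->irq.Handler = example_isr; /* non-NULL: install the handler */

            /* on success, p_dev->irq.AssignedIRQ holds the IRQ that was chosen */
            return pcmcia_request_irq(p_dev, &p_dev->irq);
    }
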
@@ -787,17 +752,19 @@ EXPORT_SYMBOL(pcmcia_request_irq);
787 * Request_window() establishes a mapping between card memory space 752 * Request_window() establishes a mapping between card memory space
788 * and system memory space. 753 * and system memory space.
789 */ 754 */
790int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_handle_t *wh) 755int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_handle_t *wh)
791{ 756{
792 struct pcmcia_socket *s = (*p_dev)->socket; 757 struct pcmcia_socket *s = p_dev->socket;
793 window_t *win; 758 pccard_mem_map *win;
794 u_long align; 759 u_long align;
795 int w; 760 int w;
796 761
797 if (!(s->state & SOCKET_PRESENT)) 762 if (!(s->state & SOCKET_PRESENT)) {
763 dev_dbg(&s->dev, "No card present\n");
798 return -ENODEV; 764 return -ENODEV;
765 }
799 if (req->Attributes & (WIN_PAGED | WIN_SHARED)) { 766 if (req->Attributes & (WIN_PAGED | WIN_SHARED)) {
800 ds_dbg(s, 0, "bad attribute setting for iomem region\n"); 767 dev_dbg(&s->dev, "bad attribute setting for iomem region\n");
801 return -EINVAL; 768 return -EINVAL;
802 } 769 }
803 770
@@ -808,12 +775,12 @@ int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_h
808 (req->Attributes & WIN_STRICT_ALIGN)) ? 775 (req->Attributes & WIN_STRICT_ALIGN)) ?
809 req->Size : s->map_size); 776 req->Size : s->map_size);
810 if (req->Size & (s->map_size-1)) { 777 if (req->Size & (s->map_size-1)) {
811 ds_dbg(s, 0, "invalid map size\n"); 778 dev_dbg(&s->dev, "invalid map size\n");
812 return -EINVAL; 779 return -EINVAL;
813 } 780 }
814 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || 781 if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) ||
815 (req->Base & (align-1))) { 782 (req->Base & (align-1))) {
816 ds_dbg(s, 0, "invalid base address\n"); 783 dev_dbg(&s->dev, "invalid base address\n");
817 return -EINVAL; 784 return -EINVAL;
818 } 785 }
819 if (req->Base) 786 if (req->Base)
@@ -823,52 +790,48 @@ int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_h
823 for (w = 0; w < MAX_WIN; w++) 790 for (w = 0; w < MAX_WIN; w++)
824 if (!(s->state & SOCKET_WIN_REQ(w))) break; 791 if (!(s->state & SOCKET_WIN_REQ(w))) break;
825 if (w == MAX_WIN) { 792 if (w == MAX_WIN) {
826 ds_dbg(s, 0, "all windows are used already\n"); 793 dev_dbg(&s->dev, "all windows are used already\n");
827 return -EINVAL; 794 return -EINVAL;
828 } 795 }
829 796
830 win = &s->win[w]; 797 win = &s->win[w];
831 win->magic = WINDOW_MAGIC;
832 win->index = w;
833 win->handle = *p_dev;
834 win->sock = s;
835 798
836 if (!(s->features & SS_CAP_STATIC_MAP)) { 799 if (!(s->features & SS_CAP_STATIC_MAP)) {
837 win->ctl.res = pcmcia_find_mem_region(req->Base, req->Size, align, 800 win->res = pcmcia_find_mem_region(req->Base, req->Size, align,
838 (req->Attributes & WIN_MAP_BELOW_1MB), s); 801 (req->Attributes & WIN_MAP_BELOW_1MB), s);
839 if (!win->ctl.res) { 802 if (!win->res) {
840 ds_dbg(s, 0, "allocating mem region failed\n"); 803 dev_dbg(&s->dev, "allocating mem region failed\n");
841 return -EINVAL; 804 return -EINVAL;
842 } 805 }
843 } 806 }
844 (*p_dev)->_win |= CLIENT_WIN_REQ(w); 807 p_dev->_win |= CLIENT_WIN_REQ(w);
845 808
846 /* Configure the socket controller */ 809 /* Configure the socket controller */
847 win->ctl.map = w+1; 810 win->map = w+1;
848 win->ctl.flags = 0; 811 win->flags = 0;
849 win->ctl.speed = req->AccessSpeed; 812 win->speed = req->AccessSpeed;
850 if (req->Attributes & WIN_MEMORY_TYPE) 813 if (req->Attributes & WIN_MEMORY_TYPE)
851 win->ctl.flags |= MAP_ATTRIB; 814 win->flags |= MAP_ATTRIB;
852 if (req->Attributes & WIN_ENABLE) 815 if (req->Attributes & WIN_ENABLE)
853 win->ctl.flags |= MAP_ACTIVE; 816 win->flags |= MAP_ACTIVE;
854 if (req->Attributes & WIN_DATA_WIDTH_16) 817 if (req->Attributes & WIN_DATA_WIDTH_16)
855 win->ctl.flags |= MAP_16BIT; 818 win->flags |= MAP_16BIT;
856 if (req->Attributes & WIN_USE_WAIT) 819 if (req->Attributes & WIN_USE_WAIT)
857 win->ctl.flags |= MAP_USE_WAIT; 820 win->flags |= MAP_USE_WAIT;
858 win->ctl.card_start = 0; 821 win->card_start = 0;
859 if (s->ops->set_mem_map(s, &win->ctl) != 0) { 822 if (s->ops->set_mem_map(s, win) != 0) {
860 ds_dbg(s, 0, "failed to set memory mapping\n"); 823 dev_dbg(&s->dev, "failed to set memory mapping\n");
861 return -EIO; 824 return -EIO;
862 } 825 }
863 s->state |= SOCKET_WIN_REQ(w); 826 s->state |= SOCKET_WIN_REQ(w);
864 827
865 /* Return window handle */ 828 /* Return window handle */
866 if (s->features & SS_CAP_STATIC_MAP) { 829 if (s->features & SS_CAP_STATIC_MAP) {
867 req->Base = win->ctl.static_start; 830 req->Base = win->static_start;
868 } else { 831 } else {
869 req->Base = win->ctl.res->start; 832 req->Base = win->res->start;
870 } 833 }
871 *wh = win; 834 *wh = w + 1;
872 835
873 return 0; 836 return 0;
874} /* pcmcia_request_window */ 837} /* pcmcia_request_window */
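
The window handle returned here is now an index (w + 1) rather than a pointer into s->win[], and pcmcia_request_window(), pcmcia_map_mem_page() and pcmcia_release_window() all take the struct pcmcia_device explicitly. A sketch of a driver mapping a small attribute-memory window under the new convention; the 4 KiB size, the choice of flags and the ioremap() step are illustrative assumptions:

    #include <linux/io.h>
    #include <pcmcia/cs_types.h>
    #include <pcmcia/cs.h>
    #include <pcmcia/ds.h>

    static void __iomem *example_map_window(struct pcmcia_device *p_dev,
                                            window_handle_t *wh)
    {
            win_req_t req;
            memreq_t map;

            req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
            req.Base = 0;                   /* 0: let the core find a region */
            req.Size = 0x1000;              /* must be a multiple of s->map_size */
            req.AccessSpeed = 0;

            if (pcmcia_request_window(p_dev, &req, wh)) /* p_dev, no longer &p_dev */
                    return NULL;

            map.Page = 0;                   /* anything else is rejected above */
            map.CardOffset = 0;
            if (pcmcia_map_mem_page(p_dev, *wh, &map)) {
                    pcmcia_release_window(p_dev, *wh);
                    return NULL;
            }

            /* req.Base was filled in by pcmcia_request_window() */
            return ioremap(req.Base, req.Size);
    }
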
@@ -879,19 +842,46 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev) {
879 pcmcia_release_io(p_dev, &p_dev->io); 842 pcmcia_release_io(p_dev, &p_dev->io);
880 pcmcia_release_irq(p_dev, &p_dev->irq); 843 pcmcia_release_irq(p_dev, &p_dev->irq);
881 if (p_dev->win) 844 if (p_dev->win)
882 pcmcia_release_window(p_dev->win); 845 pcmcia_release_window(p_dev, p_dev->win);
883} 846}
884EXPORT_SYMBOL(pcmcia_disable_device); 847EXPORT_SYMBOL(pcmcia_disable_device);
885 848
886 849
887struct pcmcia_cfg_mem { 850struct pcmcia_cfg_mem {
888 tuple_t tuple; 851 struct pcmcia_device *p_dev;
852 void *priv_data;
853 int (*conf_check) (struct pcmcia_device *p_dev,
854 cistpl_cftable_entry_t *cfg,
855 cistpl_cftable_entry_t *dflt,
856 unsigned int vcc,
857 void *priv_data);
889 cisparse_t parse; 858 cisparse_t parse;
890 u8 buf[256];
891 cistpl_cftable_entry_t dflt; 859 cistpl_cftable_entry_t dflt;
892}; 860};
893 861
894/** 862/**
863 * pcmcia_do_loop_config() - internal helper for pcmcia_loop_config()
864 *
865 * pcmcia_do_loop_config() is the internal callback for the call from
866 * pcmcia_loop_config() to pccard_loop_tuple(). Data is transferred
867 * by a struct pcmcia_cfg_mem.
868 */
869static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv)
870{
871 cistpl_cftable_entry_t *cfg = &parse->cftable_entry;
872 struct pcmcia_cfg_mem *cfg_mem = priv;
873
874 /* default values */
875 cfg_mem->p_dev->conf.ConfigIndex = cfg->index;
876 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
877 cfg_mem->dflt = *cfg;
878
879 return cfg_mem->conf_check(cfg_mem->p_dev, cfg, &cfg_mem->dflt,
880 cfg_mem->p_dev->socket->socket.Vcc,
881 cfg_mem->priv_data);
882}
883
884/**
895 * pcmcia_loop_config() - loop over configuration options 885 * pcmcia_loop_config() - loop over configuration options
896 * @p_dev: the struct pcmcia_device which we need to loop for. 886 * @p_dev: the struct pcmcia_device which we need to loop for.
897 * @conf_check: function to call for each configuration option. 887 * @conf_check: function to call for each configuration option.
@@ -913,48 +903,174 @@ int pcmcia_loop_config(struct pcmcia_device *p_dev,
913 void *priv_data) 903 void *priv_data)
914{ 904{
915 struct pcmcia_cfg_mem *cfg_mem; 905 struct pcmcia_cfg_mem *cfg_mem;
916
917 tuple_t *tuple;
918 int ret; 906 int ret;
919 unsigned int vcc;
920 907
921 cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL); 908 cfg_mem = kzalloc(sizeof(struct pcmcia_cfg_mem), GFP_KERNEL);
922 if (cfg_mem == NULL) 909 if (cfg_mem == NULL)
923 return -ENOMEM; 910 return -ENOMEM;
924 911
925 /* get the current Vcc setting */ 912 cfg_mem->p_dev = p_dev;
926 vcc = p_dev->socket->socket.Vcc; 913 cfg_mem->conf_check = conf_check;
914 cfg_mem->priv_data = priv_data;
927 915
928 tuple = &cfg_mem->tuple; 916 ret = pccard_loop_tuple(p_dev->socket, p_dev->func,
929 tuple->TupleData = cfg_mem->buf; 917 CISTPL_CFTABLE_ENTRY, &cfg_mem->parse,
930 tuple->TupleDataMax = 255; 918 cfg_mem, pcmcia_do_loop_config);
931 tuple->TupleOffset = 0;
932 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY;
933 tuple->Attributes = 0;
934 919
935 ret = pcmcia_get_first_tuple(p_dev, tuple); 920 kfree(cfg_mem);
936 while (!ret) { 921 return ret;
937 cistpl_cftable_entry_t *cfg = &cfg_mem->parse.cftable_entry; 922}
923EXPORT_SYMBOL(pcmcia_loop_config);
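
pcmcia_loop_config() keeps its external signature; only the tuple walking moves into pccard_loop_tuple() via the pcmcia_do_loop_config() trampoline above. A hedged sketch of the driver-side conf_check callback such a loop drives; the use of cfg->io and the I/O request inside it are assumptions modelled on contemporary drivers, not taken from this patch:

    #include <pcmcia/cistpl.h>
    #include <pcmcia/cs.h>
    #include <pcmcia/ds.h>

    /* called once per CISTPL_CFTABLE_ENTRY; returning 0 accepts it and stops */
    static int example_conf_check(struct pcmcia_device *p_dev,
                                  cistpl_cftable_entry_t *cfg,
                                  cistpl_cftable_entry_t *dflt,
                                  unsigned int vcc,
                                  void *priv_data)
    {
            if (!cfg->io.nwin && !dflt->io.nwin)
                    return -ENODEV;         /* nothing usable, keep looking */

            /* cfg->index has already been copied to p_dev->conf.ConfigIndex */
            p_dev->io.NumPorts1 = cfg->io.nwin ? cfg->io.win[0].len
                                               : dflt->io.win[0].len;
            return pcmcia_request_io(p_dev, &p_dev->io);
    }

    /* in the probe path: ret = pcmcia_loop_config(p_dev, example_conf_check, NULL); */
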
924
925
926struct pcmcia_loop_mem {
927 struct pcmcia_device *p_dev;
928 void *priv_data;
929 int (*loop_tuple) (struct pcmcia_device *p_dev,
930 tuple_t *tuple,
931 void *priv_data);
932};
933
934/**
 935 * pcmcia_do_loop_tuple() - internal helper for pcmcia_loop_tuple()
936 *
937 * pcmcia_do_loop_tuple() is the internal callback for the call from
938 * pcmcia_loop_tuple() to pccard_loop_tuple(). Data is transferred
 939 * by a struct pcmcia_loop_mem.
940 */
941static int pcmcia_do_loop_tuple(tuple_t *tuple, cisparse_t *parse, void *priv)
942{
943 struct pcmcia_loop_mem *loop = priv;
944
945 return loop->loop_tuple(loop->p_dev, tuple, loop->priv_data);
946};
947
948/**
949 * pcmcia_loop_tuple() - loop over tuples in the CIS
950 * @p_dev: the struct pcmcia_device which we need to loop for.
951 * @code: which CIS code shall we look for?
952 * @priv_data: private data to be passed to the loop_tuple function.
 953 * @loop_tuple: function to call for each CIS entry of type @code. It
954 * gets passed the raw tuple and @priv_data.
955 *
 956 * pcmcia_loop_tuple() loops over all CIS entries of type @code, and
957 * calls the @loop_tuple function for each entry. If the call to @loop_tuple
 958 * returns 0, the loop exits. Returns 0 on success or an error code otherwise.
959 */
960int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
961 int (*loop_tuple) (struct pcmcia_device *p_dev,
962 tuple_t *tuple,
963 void *priv_data),
964 void *priv_data)
965{
966 struct pcmcia_loop_mem loop = {
967 .p_dev = p_dev,
968 .loop_tuple = loop_tuple,
969 .priv_data = priv_data};
938 970
939 if (pcmcia_get_tuple_data(p_dev, tuple)) 971 return pccard_loop_tuple(p_dev->socket, p_dev->func, code, NULL,
940 goto next_entry; 972 &loop, pcmcia_do_loop_tuple);
973};
974EXPORT_SYMBOL(pcmcia_loop_tuple);
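
pcmcia_loop_tuple() is the exported primitive the helpers below are built on: it walks every CIS entry of the requested code and hands the raw tuple_t to the callback, stopping as soon as the callback returns 0. A small illustrative sketch of a caller that merely counts and logs CISTPL_FUNCE entries; the callback and its private counter are assumptions:

    #include <pcmcia/cistpl.h>
    #include <pcmcia/ds.h>

    static int example_show_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
                                  void *priv_data)
    {
            unsigned int *count = priv_data;

            (*count)++;
            dev_dbg(&p_dev->dev, "FUNCE tuple %u: %d bytes\n",
                    *count, tuple->TupleDataLen);
            return -ENODEV;         /* non-zero: continue with the next entry */
    }

    /* usage: unsigned int n = 0; */
    /*        pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, example_show_tuple, &n); */
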
941 975
942 if (pcmcia_parse_tuple(tuple, &cfg_mem->parse))
943 goto next_entry;
944 976
945 /* default values */ 977struct pcmcia_loop_get {
946 p_dev->conf.ConfigIndex = cfg->index; 978 size_t len;
947 if (cfg->flags & CISTPL_CFTABLE_DEFAULT) 979 cisdata_t **buf;
948 cfg_mem->dflt = *cfg; 980};
949 981
950 ret = conf_check(p_dev, cfg, &cfg_mem->dflt, vcc, priv_data); 982/**
951 if (!ret) 983 * pcmcia_do_get_tuple() - internal helper for pcmcia_get_tuple()
952 break; 984 *
985 * pcmcia_do_get_tuple() is the internal callback for the call from
986 * pcmcia_get_tuple() to pcmcia_loop_tuple(). As we're only interested in
987 * the first tuple, return 0 unconditionally. Create a memory buffer large
988 * enough to hold the content of the tuple, and fill it with the tuple data.
989 * The caller is responsible to free the buffer.
990 */
991static int pcmcia_do_get_tuple(struct pcmcia_device *p_dev, tuple_t *tuple,
992 void *priv)
993{
994 struct pcmcia_loop_get *get = priv;
995
996 *get->buf = kzalloc(tuple->TupleDataLen, GFP_KERNEL);
997 if (*get->buf) {
998 get->len = tuple->TupleDataLen;
999 memcpy(*get->buf, tuple->TupleData, tuple->TupleDataLen);
1000 } else
1001 dev_dbg(&p_dev->dev, "do_get_tuple: out of memory\n");
1002 return 0;
1003};
953 1004
954next_entry: 1005/**
955 ret = pcmcia_get_next_tuple(p_dev, tuple); 1006 * pcmcia_get_tuple() - get first tuple from CIS
1007 * @p_dev: the struct pcmcia_device which we need to loop for.
1008 * @code: which CIS code shall we look for?
1009 * @buf: pointer to store the buffer to.
1010 *
1011 * pcmcia_get_tuple() gets the content of the first CIS entry of type @code.
1012 * It returns the buffer length (or zero). The caller is responsible for
1013 * freeing the buffer passed in @buf.
1014 */
1015size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
1016 unsigned char **buf)
1017{
1018 struct pcmcia_loop_get get = {
1019 .len = 0,
1020 .buf = buf,
1021 };
1022
1023 *get.buf = NULL;
1024 pcmcia_loop_tuple(p_dev, code, pcmcia_do_get_tuple, &get);
1025
1026 return get.len;
1027};
1028EXPORT_SYMBOL(pcmcia_get_tuple);
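
pcmcia_get_tuple() wraps the loop above for the common case of only needing the first tuple of a given code; the buffer it returns through @buf is kzalloc()ed by pcmcia_do_get_tuple() and must be kfree()d by the caller. A brief usage sketch, with CISTPL_VERS_1 chosen purely as an example code:

    #include <linux/slab.h>
    #include <pcmcia/cistpl.h>
    #include <pcmcia/ds.h>

    static void example_dump_vers1(struct pcmcia_device *p_dev)
    {
            unsigned char *buf = NULL;
            size_t len;

            len = pcmcia_get_tuple(p_dev, CISTPL_VERS_1, &buf);
            if (len)
                    dev_info(&p_dev->dev, "VERS_1 tuple is %zu bytes long\n", len);

            kfree(buf);             /* buf stays NULL if no tuple was found */
    }
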
1029
1030
1031/**
1032 * pcmcia_do_get_mac() - internal helper for pcmcia_get_mac_from_cis()
1033 *
1034 * pcmcia_do_get_mac() is the internal callback for the call from
1035 * pcmcia_get_mac_from_cis() to pcmcia_loop_tuple(). We check whether the
1036 * tuple contains a proper LAN_NODE_ID of length 6, and copy the data
1037 * to struct net_device->dev_addr[i].
1038 */
1039static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
1040 void *priv)
1041{
1042 struct net_device *dev = priv;
1043 int i;
1044
1045 if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
1046 return -EINVAL;
1047 if (tuple->TupleDataLen < ETH_ALEN + 2) {
1048 dev_warn(&p_dev->dev, "Invalid CIS tuple length for "
1049 "LAN_NODE_ID\n");
1050 return -EINVAL;
956 } 1051 }
957 1052
958 return ret; 1053 if (tuple->TupleData[1] != ETH_ALEN) {
959} 1054 dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
960EXPORT_SYMBOL(pcmcia_loop_config); 1055 return -EINVAL;
1056 }
1057 for (i = 0; i < 6; i++)
1058 dev->dev_addr[i] = tuple->TupleData[i+2];
1059 return 0;
1060};
1061
1062/**
1063 * pcmcia_get_mac_from_cis() - read out MAC address from CISTPL_FUNCE
1064 * @p_dev: the struct pcmcia_device for which we want the address.
1065 * @dev: a properly prepared struct net_device to store the info to.
1066 *
1067 * pcmcia_get_mac_from_cis() reads out the hardware MAC address from
1068 * CISTPL_FUNCE and stores it into struct net_device *dev->dev_addr which
1069 * must be set up properly by the driver (see examples!).
1070 */
1071int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev, struct net_device *dev)
1072{
1073 return pcmcia_loop_tuple(p_dev, CISTPL_FUNCE, pcmcia_do_get_mac, dev);
1074};
1075EXPORT_SYMBOL(pcmcia_get_mac_from_cis);
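
pcmcia_get_mac_from_cis() lets PCMCIA network drivers drop their hand-rolled CISTPL_FUNCE parsing, provided the net_device has already been allocated so dev_addr[] can be written. A hedged sketch of the probe-time call; the surrounding function and the warning are assumptions, not part of this patch:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <pcmcia/ds.h>

    /* assumed probe fragment: ndev was allocated with alloc_etherdev() earlier */
    static int example_fill_mac(struct pcmcia_device *p_dev, struct net_device *ndev)
    {
            int ret;

            /* copies the six LAN_NODE_ID bytes into ndev->dev_addr[] */
            ret = pcmcia_get_mac_from_cis(p_dev, ndev);
            if (ret)
                    dev_warn(&p_dev->dev, "no usable MAC address in CIS\n");
            return ret;
    }
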
1076
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 70a33468bcd0..e1741cd875aa 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -213,7 +213,8 @@ static irqreturn_t pd6729_interrupt(int irq, void *dev)
213 213
214 if (csc & I365_CSC_DETECT) { 214 if (csc & I365_CSC_DETECT) {
215 events |= SS_DETECT; 215 events |= SS_DETECT;
216 dprintk("Card detected in socket %i!\n", i); 216 dev_vdbg(&socket[i].socket.dev,
217 "Card detected in socket %i!\n", i);
217 } 218 }
218 219
219 if (indirect_read(&socket[i], I365_INTCTL) 220 if (indirect_read(&socket[i], I365_INTCTL)
@@ -331,11 +332,11 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
331 reg = I365_PWR_NORESET; /* default: disable resetdrv on resume */ 332 reg = I365_PWR_NORESET; /* default: disable resetdrv on resume */
332 333
333 if (state->flags & SS_PWR_AUTO) { 334 if (state->flags & SS_PWR_AUTO) {
334 dprintk("Auto power\n"); 335 dev_dbg(&sock->dev, "Auto power\n");
335 reg |= I365_PWR_AUTO; /* automatic power mngmnt */ 336 reg |= I365_PWR_AUTO; /* automatic power mngmnt */
336 } 337 }
337 if (state->flags & SS_OUTPUT_ENA) { 338 if (state->flags & SS_OUTPUT_ENA) {
338 dprintk("Power Enabled\n"); 339 dev_dbg(&sock->dev, "Power Enabled\n");
339 reg |= I365_PWR_OUT; /* enable power */ 340 reg |= I365_PWR_OUT; /* enable power */
340 } 341 }
341 342
@@ -343,40 +344,44 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
343 case 0: 344 case 0:
344 break; 345 break;
345 case 33: 346 case 33:
346 dprintk("setting voltage to Vcc to 3.3V on socket %i\n", 347 dev_dbg(&sock->dev,
348 "setting voltage to Vcc to 3.3V on socket %i\n",
347 socket->number); 349 socket->number);
348 reg |= I365_VCC_5V; 350 reg |= I365_VCC_5V;
349 indirect_setbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); 351 indirect_setbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V);
350 break; 352 break;
351 case 50: 353 case 50:
352 dprintk("setting voltage to Vcc to 5V on socket %i\n", 354 dev_dbg(&sock->dev,
355 "setting voltage to Vcc to 5V on socket %i\n",
353 socket->number); 356 socket->number);
354 reg |= I365_VCC_5V; 357 reg |= I365_VCC_5V;
355 indirect_resetbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); 358 indirect_resetbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V);
356 break; 359 break;
357 default: 360 default:
358 dprintk("pd6729: pd6729_set_socket called with " 361 dev_dbg(&sock->dev,
359 "invalid VCC power value: %i\n", 362 "pd6729_set_socket called with invalid VCC power "
360 state->Vcc); 363 "value: %i\n", state->Vcc);
361 return -EINVAL; 364 return -EINVAL;
362 } 365 }
363 366
364 switch (state->Vpp) { 367 switch (state->Vpp) {
365 case 0: 368 case 0:
366 dprintk("not setting Vpp on socket %i\n", socket->number); 369 dev_dbg(&sock->dev, "not setting Vpp on socket %i\n",
370 socket->number);
367 break; 371 break;
368 case 33: 372 case 33:
369 case 50: 373 case 50:
370 dprintk("setting Vpp to Vcc for socket %i\n", socket->number); 374 dev_dbg(&sock->dev, "setting Vpp to Vcc for socket %i\n",
375 socket->number);
371 reg |= I365_VPP1_5V; 376 reg |= I365_VPP1_5V;
372 break; 377 break;
373 case 120: 378 case 120:
374 dprintk("setting Vpp to 12.0\n"); 379 dev_dbg(&sock->dev, "setting Vpp to 12.0\n");
375 reg |= I365_VPP1_12V; 380 reg |= I365_VPP1_12V;
376 break; 381 break;
377 default: 382 default:
378 dprintk("pd6729: pd6729_set_socket called with invalid VPP power value: %i\n", 383 dev_dbg(&sock->dev, "pd6729: pd6729_set_socket called with "
379 state->Vpp); 384 "invalid VPP power value: %i\n", state->Vpp);
380 return -EINVAL; 385 return -EINVAL;
381 } 386 }
382 387
@@ -438,7 +443,7 @@ static int pd6729_set_io_map(struct pcmcia_socket *sock,
438 443
439 /* Check error conditions */ 444 /* Check error conditions */
440 if (map > 1) { 445 if (map > 1) {
441 dprintk("pd6729_set_io_map with invalid map"); 446 dev_dbg(&sock->dev, "pd6729_set_io_map with invalid map\n");
442 return -EINVAL; 447 return -EINVAL;
443 } 448 }
444 449
@@ -446,7 +451,7 @@ static int pd6729_set_io_map(struct pcmcia_socket *sock,
446 if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_IO(map)) 451 if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_IO(map))
447 indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_IO(map)); 452 indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_IO(map));
448 453
449 /* dprintk("set_io_map: Setting range to %x - %x\n", 454 /* dev_dbg(&sock->dev, "set_io_map: Setting range to %x - %x\n",
450 io->start, io->stop);*/ 455 io->start, io->stop);*/
451 456
452 /* write the new values */ 457 /* write the new values */
@@ -478,12 +483,12 @@ static int pd6729_set_mem_map(struct pcmcia_socket *sock,
478 483
479 map = mem->map; 484 map = mem->map;
480 if (map > 4) { 485 if (map > 4) {
481 printk("pd6729_set_mem_map: invalid map"); 486 dev_warn(&sock->dev, "invalid map requested\n");
482 return -EINVAL; 487 return -EINVAL;
483 } 488 }
484 489
485 if ((mem->res->start > mem->res->end) || (mem->speed > 1000)) { 490 if ((mem->res->start > mem->res->end) || (mem->speed > 1000)) {
 486 printk("pd6729_set_mem_map: invalid address / speed"); 491 dev_warn(&sock->dev, "invalid address / speed\n");
487 return -EINVAL; 492 return -EINVAL;
488 } 493 }
489 494
@@ -529,12 +534,12 @@ static int pd6729_set_mem_map(struct pcmcia_socket *sock,
529 if (mem->flags & MAP_WRPROT) 534 if (mem->flags & MAP_WRPROT)
530 i |= I365_MEM_WRPROT; 535 i |= I365_MEM_WRPROT;
531 if (mem->flags & MAP_ATTRIB) { 536 if (mem->flags & MAP_ATTRIB) {
532 /* dprintk("requesting attribute memory for socket %i\n", 537 /* dev_dbg(&sock->dev, "requesting attribute memory for "
533 socket->number);*/ 538 "socket %i\n", socket->number);*/
534 i |= I365_MEM_REG; 539 i |= I365_MEM_REG;
535 } else { 540 } else {
536 /* dprintk("requesting normal memory for socket %i\n", 541 /* dev_dbg(&sock->dev, "requesting normal memory for "
537 socket->number);*/ 542 "socket %i\n", socket->number);*/
538 } 543 }
539 indirect_write16(socket, base + I365_W_OFF, i); 544 indirect_write16(socket, base + I365_W_OFF, i);
540 545
@@ -577,7 +582,7 @@ static struct pccard_operations pd6729_operations = {
577 582
578static irqreturn_t pd6729_test(int irq, void *dev) 583static irqreturn_t pd6729_test(int irq, void *dev)
579{ 584{
580 dprintk("-> hit on irq %d\n", irq); 585 pr_devel("-> hit on irq %d\n", irq);
581 return IRQ_HANDLED; 586 return IRQ_HANDLED;
582} 587}
583 588
@@ -642,13 +647,13 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
642 goto err_out_free_mem; 647 goto err_out_free_mem;
643 648
644 if (!pci_resource_start(dev, 0)) { 649 if (!pci_resource_start(dev, 0)) {
645 printk(KERN_INFO "pd6729: refusing to load the driver " 650 dev_warn(&dev->dev, "refusing to load the driver as the "
646 "as the io_base is 0.\n"); 651 "io_base is NULL.\n");
647 goto err_out_free_mem; 652 goto err_out_free_mem;
648 } 653 }
649 654
650 printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge " 655 dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
651 "at 0x%llx on irq %d\n", 656 "on irq %d\n",
652 (unsigned long long)pci_resource_start(dev, 0), dev->irq); 657 (unsigned long long)pci_resource_start(dev, 0), dev->irq);
653 /* 658 /*
654 * Since we have no memory BARs some firmware may not 659 * Since we have no memory BARs some firmware may not
@@ -656,14 +661,14 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
656 */ 661 */
657 pci_read_config_byte(dev, PCI_COMMAND, &configbyte); 662 pci_read_config_byte(dev, PCI_COMMAND, &configbyte);
658 if (!(configbyte & PCI_COMMAND_MEMORY)) { 663 if (!(configbyte & PCI_COMMAND_MEMORY)) {
659 printk(KERN_DEBUG "pd6729: Enabling PCI_COMMAND_MEMORY.\n"); 664 dev_dbg(&dev->dev, "pd6729: Enabling PCI_COMMAND_MEMORY.\n");
660 configbyte |= PCI_COMMAND_MEMORY; 665 configbyte |= PCI_COMMAND_MEMORY;
661 pci_write_config_byte(dev, PCI_COMMAND, configbyte); 666 pci_write_config_byte(dev, PCI_COMMAND, configbyte);
662 } 667 }
663 668
664 ret = pci_request_regions(dev, "pd6729"); 669 ret = pci_request_regions(dev, "pd6729");
665 if (ret) { 670 if (ret) {
666 printk(KERN_INFO "pd6729: pci request region failed.\n"); 671 dev_warn(&dev->dev, "pci request region failed.\n");
667 goto err_out_disable; 672 goto err_out_disable;
668 } 673 }
669 674
@@ -672,7 +677,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
672 677
673 mask = pd6729_isa_scan(); 678 mask = pd6729_isa_scan();
674 if (irq_mode == 0 && mask == 0) { 679 if (irq_mode == 0 && mask == 0) {
675 printk(KERN_INFO "pd6729: no ISA interrupt is available.\n"); 680 dev_warn(&dev->dev, "no ISA interrupt is available.\n");
676 goto err_out_free_res; 681 goto err_out_free_res;
677 } 682 }
678 683
@@ -697,8 +702,8 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
697 /* Register the interrupt handler */ 702 /* Register the interrupt handler */
698 if ((ret = request_irq(dev->irq, pd6729_interrupt, IRQF_SHARED, 703 if ((ret = request_irq(dev->irq, pd6729_interrupt, IRQF_SHARED,
699 "pd6729", socket))) { 704 "pd6729", socket))) {
700 printk(KERN_ERR "pd6729: Failed to register irq %d, " 705 dev_err(&dev->dev, "Failed to register irq %d\n",
701 "aborting\n", dev->irq); 706 dev->irq);
702 goto err_out_free_res; 707 goto err_out_free_res;
703 } 708 }
704 } else { 709 } else {
@@ -713,8 +718,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
713 for (i = 0; i < MAX_SOCKETS; i++) { 718 for (i = 0; i < MAX_SOCKETS; i++) {
714 ret = pcmcia_register_socket(&socket[i].socket); 719 ret = pcmcia_register_socket(&socket[i].socket);
715 if (ret) { 720 if (ret) {
716 printk(KERN_INFO "pd6729: pcmcia_register_socket " 721 dev_warn(&dev->dev, "pcmcia_register_socket failed.\n");
717 "failed.\n");
718 for (j = 0; j < i ; j++) 722 for (j = 0; j < i ; j++)
719 pcmcia_unregister_socket(&socket[j].socket); 723 pcmcia_unregister_socket(&socket[j].socket);
720 goto err_out_free_res2; 724 goto err_out_free_res2;
diff --git a/drivers/pcmcia/pd6729.h b/drivers/pcmcia/pd6729.h
index f392e458cdfd..41418d394c55 100644
--- a/drivers/pcmcia/pd6729.h
+++ b/drivers/pcmcia/pd6729.h
@@ -1,13 +1,6 @@
1#ifndef _INCLUDE_GUARD_PD6729_H_ 1#ifndef _INCLUDE_GUARD_PD6729_H_
2#define _INCLUDE_GUARD_PD6729_H_ 2#define _INCLUDE_GUARD_PD6729_H_
3 3
4/* Debuging defines */
5#ifdef NOTRACE
6#define dprintk(fmt, args...) printk(fmt , ## args)
7#else
8#define dprintk(fmt, args...) do {} while (0)
9#endif
10
11/* Flags for I365_GENCTL */ 4/* Flags for I365_GENCTL */
12#define I365_DF_VS1 0x40 /* DF-step Voltage Sense */ 5#define I365_DF_VS1 0x40 /* DF-step Voltage Sense */
13#define I365_DF_VS2 0x80 6#define I365_DF_VS2 0x80
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 0e35acb1366b..84dde7768ad5 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -228,9 +228,43 @@ static const char *skt_names[] = {
228#define SKT_DEV_INFO_SIZE(n) \ 228#define SKT_DEV_INFO_SIZE(n) \
229 (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) 229 (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket))
230 230
231int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt)
232{
233 skt->res_skt.start = _PCMCIA(skt->nr);
234 skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1;
235 skt->res_skt.name = skt_names[skt->nr];
236 skt->res_skt.flags = IORESOURCE_MEM;
237
238 skt->res_io.start = _PCMCIAIO(skt->nr);
239 skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1;
240 skt->res_io.name = "io";
241 skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
242
243 skt->res_mem.start = _PCMCIAMem(skt->nr);
244 skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1;
245 skt->res_mem.name = "memory";
246 skt->res_mem.flags = IORESOURCE_MEM;
247
248 skt->res_attr.start = _PCMCIAAttr(skt->nr);
249 skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1;
250 skt->res_attr.name = "attribute";
251 skt->res_attr.flags = IORESOURCE_MEM;
252
253 return soc_pcmcia_add_one(skt);
254}
255
256void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
257{
258 /* Provide our PXA2xx specific timing routines. */
259 ops->set_timing = pxa2xx_pcmcia_set_timing;
260#ifdef CONFIG_CPU_FREQ
261 ops->frequency_change = pxa2xx_pcmcia_frequency_change;
262#endif
263}
264
231int __pxa2xx_drv_pcmcia_probe(struct device *dev) 265int __pxa2xx_drv_pcmcia_probe(struct device *dev)
232{ 266{
233 int i, ret; 267 int i, ret = 0;
234 struct pcmcia_low_level *ops; 268 struct pcmcia_low_level *ops;
235 struct skt_dev_info *sinfo; 269 struct skt_dev_info *sinfo;
236 struct soc_pcmcia_socket *skt; 270 struct soc_pcmcia_socket *skt;
@@ -240,6 +274,8 @@ int __pxa2xx_drv_pcmcia_probe(struct device *dev)
240 274
241 ops = (struct pcmcia_low_level *)dev->platform_data; 275 ops = (struct pcmcia_low_level *)dev->platform_data;
242 276
277 pxa2xx_drv_pcmcia_ops(ops);
278
243 sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL); 279 sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL);
244 if (!sinfo) 280 if (!sinfo)
245 return -ENOMEM; 281 return -ENOMEM;
@@ -250,40 +286,25 @@ int __pxa2xx_drv_pcmcia_probe(struct device *dev)
250 for (i = 0; i < ops->nr; i++) { 286 for (i = 0; i < ops->nr; i++) {
251 skt = &sinfo->skt[i]; 287 skt = &sinfo->skt[i];
252 288
253 skt->nr = ops->first + i; 289 skt->nr = ops->first + i;
254 skt->irq = NO_IRQ; 290 skt->ops = ops;
255 291 skt->socket.owner = ops->owner;
256 skt->res_skt.start = _PCMCIA(skt->nr); 292 skt->socket.dev.parent = dev;
257 skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1; 293 skt->socket.pci_irq = NO_IRQ;
258 skt->res_skt.name = skt_names[skt->nr];
259 skt->res_skt.flags = IORESOURCE_MEM;
260
261 skt->res_io.start = _PCMCIAIO(skt->nr);
262 skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1;
263 skt->res_io.name = "io";
264 skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
265 294
266 skt->res_mem.start = _PCMCIAMem(skt->nr); 295 ret = pxa2xx_drv_pcmcia_add_one(skt);
267 skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1; 296 if (ret)
268 skt->res_mem.name = "memory"; 297 break;
269 skt->res_mem.flags = IORESOURCE_MEM;
270
271 skt->res_attr.start = _PCMCIAAttr(skt->nr);
272 skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1;
273 skt->res_attr.name = "attribute";
274 skt->res_attr.flags = IORESOURCE_MEM;
275 } 298 }
276 299
277 /* Provide our PXA2xx specific timing routines. */ 300 if (ret) {
278 ops->set_timing = pxa2xx_pcmcia_set_timing; 301 while (--i >= 0)
279#ifdef CONFIG_CPU_FREQ 302 soc_pcmcia_remove_one(&sinfo->skt[i]);
280 ops->frequency_change = pxa2xx_pcmcia_frequency_change; 303 kfree(sinfo);
281#endif 304 } else {
282
283 ret = soc_common_drv_pcmcia_probe(dev, ops, sinfo);
284
285 if (!ret)
286 pxa2xx_configure_sockets(dev); 305 pxa2xx_configure_sockets(dev);
306 dev_set_drvdata(dev, sinfo);
307 }
287 308
288 return ret; 309 return ret;
289} 310}
@@ -297,7 +318,16 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
297 318
298static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev) 319static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
299{ 320{
300 return soc_common_drv_pcmcia_remove(&dev->dev); 321 struct skt_dev_info *sinfo = platform_get_drvdata(dev);
322 int i;
323
324 platform_set_drvdata(dev, NULL);
325
326 for (i = 0; i < sinfo->nskt; i++)
327 soc_pcmcia_remove_one(&sinfo->skt[i]);
328
329 kfree(sinfo);
330 return 0;
301} 331}
302 332
303static int pxa2xx_drv_pcmcia_suspend(struct device *dev) 333static int pxa2xx_drv_pcmcia_suspend(struct device *dev)
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index 235d681652c3..cb5efaec886f 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,3 +1,6 @@
1/* temporary measure */ 1/* temporary measure */
2extern int __pxa2xx_drv_pcmcia_probe(struct device *); 2extern int __pxa2xx_drv_pcmcia_probe(struct device *);
3 3
4int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
5void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
6
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
index 5143a760153b..05913d0bbdbe 100644
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ b/drivers/pcmcia/pxa2xx_cm_x255.c
@@ -44,7 +44,7 @@ static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
44 return ret; 44 return ret;
45 gpio_direction_output(GPIO_PCMCIA_RESET, 0); 45 gpio_direction_output(GPIO_PCMCIA_RESET, 0);
46 46
47 skt->irq = skt->nr == 0 ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT; 47 skt->socket.pci_irq = skt->nr == 0 ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT;
48 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 48 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
49 if (!ret) 49 if (!ret)
50 gpio_free(GPIO_PCMCIA_RESET); 50 gpio_free(GPIO_PCMCIA_RESET);
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index a7b943d01e34..5662646b84da 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -38,7 +38,7 @@ static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
38 return ret; 38 return ret;
39 gpio_direction_output(GPIO_PCMCIA_RESET, 0); 39 gpio_direction_output(GPIO_PCMCIA_RESET, 0);
40 40
41 skt->irq = PCMCIA_S0_RDYINT; 41 skt->socket.pci_irq = PCMCIA_S0_RDYINT;
42 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 42 ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
43 if (!ret) 43 if (!ret)
44 gpio_free(GPIO_PCMCIA_RESET); 44 gpio_free(GPIO_PCMCIA_RESET);
diff --git a/drivers/pcmcia/pxa2xx_e740.c b/drivers/pcmcia/pxa2xx_e740.c
index d09c0dc4a31a..8bfbd4dca131 100644
--- a/drivers/pcmcia/pxa2xx_e740.c
+++ b/drivers/pcmcia/pxa2xx_e740.c
@@ -38,7 +38,7 @@ static struct pcmcia_irqs cd_irqs[] = {
38 38
39static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 39static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
40{ 40{
41 skt->irq = skt->nr == 0 ? IRQ_GPIO(GPIO_E740_PCMCIA_RDY0) : 41 skt->socket.pci_irq = skt->nr == 0 ? IRQ_GPIO(GPIO_E740_PCMCIA_RDY0) :
42 IRQ_GPIO(GPIO_E740_PCMCIA_RDY1); 42 IRQ_GPIO(GPIO_E740_PCMCIA_RDY1);
43 43
44 return soc_pcmcia_request_irqs(skt, &cd_irqs[skt->nr], 1); 44 return soc_pcmcia_request_irqs(skt, &cd_irqs[skt->nr], 1);
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index 6cbb1b1f7cfd..b9f8c8fb42bd 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -32,6 +32,7 @@ static int
32lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, 32lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
33 const socket_state_t *state) 33 const socket_state_t *state)
34{ 34{
35 struct sa1111_pcmcia_socket *s = to_skt(skt);
35 unsigned int pa_dwr_mask, pa_dwr_set, misc_mask, misc_set; 36 unsigned int pa_dwr_mask, pa_dwr_set, misc_mask, misc_set;
36 int ret = 0; 37 int ret = 0;
37 38
@@ -149,7 +150,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
149 150
150 if (ret == 0) { 151 if (ret == 0) {
151 lubbock_set_misc_wr(misc_mask, misc_set); 152 lubbock_set_misc_wr(misc_mask, misc_set);
152 sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set); 153 sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set);
153 } 154 }
154 155
155#if 1 156#if 1
@@ -175,7 +176,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
175 * Switch to 5V, Configure socket with 5V voltage 176 * Switch to 5V, Configure socket with 5V voltage
176 */ 177 */
177 lubbock_set_misc_wr(misc_mask, 0); 178 lubbock_set_misc_wr(misc_mask, 0);
178 sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, 0); 179 sa1111_set_io(s->dev, pa_dwr_mask, 0);
179 180
180 /* 181 /*
181 * It takes about 100ms to turn off Vcc. 182 * It takes about 100ms to turn off Vcc.
@@ -200,12 +201,8 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
200 201
201static struct pcmcia_low_level lubbock_pcmcia_ops = { 202static struct pcmcia_low_level lubbock_pcmcia_ops = {
202 .owner = THIS_MODULE, 203 .owner = THIS_MODULE,
203 .hw_init = sa1111_pcmcia_hw_init,
204 .hw_shutdown = sa1111_pcmcia_hw_shutdown,
205 .socket_state = sa1111_pcmcia_socket_state,
206 .configure_socket = lubbock_pcmcia_configure_socket, 204 .configure_socket = lubbock_pcmcia_configure_socket,
207 .socket_init = sa1111_pcmcia_socket_init, 205 .socket_init = sa1111_pcmcia_socket_init,
208 .socket_suspend = sa1111_pcmcia_socket_suspend,
209 .first = 0, 206 .first = 0,
210 .nr = 2, 207 .nr = 2,
211}; 208};
@@ -228,8 +225,9 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
228 /* Set CF Socket 1 power to standby mode. */ 225 /* Set CF Socket 1 power to standby mode. */
229 lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); 226 lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
230 227
231 sadev->dev.platform_data = &lubbock_pcmcia_ops; 228 pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
232 ret = __pxa2xx_drv_pcmcia_probe(&sadev->dev); 229 ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
230 pxa2xx_drv_pcmcia_add_one);
233 } 231 }
234 232
235 return ret; 233 return ret;
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
index 1138551ba8f6..92016fe932b4 100644
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ b/drivers/pcmcia/pxa2xx_mainstone.c
@@ -44,7 +44,7 @@ static int mst_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
44 * before we enable them as outputs. 44 * before we enable them as outputs.
45 */ 45 */
46 46
47 skt->irq = (skt->nr == 0) ? MAINSTONE_S0_IRQ : MAINSTONE_S1_IRQ; 47 skt->socket.pci_irq = (skt->nr == 0) ? MAINSTONE_S0_IRQ : MAINSTONE_S1_IRQ;
48 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 48 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
49} 49}
50 50
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c
index 5ba9b3664a00..6fb6f7f0672e 100644
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ b/drivers/pcmcia/pxa2xx_palmld.c
@@ -45,7 +45,7 @@ static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
45 if (ret) 45 if (ret)
46 goto err4; 46 goto err4;
47 47
48 skt->irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY); 48 skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY);
49 return 0; 49 return 0;
50 50
51err4: 51err4:
diff --git a/drivers/pcmcia/pxa2xx_palmtx.c b/drivers/pcmcia/pxa2xx_palmtx.c
index e07b5c51ec5b..b07b247a399f 100644
--- a/drivers/pcmcia/pxa2xx_palmtx.c
+++ b/drivers/pcmcia/pxa2xx_palmtx.c
@@ -53,7 +53,7 @@ static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
53 if (ret) 53 if (ret)
54 goto err5; 54 goto err5;
55 55
56 skt->irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY); 56 skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMTX_PCMCIA_READY);
57 return 0; 57 return 0;
58 58
59err5: 59err5:
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index bc43f78f6f0b..0ea3b29440e6 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -66,7 +66,7 @@ static int sharpsl_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
66 } 66 }
67 } 67 }
68 68
69 skt->irq = SCOOP_DEV[skt->nr].irq; 69 skt->socket.pci_irq = SCOOP_DEV[skt->nr].irq;
70 70
71 return 0; 71 return 0;
72} 72}
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c
index e0e5cb339b4a..b7e596620db1 100644
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ b/drivers/pcmcia/pxa2xx_trizeps4.c
@@ -53,7 +53,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
53 gpio_free(GPIO_PRDY); 53 gpio_free(GPIO_PRDY);
54 return -EINVAL; 54 return -EINVAL;
55 } 55 }
56 skt->irq = IRQ_GPIO(GPIO_PRDY); 56 skt->socket.pci_irq = IRQ_GPIO(GPIO_PRDY);
57 break; 57 break;
58 58
59#ifndef CONFIG_MACH_TRIZEPS_CONXS 59#ifndef CONFIG_MACH_TRIZEPS_CONXS
@@ -63,7 +63,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
63 break; 63 break;
64 } 64 }
65 /* release the reset of this card */ 65 /* release the reset of this card */
66 pr_debug("%s: sock %d irq %d\n", __func__, skt->nr, skt->irq); 66 pr_debug("%s: sock %d irq %d\n", __func__, skt->nr, skt->socket.pci_irq);
67 67
 68 /* supplementary irqs for the socket */ 68 /* supplementary irqs for the socket */
69 for (i = 0; i < ARRAY_SIZE(irqs); i++) { 69 for (i = 0; i < ARRAY_SIZE(irqs); i++) {
diff --git a/drivers/pcmcia/pxa2xx_viper.c b/drivers/pcmcia/pxa2xx_viper.c
index 17871360fe99..27be2e154df2 100644
--- a/drivers/pcmcia/pxa2xx_viper.c
+++ b/drivers/pcmcia/pxa2xx_viper.c
@@ -40,7 +40,7 @@ static int viper_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
40{ 40{
41 unsigned long flags; 41 unsigned long flags;
42 42
43 skt->irq = gpio_to_irq(VIPER_CF_RDY_GPIO); 43 skt->socket.pci_irq = gpio_to_irq(VIPER_CF_RDY_GPIO);
44 44
45 if (gpio_request(VIPER_CF_CD_GPIO, "CF detect")) 45 if (gpio_request(VIPER_CF_CD_GPIO, "CF detect"))
46 goto err_request_cd; 46 goto err_request_cd;
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index e592e0e0d7ed..de0e770ce6a3 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -18,6 +18,7 @@
18#include <pcmcia/cs_types.h> 18#include <pcmcia/cs_types.h>
19#include <pcmcia/ss.h> 19#include <pcmcia/ss.h>
20#include <pcmcia/cs.h> 20#include <pcmcia/cs.h>
21#include <pcmcia/cistpl.h>
21#include "cs_internal.h" 22#include "cs_internal.h"
22 23
23 24
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index ac8aa09ba0da..fd013a1ef47a 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -27,7 +27,7 @@ static struct pcmcia_irqs irqs[] = {
27 27
28static int assabet_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 28static int assabet_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
29{ 29{
30 skt->irq = ASSABET_IRQ_GPIO_CF_IRQ; 30 skt->socket.pci_irq = ASSABET_IRQ_GPIO_CF_IRQ;
31 31
32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
33} 33}
diff --git a/drivers/pcmcia/sa1100_badge4.c b/drivers/pcmcia/sa1100_badge4.c
index 1ca9737ea79e..1ce53f493bef 100644
--- a/drivers/pcmcia/sa1100_badge4.c
+++ b/drivers/pcmcia/sa1100_badge4.c
@@ -127,13 +127,10 @@ badge4_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state
127 127
128static struct pcmcia_low_level badge4_pcmcia_ops = { 128static struct pcmcia_low_level badge4_pcmcia_ops = {
129 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
130 .hw_init = sa1111_pcmcia_hw_init,
131 .hw_shutdown = sa1111_pcmcia_hw_shutdown,
132 .socket_state = sa1111_pcmcia_socket_state,
133 .configure_socket = badge4_pcmcia_configure_socket, 130 .configure_socket = badge4_pcmcia_configure_socket,
134
135 .socket_init = sa1111_pcmcia_socket_init, 131 .socket_init = sa1111_pcmcia_socket_init,
136 .socket_suspend = sa1111_pcmcia_socket_suspend, 132 .first = 0,
133 .nr = 2,
137}; 134};
138 135
139int pcmcia_badge4_init(struct device *dev) 136int pcmcia_badge4_init(struct device *dev)
@@ -146,7 +143,9 @@ int pcmcia_badge4_init(struct device *dev)
146 __func__, 143 __func__,
147 badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc); 144 badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
148 145
149 ret = sa11xx_drv_pcmcia_probe(dev, &badge4_pcmcia_ops, 0, 2); 146 sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
147 ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
148 sa11xx_drv_pcmcia_add_one);
150 } 149 }
151 150
152 return ret; 151 return ret;
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index 63e6bc431a0d..9bf088b17275 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -27,7 +27,7 @@ static struct pcmcia_irqs irqs[] = {
27 27
28static int cerf_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 28static int cerf_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
29{ 29{
30 skt->irq = CERF_IRQ_GPIO_CF_IRQ; 30 skt->socket.pci_irq = CERF_IRQ_GPIO_CF_IRQ;
31 31
32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
33} 33}
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 2d0e99751530..11cc3ba1260a 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -83,7 +83,16 @@ static int sa11x0_drv_pcmcia_probe(struct platform_device *dev)
83 83
84static int sa11x0_drv_pcmcia_remove(struct platform_device *dev) 84static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
85{ 85{
86 return soc_common_drv_pcmcia_remove(&dev->dev); 86 struct skt_dev_info *sinfo = platform_get_drvdata(dev);
87 int i;
88
89 platform_set_drvdata(dev, NULL);
90
91 for (i = 0; i < sinfo->nskt; i++)
92 soc_pcmcia_remove_one(&sinfo->skt[i]);
93
94 kfree(sinfo);
95 return 0;
87} 96}
88 97
89static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev, 98static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev,
diff --git a/drivers/pcmcia/sa1100_h3600.c b/drivers/pcmcia/sa1100_h3600.c
index 0cc3748f3758..3a121ac697d6 100644
--- a/drivers/pcmcia/sa1100_h3600.c
+++ b/drivers/pcmcia/sa1100_h3600.c
@@ -25,8 +25,8 @@ static struct pcmcia_irqs irqs[] = {
25 25
26static int h3600_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 26static int h3600_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
27{ 27{
28 skt->irq = skt->nr ? IRQ_GPIO_H3600_PCMCIA_IRQ1 28 skt->socket.pci_irq = skt->nr ? IRQ_GPIO_H3600_PCMCIA_IRQ1
29 : IRQ_GPIO_H3600_PCMCIA_IRQ0; 29 : IRQ_GPIO_H3600_PCMCIA_IRQ0;
30 30
31 31
32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 32 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
diff --git a/drivers/pcmcia/sa1100_jornada720.c b/drivers/pcmcia/sa1100_jornada720.c
index 7eedb42f800c..6bcabee6bde4 100644
--- a/drivers/pcmcia/sa1100_jornada720.c
+++ b/drivers/pcmcia/sa1100_jornada720.c
@@ -22,25 +22,10 @@
22#define SOCKET1_POWER (GPIO_GPIO1 | GPIO_GPIO3) 22#define SOCKET1_POWER (GPIO_GPIO1 | GPIO_GPIO3)
23#define SOCKET1_3V GPIO_GPIO3 23#define SOCKET1_3V GPIO_GPIO3
24 24
25static int jornada720_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
26{
27 unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
28
29 /*
30 * What is all this crap for?
31 */
32 GRER |= 0x00000002;
33 /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
34 sa1111_set_io_dir(SA1111_DEV(skt->dev), pin, 0, 0);
35 sa1111_set_io(SA1111_DEV(skt->dev), pin, 0);
36 sa1111_set_sleep_io(SA1111_DEV(skt->dev), pin, 0);
37
38 return sa1111_pcmcia_hw_init(skt);
39}
40
41static int 25static int
42jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) 26jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
43{ 27{
28 struct sa1111_pcmcia_socket *s = to_skt(skt);
44 unsigned int pa_dwr_mask, pa_dwr_set; 29 unsigned int pa_dwr_mask, pa_dwr_set;
45 int ret; 30 int ret;
46 31
@@ -97,7 +82,7 @@ jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
97 unsigned long flags; 82 unsigned long flags;
98 83
99 local_irq_save(flags); 84 local_irq_save(flags);
100 sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set); 85 sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set);
101 local_irq_restore(flags); 86 local_irq_restore(flags);
102 } 87 }
103 88
@@ -106,21 +91,30 @@ jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
106 91
107static struct pcmcia_low_level jornada720_pcmcia_ops = { 92static struct pcmcia_low_level jornada720_pcmcia_ops = {
108 .owner = THIS_MODULE, 93 .owner = THIS_MODULE,
109 .hw_init = jornada720_pcmcia_hw_init,
110 .hw_shutdown = sa1111_pcmcia_hw_shutdown,
111 .socket_state = sa1111_pcmcia_socket_state,
112 .configure_socket = jornada720_pcmcia_configure_socket, 94 .configure_socket = jornada720_pcmcia_configure_socket,
113
114 .socket_init = sa1111_pcmcia_socket_init, 95 .socket_init = sa1111_pcmcia_socket_init,
115 .socket_suspend = sa1111_pcmcia_socket_suspend, 96 .first = 0,
97 .nr = 2,
116}; 98};
117 99
118int __devinit pcmcia_jornada720_init(struct device *dev) 100int __devinit pcmcia_jornada720_init(struct device *dev)
119{ 101{
120 int ret = -ENODEV; 102 int ret = -ENODEV;
121 103
122 if (machine_is_jornada720()) 104 if (machine_is_jornada720()) {
123 ret = sa11xx_drv_pcmcia_probe(dev, &jornada720_pcmcia_ops, 0, 2); 105 unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
106
107 GRER |= 0x00000002;
108
109 /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
110 sa1111_set_io_dir(dev, pin, 0, 0);
111 sa1111_set_io(dev, pin, 0);
112 sa1111_set_sleep_io(dev, pin, 0);
113
114 sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
115 ret = sa1111_pcmcia_add(dev, &jornada720_pcmcia_ops,
116 sa11xx_drv_pcmcia_add_one);
117 }
124 118
125 return ret; 119 return ret;
126} 120}
diff --git a/drivers/pcmcia/sa1100_neponset.c b/drivers/pcmcia/sa1100_neponset.c
index 0c76d337815b..c95639b5f2a0 100644
--- a/drivers/pcmcia/sa1100_neponset.c
+++ b/drivers/pcmcia/sa1100_neponset.c
@@ -43,6 +43,7 @@
43static int 43static int
44neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) 44neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
45{ 45{
46 struct sa1111_pcmcia_socket *s = to_skt(skt);
46 unsigned int ncr_mask, ncr_set, pa_dwr_mask, pa_dwr_set; 47 unsigned int ncr_mask, ncr_set, pa_dwr_mask, pa_dwr_set;
47 int ret; 48 int ret;
48 49
@@ -99,7 +100,7 @@ neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_sta
99 NCR_0 = (NCR_0 & ~ncr_mask) | ncr_set; 100 NCR_0 = (NCR_0 & ~ncr_mask) | ncr_set;
100 101
101 local_irq_restore(flags); 102 local_irq_restore(flags);
102 sa1111_set_io(SA1111_DEV(skt->dev), pa_dwr_mask, pa_dwr_set); 103 sa1111_set_io(s->dev, pa_dwr_mask, pa_dwr_set);
103 } 104 }
104 105
105 return 0; 106 return 0;
@@ -115,12 +116,10 @@ static void neponset_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
115 116
116static struct pcmcia_low_level neponset_pcmcia_ops = { 117static struct pcmcia_low_level neponset_pcmcia_ops = {
117 .owner = THIS_MODULE, 118 .owner = THIS_MODULE,
118 .hw_init = sa1111_pcmcia_hw_init,
119 .hw_shutdown = sa1111_pcmcia_hw_shutdown,
120 .socket_state = sa1111_pcmcia_socket_state,
121 .configure_socket = neponset_pcmcia_configure_socket, 119 .configure_socket = neponset_pcmcia_configure_socket,
122 .socket_init = neponset_pcmcia_socket_init, 120 .socket_init = neponset_pcmcia_socket_init,
123 .socket_suspend = sa1111_pcmcia_socket_suspend, 121 .first = 0,
122 .nr = 2,
124}; 123};
125 124
126int pcmcia_neponset_init(struct sa1111_dev *sadev) 125int pcmcia_neponset_init(struct sa1111_dev *sadev)
@@ -135,7 +134,9 @@ int pcmcia_neponset_init(struct sa1111_dev *sadev)
135 sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0); 134 sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
136 sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); 135 sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
137 sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0); 136 sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
138 ret = sa11xx_drv_pcmcia_probe(&sadev->dev, &neponset_pcmcia_ops, 0, 2); 137 sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
138 ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
139 sa11xx_drv_pcmcia_add_one);
139 } 140 }
140 141
141 return ret; 142 return ret;
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c
index 46d8c1977c2a..c4d51867a050 100644
--- a/drivers/pcmcia/sa1100_shannon.c
+++ b/drivers/pcmcia/sa1100_shannon.c
@@ -28,7 +28,7 @@ static int shannon_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
28 GAFR &= ~(SHANNON_GPIO_EJECT_0 | SHANNON_GPIO_EJECT_1 | 28 GAFR &= ~(SHANNON_GPIO_EJECT_0 | SHANNON_GPIO_EJECT_1 |
29 SHANNON_GPIO_RDY_0 | SHANNON_GPIO_RDY_1); 29 SHANNON_GPIO_RDY_0 | SHANNON_GPIO_RDY_1);
30 30
31 skt->irq = skt->nr ? SHANNON_IRQ_GPIO_RDY_1 : SHANNON_IRQ_GPIO_RDY_0; 31 skt->socket.pci_irq = skt->nr ? SHANNON_IRQ_GPIO_RDY_1 : SHANNON_IRQ_GPIO_RDY_0;
32 32
33 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 33 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
34} 34}
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index 33a08ae09fdf..05bd504e6f18 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -28,7 +28,7 @@ static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
28 28
29 clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); 29 clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
30 30
31 skt->irq = IRQ_GPIO_CF_IRQ; 31 skt->socket.pci_irq = IRQ_GPIO_CF_IRQ;
32 32
33 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 33 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
34} 34}
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 4be4e172ffa1..de6bc333d299 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -28,23 +28,20 @@ static struct pcmcia_irqs irqs[] = {
28 { 1, IRQ_S1_BVD1_STSCHG, "SA1111 CF BVD1" }, 28 { 1, IRQ_S1_BVD1_STSCHG, "SA1111 CF BVD1" },
29}; 29};
30 30
31int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *skt) 31static int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
32{ 32{
33 if (skt->irq == NO_IRQ)
34 skt->irq = skt->nr ? IRQ_S1_READY_NINT : IRQ_S0_READY_NINT;
35
36 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); 33 return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
37} 34}
38 35
39void sa1111_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) 36static void sa1111_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
40{ 37{
41 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); 38 soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs));
42} 39}
43 40
44void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) 41void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state)
45{ 42{
46 struct sa1111_dev *sadev = SA1111_DEV(skt->dev); 43 struct sa1111_pcmcia_socket *s = to_skt(skt);
47 unsigned long status = sa1111_readl(sadev->mapbase + SA1111_PCSR); 44 unsigned long status = sa1111_readl(s->dev->mapbase + SA1111_PCSR);
48 45
49 switch (skt->nr) { 46 switch (skt->nr) {
50 case 0: 47 case 0:
@@ -71,7 +68,7 @@ void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_sta
71 68
72int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) 69int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
73{ 70{
74 struct sa1111_dev *sadev = SA1111_DEV(skt->dev); 71 struct sa1111_pcmcia_socket *s = to_skt(skt);
75 unsigned int pccr_skt_mask, pccr_set_mask, val; 72 unsigned int pccr_skt_mask, pccr_set_mask, val;
76 unsigned long flags; 73 unsigned long flags;
77 74
@@ -100,10 +97,10 @@ int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
100 pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT; 97 pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT;
101 98
102 local_irq_save(flags); 99 local_irq_save(flags);
103 val = sa1111_readl(sadev->mapbase + SA1111_PCCR); 100 val = sa1111_readl(s->dev->mapbase + SA1111_PCCR);
104 val &= ~pccr_skt_mask; 101 val &= ~pccr_skt_mask;
105 val |= pccr_set_mask & pccr_skt_mask; 102 val |= pccr_set_mask & pccr_skt_mask;
106 sa1111_writel(val, sadev->mapbase + SA1111_PCCR); 103 sa1111_writel(val, s->dev->mapbase + SA1111_PCCR);
107 local_irq_restore(flags); 104 local_irq_restore(flags);
108 105
109 return 0; 106 return 0;
@@ -114,15 +111,51 @@ void sa1111_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
114 soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs)); 111 soc_pcmcia_enable_irqs(skt, irqs, ARRAY_SIZE(irqs));
115} 112}
116 113
117void sa1111_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) 114static void sa1111_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
118{ 115{
119 soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs)); 116 soc_pcmcia_disable_irqs(skt, irqs, ARRAY_SIZE(irqs));
120} 117}
121 118
119int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
120 int (*add)(struct soc_pcmcia_socket *))
121{
122 struct sa1111_pcmcia_socket *s;
123 int i, ret = 0;
124
125 ops->hw_init = sa1111_pcmcia_hw_init;
126 ops->hw_shutdown = sa1111_pcmcia_hw_shutdown;
127 ops->socket_state = sa1111_pcmcia_socket_state;
128 ops->socket_suspend = sa1111_pcmcia_socket_suspend;
129
130 for (i = 0; i < ops->nr; i++) {
131 s = kzalloc(sizeof(*s), GFP_KERNEL);
132 if (!s)
133 return -ENOMEM;
134
135 s->soc.nr = ops->first + i;
136 s->soc.ops = ops;
137 s->soc.socket.owner = ops->owner;
138 s->soc.socket.dev.parent = &dev->dev;
139 s->soc.socket.pci_irq = s->soc.nr ? IRQ_S1_READY_NINT : IRQ_S0_READY_NINT;
140 s->dev = dev;
141
142 ret = add(&s->soc);
143 if (ret == 0) {
144 s->next = dev_get_drvdata(&dev->dev);
145 dev_set_drvdata(&dev->dev, s);
146 } else
147 kfree(s);
148 }
149
150 return ret;
151}
152
122static int pcmcia_probe(struct sa1111_dev *dev) 153static int pcmcia_probe(struct sa1111_dev *dev)
123{ 154{
124 void __iomem *base; 155 void __iomem *base;
125 156
157 dev_set_drvdata(&dev->dev, NULL);
158
126 if (!request_mem_region(dev->res.start, 512, 159 if (!request_mem_region(dev->res.start, 512,
127 SA1111_DRIVER_NAME(dev))) 160 SA1111_DRIVER_NAME(dev)))
128 return -EBUSY; 161 return -EBUSY;
@@ -152,7 +185,15 @@ static int pcmcia_probe(struct sa1111_dev *dev)
152 185
153static int __devexit pcmcia_remove(struct sa1111_dev *dev) 186static int __devexit pcmcia_remove(struct sa1111_dev *dev)
154{ 187{
155 soc_common_drv_pcmcia_remove(&dev->dev); 188 struct sa1111_pcmcia_socket *next, *s = dev_get_drvdata(&dev->dev);
189
190 dev_set_drvdata(&dev->dev, NULL);
191
192 for (; next = s->next, s; s = next) {
193 soc_pcmcia_remove_one(&s->soc);
194 kfree(s);
195 }
196
156 release_mem_region(dev->res.start, 512); 197 release_mem_region(dev->res.start, 512);
157 return 0; 198 return 0;
158} 199}
diff --git a/drivers/pcmcia/sa1111_generic.h b/drivers/pcmcia/sa1111_generic.h
index 10ced4a210d7..02dc8577cdaf 100644
--- a/drivers/pcmcia/sa1111_generic.h
+++ b/drivers/pcmcia/sa1111_generic.h
@@ -1,12 +1,23 @@
1#include "soc_common.h" 1#include "soc_common.h"
2#include "sa11xx_base.h" 2#include "sa11xx_base.h"
3 3
4extern int sa1111_pcmcia_hw_init(struct soc_pcmcia_socket *); 4struct sa1111_pcmcia_socket {
5extern void sa1111_pcmcia_hw_shutdown(struct soc_pcmcia_socket *); 5 struct soc_pcmcia_socket soc;
6 struct sa1111_dev *dev;
7 struct sa1111_pcmcia_socket *next;
8};
9
10static inline struct sa1111_pcmcia_socket *to_skt(struct soc_pcmcia_socket *s)
11{
12 return container_of(s, struct sa1111_pcmcia_socket, soc);
13}
14
15int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
16 int (*add)(struct soc_pcmcia_socket *));
17
6extern void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *, struct pcmcia_state *); 18extern void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *, struct pcmcia_state *);
7extern int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *, const socket_state_t *); 19extern int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *, const socket_state_t *);
8extern void sa1111_pcmcia_socket_init(struct soc_pcmcia_socket *); 20extern void sa1111_pcmcia_socket_init(struct soc_pcmcia_socket *);
9extern void sa1111_pcmcia_socket_suspend(struct soc_pcmcia_socket *);
10 21
11extern int pcmcia_badge4_init(struct device *); 22extern int pcmcia_badge4_init(struct device *);
12extern int pcmcia_jornada720_init(struct device *); 23extern int pcmcia_jornada720_init(struct device *);
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index e15d59f2d8a9..fc9a6527019b 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -171,12 +171,58 @@ static const char *skt_names[] = {
171#define SKT_DEV_INFO_SIZE(n) \ 171#define SKT_DEV_INFO_SIZE(n) \
172 (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket)) 172 (sizeof(struct skt_dev_info) + (n)*sizeof(struct soc_pcmcia_socket))
173 173
174int sa11xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt)
175{
176 skt->res_skt.start = _PCMCIA(skt->nr);
177 skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1;
178 skt->res_skt.name = skt_names[skt->nr];
179 skt->res_skt.flags = IORESOURCE_MEM;
180
181 skt->res_io.start = _PCMCIAIO(skt->nr);
182 skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1;
183 skt->res_io.name = "io";
184 skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
185
186 skt->res_mem.start = _PCMCIAMem(skt->nr);
187 skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1;
188 skt->res_mem.name = "memory";
189 skt->res_mem.flags = IORESOURCE_MEM;
190
191 skt->res_attr.start = _PCMCIAAttr(skt->nr);
192 skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1;
193 skt->res_attr.name = "attribute";
194 skt->res_attr.flags = IORESOURCE_MEM;
195
196 return soc_pcmcia_add_one(skt);
197}
198EXPORT_SYMBOL(sa11xx_drv_pcmcia_add_one);
199
200void sa11xx_drv_pcmcia_ops(struct pcmcia_low_level *ops)
201{
202 /*
203 * set default MECR calculation if the board specific
204 * code did not specify one...
205 */
206 if (!ops->get_timing)
207 ops->get_timing = sa1100_pcmcia_default_mecr_timing;
208
209 /* Provide our SA11x0 specific timing routines. */
210 ops->set_timing = sa1100_pcmcia_set_timing;
211 ops->show_timing = sa1100_pcmcia_show_timing;
212#ifdef CONFIG_CPU_FREQ
213 ops->frequency_change = sa1100_pcmcia_frequency_change;
214#endif
215}
216EXPORT_SYMBOL(sa11xx_drv_pcmcia_ops);
217
174int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, 218int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
175 int first, int nr) 219 int first, int nr)
176{ 220{
177 struct skt_dev_info *sinfo; 221 struct skt_dev_info *sinfo;
178 struct soc_pcmcia_socket *skt; 222 struct soc_pcmcia_socket *skt;
179 int i; 223 int i, ret = 0;
224
225 sa11xx_drv_pcmcia_ops(ops);
180 226
181 sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL); 227 sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
182 if (!sinfo) 228 if (!sinfo)
@@ -188,45 +234,26 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
188 for (i = 0; i < nr; i++) { 234 for (i = 0; i < nr; i++) {
189 skt = &sinfo->skt[i]; 235 skt = &sinfo->skt[i];
190 236
191 skt->nr = first + i; 237 skt->nr = first + i;
192 skt->irq = NO_IRQ; 238 skt->ops = ops;
193 239 skt->socket.owner = ops->owner;
194 skt->res_skt.start = _PCMCIA(skt->nr); 240 skt->socket.dev.parent = dev;
195 skt->res_skt.end = _PCMCIA(skt->nr) + PCMCIASp - 1; 241 skt->socket.pci_irq = NO_IRQ;
196 skt->res_skt.name = skt_names[skt->nr];
197 skt->res_skt.flags = IORESOURCE_MEM;
198
199 skt->res_io.start = _PCMCIAIO(skt->nr);
200 skt->res_io.end = _PCMCIAIO(skt->nr) + PCMCIAIOSp - 1;
201 skt->res_io.name = "io";
202 skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
203 242
204 skt->res_mem.start = _PCMCIAMem(skt->nr); 243 ret = sa11xx_drv_pcmcia_add_one(skt);
205 skt->res_mem.end = _PCMCIAMem(skt->nr) + PCMCIAMemSp - 1; 244 if (ret)
206 skt->res_mem.name = "memory"; 245 break;
207 skt->res_mem.flags = IORESOURCE_MEM;
208
209 skt->res_attr.start = _PCMCIAAttr(skt->nr);
210 skt->res_attr.end = _PCMCIAAttr(skt->nr) + PCMCIAAttrSp - 1;
211 skt->res_attr.name = "attribute";
212 skt->res_attr.flags = IORESOURCE_MEM;
213 } 246 }
214 247
215 /* 248 if (ret) {
216 * set default MECR calculation if the board specific 249 while (--i >= 0)
217 * code did not specify one... 250 soc_pcmcia_remove_one(&sinfo->skt[i]);
218 */ 251 kfree(sinfo);
219 if (!ops->get_timing) 252 } else {
220 ops->get_timing = sa1100_pcmcia_default_mecr_timing; 253 dev_set_drvdata(dev, sinfo);
221 254 }
222 /* Provide our SA11x0 specific timing routines. */
223 ops->set_timing = sa1100_pcmcia_set_timing;
224 ops->show_timing = sa1100_pcmcia_show_timing;
225#ifdef CONFIG_CPU_FREQ
226 ops->frequency_change = sa1100_pcmcia_frequency_change;
227#endif
228 255
229 return soc_common_drv_pcmcia_probe(dev, ops, sinfo); 256 return ret;
230} 257}
231EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe); 258EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe);
232 259
diff --git a/drivers/pcmcia/sa11xx_base.h b/drivers/pcmcia/sa11xx_base.h
index 7bc208280527..3d76d720f463 100644
--- a/drivers/pcmcia/sa11xx_base.h
+++ b/drivers/pcmcia/sa11xx_base.h
@@ -118,6 +118,8 @@ static inline unsigned int sa1100_pcmcia_cmd_time(unsigned int cpu_clock_khz,
118} 118}
119 119
120 120
121int sa11xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
122void sa11xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
121extern int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr); 123extern int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr);
122 124
123#endif /* !defined(_PCMCIA_SA1100_H) */ 125#endif /* !defined(_PCMCIA_SA1100_H) */
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index ef7e9e58782b..6f1a86b43c60 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -144,10 +144,10 @@ soc_common_pcmcia_config_skt(struct soc_pcmcia_socket *skt, socket_state_t *stat
144 */ 144 */
145 if (skt->irq_state != 1 && state->io_irq) { 145 if (skt->irq_state != 1 && state->io_irq) {
146 skt->irq_state = 1; 146 skt->irq_state = 1;
147 set_irq_type(skt->irq, IRQ_TYPE_EDGE_FALLING); 147 set_irq_type(skt->socket.pci_irq, IRQ_TYPE_EDGE_FALLING);
148 } else if (skt->irq_state == 1 && state->io_irq == 0) { 148 } else if (skt->irq_state == 1 && state->io_irq == 0) {
149 skt->irq_state = 0; 149 skt->irq_state = 0;
150 set_irq_type(skt->irq, IRQ_TYPE_NONE); 150 set_irq_type(skt->socket.pci_irq, IRQ_TYPE_NONE);
151 } 151 }
152 152
153 skt->cs_state = *state; 153 skt->cs_state = *state;
@@ -492,7 +492,8 @@ static ssize_t show_status(struct device *dev, struct device_attribute *attr, ch
492 492
493 p+=sprintf(p, "Vcc : %d\n", skt->cs_state.Vcc); 493 p+=sprintf(p, "Vcc : %d\n", skt->cs_state.Vcc);
494 p+=sprintf(p, "Vpp : %d\n", skt->cs_state.Vpp); 494 p+=sprintf(p, "Vpp : %d\n", skt->cs_state.Vpp);
495 p+=sprintf(p, "IRQ : %d (%d)\n", skt->cs_state.io_irq, skt->irq); 495 p+=sprintf(p, "IRQ : %d (%d)\n", skt->cs_state.io_irq,
496 skt->socket.pci_irq);
496 if (skt->ops->show_timing) 497 if (skt->ops->show_timing)
497 p+=skt->ops->show_timing(skt, p); 498 p+=skt->ops->show_timing(skt, p);
498 499
@@ -574,7 +575,7 @@ void soc_pcmcia_enable_irqs(struct soc_pcmcia_socket *skt,
574EXPORT_SYMBOL(soc_pcmcia_enable_irqs); 575EXPORT_SYMBOL(soc_pcmcia_enable_irqs);
575 576
576 577
577LIST_HEAD(soc_pcmcia_sockets); 578static LIST_HEAD(soc_pcmcia_sockets);
578static DEFINE_MUTEX(soc_pcmcia_sockets_lock); 579static DEFINE_MUTEX(soc_pcmcia_sockets_lock);
579 580
580#ifdef CONFIG_CPU_FREQ 581#ifdef CONFIG_CPU_FREQ
@@ -609,177 +610,137 @@ static int soc_pcmcia_cpufreq_register(void)
609 "notifier for PCMCIA (%d)\n", ret); 610 "notifier for PCMCIA (%d)\n", ret);
610 return ret; 611 return ret;
611} 612}
613fs_initcall(soc_pcmcia_cpufreq_register);
612 614
613static void soc_pcmcia_cpufreq_unregister(void) 615static void soc_pcmcia_cpufreq_unregister(void)
614{ 616{
615 cpufreq_unregister_notifier(&soc_pcmcia_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); 617 cpufreq_unregister_notifier(&soc_pcmcia_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
616} 618}
619module_exit(soc_pcmcia_cpufreq_unregister);
617 620
618#else
619static int soc_pcmcia_cpufreq_register(void) { return 0; }
620static void soc_pcmcia_cpufreq_unregister(void) {}
621#endif 621#endif
622 622
623int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, 623void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt)
624 struct skt_dev_info *sinfo)
625{ 624{
626 struct soc_pcmcia_socket *skt;
627 int ret, i;
628
629 mutex_lock(&soc_pcmcia_sockets_lock); 625 mutex_lock(&soc_pcmcia_sockets_lock);
626 del_timer_sync(&skt->poll_timer);
630 627
631 /* 628 pcmcia_unregister_socket(&skt->socket);
632 * Initialise the per-socket structure.
633 */
634 for (i = 0; i < sinfo->nskt; i++) {
635 skt = &sinfo->skt[i];
636 629
637 skt->socket.ops = &soc_common_pcmcia_operations; 630 flush_scheduled_work();
638 skt->socket.owner = ops->owner;
639 skt->socket.dev.parent = dev;
640 631
641 init_timer(&skt->poll_timer); 632 skt->ops->hw_shutdown(skt);
642 skt->poll_timer.function = soc_common_pcmcia_poll_event;
643 skt->poll_timer.data = (unsigned long)skt;
644 skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;
645 633
646 skt->dev = dev; 634 soc_common_pcmcia_config_skt(skt, &dead_socket);
647 skt->ops = ops;
648 635
649 ret = request_resource(&iomem_resource, &skt->res_skt); 636 list_del(&skt->node);
650 if (ret) 637 mutex_unlock(&soc_pcmcia_sockets_lock);
651 goto out_err_1;
652 638
653 ret = request_resource(&skt->res_skt, &skt->res_io); 639 iounmap(skt->virt_io);
654 if (ret) 640 skt->virt_io = NULL;
655 goto out_err_2; 641 release_resource(&skt->res_attr);
642 release_resource(&skt->res_mem);
643 release_resource(&skt->res_io);
644 release_resource(&skt->res_skt);
645}
646EXPORT_SYMBOL(soc_pcmcia_remove_one);
656 647
657 ret = request_resource(&skt->res_skt, &skt->res_mem); 648int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt)
658 if (ret) 649{
659 goto out_err_3; 650 int ret;
660 651
661 ret = request_resource(&skt->res_skt, &skt->res_attr); 652 init_timer(&skt->poll_timer);
662 if (ret) 653 skt->poll_timer.function = soc_common_pcmcia_poll_event;
663 goto out_err_4; 654 skt->poll_timer.data = (unsigned long)skt;
655 skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;
664 656
665 skt->virt_io = ioremap(skt->res_io.start, 0x10000); 657 ret = request_resource(&iomem_resource, &skt->res_skt);
666 if (skt->virt_io == NULL) { 658 if (ret)
667 ret = -ENOMEM; 659 goto out_err_1;
668 goto out_err_5;
669 }
670 660
671 if (list_empty(&soc_pcmcia_sockets)) 661 ret = request_resource(&skt->res_skt, &skt->res_io);
672 soc_pcmcia_cpufreq_register(); 662 if (ret)
663 goto out_err_2;
673 664
674 list_add(&skt->node, &soc_pcmcia_sockets); 665 ret = request_resource(&skt->res_skt, &skt->res_mem);
666 if (ret)
667 goto out_err_3;
675 668
676 /* 669 ret = request_resource(&skt->res_skt, &skt->res_attr);
677 * We initialize default socket timing here, because 670 if (ret)
678 * we are not guaranteed to see a SetIOMap operation at 671 goto out_err_4;
679 * runtime.
680 */
681 ops->set_timing(skt);
682 672
683 ret = ops->hw_init(skt); 673 skt->virt_io = ioremap(skt->res_io.start, 0x10000);
684 if (ret) 674 if (skt->virt_io == NULL) {
685 goto out_err_6; 675 ret = -ENOMEM;
676 goto out_err_5;
677 }
686 678
687 skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD; 679 mutex_lock(&soc_pcmcia_sockets_lock);
688 skt->socket.resource_ops = &pccard_static_ops;
689 skt->socket.irq_mask = 0;
690 skt->socket.map_size = PAGE_SIZE;
691 skt->socket.pci_irq = skt->irq;
692 skt->socket.io_offset = (unsigned long)skt->virt_io;
693 680
694 skt->status = soc_common_pcmcia_skt_state(skt); 681 list_add(&skt->node, &soc_pcmcia_sockets);
695 682
696 ret = pcmcia_register_socket(&skt->socket); 683 /*
697 if (ret) 684 * We initialize default socket timing here, because
698 goto out_err_7; 685 * we are not guaranteed to see a SetIOMap operation at
686 * runtime.
687 */
688 skt->ops->set_timing(skt);
699 689
700 WARN_ON(skt->socket.sock != i); 690 ret = skt->ops->hw_init(skt);
691 if (ret)
692 goto out_err_6;
701 693
702 add_timer(&skt->poll_timer); 694 skt->socket.ops = &soc_common_pcmcia_operations;
695 skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD;
696 skt->socket.resource_ops = &pccard_static_ops;
697 skt->socket.irq_mask = 0;
698 skt->socket.map_size = PAGE_SIZE;
699 skt->socket.io_offset = (unsigned long)skt->virt_io;
703 700
704 ret = device_create_file(&skt->socket.dev, &dev_attr_status); 701 skt->status = soc_common_pcmcia_skt_state(skt);
705 if (ret)
706 goto out_err_8;
707 }
708 702
709 dev_set_drvdata(dev, sinfo); 703 ret = pcmcia_register_socket(&skt->socket);
710 ret = 0; 704 if (ret)
711 goto out; 705 goto out_err_7;
712 706
713 do { 707 add_timer(&skt->poll_timer);
714 skt = &sinfo->skt[i]; 708
709 mutex_unlock(&soc_pcmcia_sockets_lock);
710
711 ret = device_create_file(&skt->socket.dev, &dev_attr_status);
712 if (ret)
713 goto out_err_8;
714
715 return ret;
715 716
716 device_remove_file(&skt->socket.dev, &dev_attr_status);
717 out_err_8: 717 out_err_8:
718 del_timer_sync(&skt->poll_timer); 718 mutex_lock(&soc_pcmcia_sockets_lock);
719 pcmcia_unregister_socket(&skt->socket); 719 del_timer_sync(&skt->poll_timer);
720 pcmcia_unregister_socket(&skt->socket);
720 721
721 out_err_7: 722 out_err_7:
722 flush_scheduled_work(); 723 flush_scheduled_work();
723 724
724 ops->hw_shutdown(skt); 725 skt->ops->hw_shutdown(skt);
725 out_err_6: 726 out_err_6:
726 list_del(&skt->node); 727 list_del(&skt->node);
727 iounmap(skt->virt_io); 728 mutex_unlock(&soc_pcmcia_sockets_lock);
729 iounmap(skt->virt_io);
728 out_err_5: 730 out_err_5:
729 release_resource(&skt->res_attr); 731 release_resource(&skt->res_attr);
730 out_err_4: 732 out_err_4:
731 release_resource(&skt->res_mem); 733 release_resource(&skt->res_mem);
732 out_err_3: 734 out_err_3:
733 release_resource(&skt->res_io); 735 release_resource(&skt->res_io);
734 out_err_2: 736 out_err_2:
735 release_resource(&skt->res_skt); 737 release_resource(&skt->res_skt);
736 out_err_1: 738 out_err_1:
737 i--;
738 } while (i > 0);
739 739
740 kfree(sinfo);
741
742 out:
743 mutex_unlock(&soc_pcmcia_sockets_lock);
744 return ret; 740 return ret;
745} 741}
742EXPORT_SYMBOL(soc_pcmcia_add_one);
746 743
747int soc_common_drv_pcmcia_remove(struct device *dev) 744MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>");
748{ 745MODULE_DESCRIPTION("Linux PCMCIA Card Services: Common SoC support");
749 struct skt_dev_info *sinfo = dev_get_drvdata(dev); 746MODULE_LICENSE("Dual MPL/GPL");
750 int i;
751
752 dev_set_drvdata(dev, NULL);
753
754 mutex_lock(&soc_pcmcia_sockets_lock);
755 for (i = 0; i < sinfo->nskt; i++) {
756 struct soc_pcmcia_socket *skt = &sinfo->skt[i];
757
758 del_timer_sync(&skt->poll_timer);
759
760 pcmcia_unregister_socket(&skt->socket);
761
762 flush_scheduled_work();
763
764 skt->ops->hw_shutdown(skt);
765
766 soc_common_pcmcia_config_skt(skt, &dead_socket);
767
768 list_del(&skt->node);
769 iounmap(skt->virt_io);
770 skt->virt_io = NULL;
771 release_resource(&skt->res_attr);
772 release_resource(&skt->res_mem);
773 release_resource(&skt->res_io);
774 release_resource(&skt->res_skt);
775 }
776 if (list_empty(&soc_pcmcia_sockets))
777 soc_pcmcia_cpufreq_unregister();
778
779 mutex_unlock(&soc_pcmcia_sockets_lock);
780
781 kfree(sinfo);
782
783 return 0;
784}
785EXPORT_SYMBOL(soc_common_drv_pcmcia_remove);
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index 290e143839ee..e40824ce6b0b 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -30,14 +30,12 @@ struct soc_pcmcia_socket {
30 /* 30 /*
31 * Info from low level handler 31 * Info from low level handler
32 */ 32 */
33 struct device *dev;
34 unsigned int nr; 33 unsigned int nr;
35 unsigned int irq;
36 34
37 /* 35 /*
38 * Core PCMCIA state 36 * Core PCMCIA state
39 */ 37 */
40 struct pcmcia_low_level *ops; 38 const struct pcmcia_low_level *ops;
41 39
42 unsigned int status; 40 unsigned int status;
43 socket_state_t cs_state; 41 socket_state_t cs_state;
@@ -135,10 +133,8 @@ extern void soc_pcmcia_enable_irqs(struct soc_pcmcia_socket *skt, struct pcmcia_
135extern void soc_common_pcmcia_get_timing(struct soc_pcmcia_socket *, struct soc_pcmcia_timing *); 133extern void soc_common_pcmcia_get_timing(struct soc_pcmcia_socket *, struct soc_pcmcia_timing *);
136 134
137 135
138extern struct list_head soc_pcmcia_sockets; 136void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt);
139 137int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt);
140extern int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, struct skt_dev_info *sinfo);
141extern int soc_common_drv_pcmcia_remove(struct device *dev);
142 138
143 139
144#ifdef CONFIG_PCMCIA_DEBUG 140#ifdef CONFIG_PCMCIA_DEBUG
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 6918849d511e..12c49ee135e1 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -55,21 +55,6 @@
55#include <pcmcia/ss.h> 55#include <pcmcia/ss.h>
56#include "tcic.h" 56#include "tcic.h"
57 57
58#ifdef CONFIG_PCMCIA_DEBUG
59static int pc_debug;
60
61module_param(pc_debug, int, 0644);
62static const char version[] =
63"tcic.c 1.111 2000/02/15 04:13:12 (David Hinds)";
64
65#define debug(lvl, fmt, arg...) do { \
66 if (pc_debug > (lvl)) \
67 printk(KERN_DEBUG "tcic: " fmt , ## arg); \
68} while (0)
69#else
70#define debug(lvl, fmt, arg...) do { } while (0)
71#endif
72
73MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); 58MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
74MODULE_DESCRIPTION("Databook TCIC-2 PCMCIA socket driver"); 59MODULE_DESCRIPTION("Databook TCIC-2 PCMCIA socket driver");
75MODULE_LICENSE("Dual MPL/GPL"); 60MODULE_LICENSE("Dual MPL/GPL");
@@ -574,7 +559,7 @@ static irqreturn_t tcic_interrupt(int irq, void *dev)
574 } else 559 } else
575 active = 1; 560 active = 1;
576 561
577 debug(2, "tcic_interrupt()\n"); 562 pr_debug("tcic_interrupt()\n");
578 563
579 for (i = 0; i < sockets; i++) { 564 for (i = 0; i < sockets; i++) {
580 psock = socket_table[i].psock; 565 psock = socket_table[i].psock;
@@ -611,13 +596,13 @@ static irqreturn_t tcic_interrupt(int irq, void *dev)
611 } 596 }
612 active = 0; 597 active = 0;
613 598
614 debug(2, "interrupt done\n"); 599 pr_debug("interrupt done\n");
615 return IRQ_HANDLED; 600 return IRQ_HANDLED;
616} /* tcic_interrupt */ 601} /* tcic_interrupt */
617 602
618static void tcic_timer(u_long data) 603static void tcic_timer(u_long data)
619{ 604{
620 debug(2, "tcic_timer()\n"); 605 pr_debug("tcic_timer()\n");
621 tcic_timer_pending = 0; 606 tcic_timer_pending = 0;
622 tcic_interrupt(0, NULL); 607 tcic_interrupt(0, NULL);
623} /* tcic_timer */ 608} /* tcic_timer */
@@ -644,7 +629,7 @@ static int tcic_get_status(struct pcmcia_socket *sock, u_int *value)
644 reg = tcic_getb(TCIC_PWR); 629 reg = tcic_getb(TCIC_PWR);
645 if (reg & (TCIC_PWR_VCC(psock)|TCIC_PWR_VPP(psock))) 630 if (reg & (TCIC_PWR_VCC(psock)|TCIC_PWR_VPP(psock)))
646 *value |= SS_POWERON; 631 *value |= SS_POWERON;
647 debug(1, "GetStatus(%d) = %#2.2x\n", psock, *value); 632 dev_dbg(&sock->dev, "GetStatus(%d) = %#2.2x\n", psock, *value);
648 return 0; 633 return 0;
649} /* tcic_get_status */ 634} /* tcic_get_status */
650 635
@@ -656,7 +641,7 @@ static int tcic_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
656 u_char reg; 641 u_char reg;
657 u_short scf1, scf2; 642 u_short scf1, scf2;
658 643
659 debug(1, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 644 dev_dbg(&sock->dev, "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
660 "io_irq %d, csc_mask %#2.2x)\n", psock, state->flags, 645 "io_irq %d, csc_mask %#2.2x)\n", psock, state->flags,
661 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 646 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
662 tcic_setw(TCIC_ADDR+2, (psock << TCIC_SS_SHFT) | TCIC_ADR2_INDREG); 647 tcic_setw(TCIC_ADDR+2, (psock << TCIC_SS_SHFT) | TCIC_ADR2_INDREG);
@@ -731,7 +716,7 @@ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
731 u_int addr; 716 u_int addr;
732 u_short base, len, ioctl; 717 u_short base, len, ioctl;
733 718
734 debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " 719 dev_dbg(&sock->dev, "SetIOMap(%d, %d, %#2.2x, %d ns, "
735 "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed, 720 "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed,
736 (unsigned long long)io->start, (unsigned long long)io->stop); 721 (unsigned long long)io->start, (unsigned long long)io->stop);
737 if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || 722 if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
@@ -768,7 +753,7 @@ static int tcic_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m
768 u_short addr, ctl; 753 u_short addr, ctl;
769 u_long base, len, mmap; 754 u_long base, len, mmap;
770 755
771 debug(1, "SetMemMap(%d, %d, %#2.2x, %d ns, " 756 dev_dbg(&sock->dev, "SetMemMap(%d, %d, %#2.2x, %d ns, "
772 "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags, 757 "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags,
773 mem->speed, (unsigned long long)mem->res->start, 758 mem->speed, (unsigned long long)mem->res->start,
774 (unsigned long long)mem->res->end, mem->card_start); 759 (unsigned long long)mem->res->end, mem->card_start);
diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h
index edccfa5bb400..615a45a8fe86 100644
--- a/drivers/pcmcia/topic.h
+++ b/drivers/pcmcia/topic.h
@@ -114,22 +114,17 @@ static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff)
114 reg_zv |= TOPIC97_ZV_CONTROL_ENABLE; 114 reg_zv |= TOPIC97_ZV_CONTROL_ENABLE;
115 config_writeb(socket, TOPIC97_ZOOM_VIDEO_CONTROL, reg_zv); 115 config_writeb(socket, TOPIC97_ZOOM_VIDEO_CONTROL, reg_zv);
116 116
117 reg = config_readb(socket, TOPIC97_MISC2);
118 reg |= TOPIC97_MISC2_ZV_ENABLE;
119 config_writeb(socket, TOPIC97_MISC2, reg);
120
121 /* not sure this is needed, doc is unclear */
122#if 0
123 reg = config_readb(socket, TOPIC97_AUDIO_VIDEO_SWITCH); 117 reg = config_readb(socket, TOPIC97_AUDIO_VIDEO_SWITCH);
124 reg |= TOPIC97_AVS_AUDIO_CONTROL | TOPIC97_AVS_VIDEO_CONTROL; 118 reg |= TOPIC97_AVS_AUDIO_CONTROL | TOPIC97_AVS_VIDEO_CONTROL;
125 config_writeb(socket, TOPIC97_AUDIO_VIDEO_SWITCH, reg); 119 config_writeb(socket, TOPIC97_AUDIO_VIDEO_SWITCH, reg);
126#endif 120 } else {
127 }
128 else {
129 reg_zv &= ~TOPIC97_ZV_CONTROL_ENABLE; 121 reg_zv &= ~TOPIC97_ZV_CONTROL_ENABLE;
130 config_writeb(socket, TOPIC97_ZOOM_VIDEO_CONTROL, reg_zv); 122 config_writeb(socket, TOPIC97_ZOOM_VIDEO_CONTROL, reg_zv);
131 }
132 123
124 reg = config_readb(socket, TOPIC97_AUDIO_VIDEO_SWITCH);
125 reg &= ~(TOPIC97_AVS_AUDIO_CONTROL | TOPIC97_AVS_VIDEO_CONTROL);
126 config_writeb(socket, TOPIC97_AUDIO_VIDEO_SWITCH, reg);
127 }
133} 128}
134 129
135static int topic97_override(struct yenta_socket *socket) 130static int topic97_override(struct yenta_socket *socket)
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 0a8f735f6c4a..ab64522aaa64 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -52,7 +52,7 @@
52 */ 52 */
53#undef START_IN_KERNEL_MODE 53#undef START_IN_KERNEL_MODE
54 54
55#define DRV_VER "0.5.17" 55#define DRV_VER "0.5.18"
56 56
57/* 57/*
58 * According to the Atom N270 datasheet, 58 * According to the Atom N270 datasheet,
@@ -61,7 +61,7 @@
61 * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So, 61 * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So,
62 * assume 89°C is critical temperature. 62 * assume 89°C is critical temperature.
63 */ 63 */
64#define ACERHDF_TEMP_CRIT 89 64#define ACERHDF_TEMP_CRIT 89000
65#define ACERHDF_FAN_OFF 0 65#define ACERHDF_FAN_OFF 0
66#define ACERHDF_FAN_AUTO 1 66#define ACERHDF_FAN_AUTO 1
67 67
@@ -69,7 +69,7 @@
69 * No matter what value the user puts into the fanon variable, turn on the fan 69 * No matter what value the user puts into the fanon variable, turn on the fan
70 * at 80 degree Celsius to prevent hardware damage 70 * at 80 degree Celsius to prevent hardware damage
71 */ 71 */
72#define ACERHDF_MAX_FANON 80 72#define ACERHDF_MAX_FANON 80000
73 73
74/* 74/*
75 * Maximum interval between two temperature checks is 15 seconds, as the die 75 * Maximum interval between two temperature checks is 15 seconds, as the die
@@ -85,8 +85,8 @@ static int kernelmode;
85#endif 85#endif
86 86
87static unsigned int interval = 10; 87static unsigned int interval = 10;
88static unsigned int fanon = 63; 88static unsigned int fanon = 63000;
89static unsigned int fanoff = 58; 89static unsigned int fanoff = 58000;
90static unsigned int verbose; 90static unsigned int verbose;
91static unsigned int fanstate = ACERHDF_FAN_AUTO; 91static unsigned int fanstate = ACERHDF_FAN_AUTO;
92static char force_bios[16]; 92static char force_bios[16];
@@ -171,7 +171,7 @@ static int acerhdf_get_temp(int *temp)
171 if (ec_read(bios_cfg->tempreg, &read_temp)) 171 if (ec_read(bios_cfg->tempreg, &read_temp))
172 return -EINVAL; 172 return -EINVAL;
173 173
174 *temp = read_temp; 174 *temp = read_temp * 1000;
175 175
176 return 0; 176 return 0;
177} 177}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d93108d148fc..a848c7e20aeb 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1680,36 +1680,48 @@ static void tpacpi_remove_driver_attributes(struct device_driver *drv)
1680 | (__bv1) << 8 | (__bv2) } 1680 | (__bv1) << 8 | (__bv2) }
1681 1681
1682#define TPV_Q_X(__v, __bid1, __bid2, __bv1, __bv2, \ 1682#define TPV_Q_X(__v, __bid1, __bid2, __bv1, __bv2, \
1683 __eid1, __eid2, __ev1, __ev2) \ 1683 __eid, __ev1, __ev2) \
1684 { .vendor = (__v), \ 1684 { .vendor = (__v), \
1685 .bios = TPID(__bid1, __bid2), \ 1685 .bios = TPID(__bid1, __bid2), \
1686 .ec = TPID(__eid1, __eid2), \ 1686 .ec = __eid, \
1687 .quirks = (__ev1) << 24 | (__ev2) << 16 \ 1687 .quirks = (__ev1) << 24 | (__ev2) << 16 \
1688 | (__bv1) << 8 | (__bv2) } 1688 | (__bv1) << 8 | (__bv2) }
1689 1689
1690#define TPV_QI0(__id1, __id2, __bv1, __bv2) \ 1690#define TPV_QI0(__id1, __id2, __bv1, __bv2) \
1691 TPV_Q(PCI_VENDOR_ID_IBM, __id1, __id2, __bv1, __bv2) 1691 TPV_Q(PCI_VENDOR_ID_IBM, __id1, __id2, __bv1, __bv2)
1692 1692
1693/* Outdated IBM BIOSes often lack the EC id string */
1693#define TPV_QI1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \ 1694#define TPV_QI1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
1694 TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2, \ 1695 TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2, \
1695 __bv1, __bv2, __id1, __id2, __ev1, __ev2) 1696 __bv1, __bv2, TPID(__id1, __id2), \
1697 __ev1, __ev2), \
1698 TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2, \
1699 __bv1, __bv2, TPACPI_MATCH_UNKNOWN, \
1700 __ev1, __ev2)
1696 1701
1702/* Outdated IBM BIOSes often lack the EC id string */
1697#define TPV_QI2(__bid1, __bid2, __bv1, __bv2, \ 1703#define TPV_QI2(__bid1, __bid2, __bv1, __bv2, \
1698 __eid1, __eid2, __ev1, __ev2) \ 1704 __eid1, __eid2, __ev1, __ev2) \
1699 TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2, \ 1705 TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2, \
1700 __bv1, __bv2, __eid1, __eid2, __ev1, __ev2) 1706 __bv1, __bv2, TPID(__eid1, __eid2), \
1707 __ev1, __ev2), \
1708 TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2, \
1709 __bv1, __bv2, TPACPI_MATCH_UNKNOWN, \
1710 __ev1, __ev2)
1701 1711
1702#define TPV_QL0(__id1, __id2, __bv1, __bv2) \ 1712#define TPV_QL0(__id1, __id2, __bv1, __bv2) \
1703 TPV_Q(PCI_VENDOR_ID_LENOVO, __id1, __id2, __bv1, __bv2) 1713 TPV_Q(PCI_VENDOR_ID_LENOVO, __id1, __id2, __bv1, __bv2)
1704 1714
1705#define TPV_QL1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \ 1715#define TPV_QL1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
1706 TPV_Q_X(PCI_VENDOR_ID_LENOVO, __id1, __id2, \ 1716 TPV_Q_X(PCI_VENDOR_ID_LENOVO, __id1, __id2, \
1707 __bv1, __bv2, __id1, __id2, __ev1, __ev2) 1717 __bv1, __bv2, TPID(__id1, __id2), \
1718 __ev1, __ev2)
1708 1719
1709#define TPV_QL2(__bid1, __bid2, __bv1, __bv2, \ 1720#define TPV_QL2(__bid1, __bid2, __bv1, __bv2, \
1710 __eid1, __eid2, __ev1, __ev2) \ 1721 __eid1, __eid2, __ev1, __ev2) \
1711 TPV_Q_X(PCI_VENDOR_ID_LENOVO, __bid1, __bid2, \ 1722 TPV_Q_X(PCI_VENDOR_ID_LENOVO, __bid1, __bid2, \
1712 __bv1, __bv2, __eid1, __eid2, __ev1, __ev2) 1723 __bv1, __bv2, TPID(__eid1, __eid2), \
1724 __ev1, __ev2)
1713 1725
1714static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = { 1726static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
1715 /* Numeric models ------------------ */ 1727 /* Numeric models ------------------ */
@@ -6313,7 +6325,7 @@ static int brightness_write(char *buf)
6313 * Doing it this way makes the syscall restartable in case of EINTR 6325 * Doing it this way makes the syscall restartable in case of EINTR
6314 */ 6326 */
6315 rc = brightness_set(level); 6327 rc = brightness_set(level);
6316 return (rc == -EINTR)? ERESTARTSYS : rc; 6328 return (rc == -EINTR)? -ERESTARTSYS : rc;
6317} 6329}
6318 6330
6319static struct ibm_struct brightness_driver_data = { 6331static struct ibm_struct brightness_driver_data = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 744ea1d0b59b..efe568deda12 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1283,7 +1283,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
1283 return -EIO; 1283 return -EIO;
1284 1284
1285 /* are we the last user and permitted to disable ? */ 1285 /* are we the last user and permitted to disable ? */
1286 if (rdev->use_count == 1 && !rdev->constraints->always_on) { 1286 if (rdev->use_count == 1 &&
1287 (rdev->constraints && !rdev->constraints->always_on)) {
1287 1288
1288 /* we are last user */ 1289 /* we are last user */
1289 if (_regulator_can_change_status(rdev) && 1290 if (_regulator_can_change_status(rdev) &&
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index f8b295700d7d..f9f516a3028a 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -196,11 +196,10 @@ static int regulator_fixed_voltage_remove(struct platform_device *pdev)
196 struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev); 196 struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
197 197
198 regulator_unregister(drvdata->dev); 198 regulator_unregister(drvdata->dev);
199 kfree(drvdata->desc.name);
200 kfree(drvdata);
201
202 if (gpio_is_valid(drvdata->gpio)) 199 if (gpio_is_valid(drvdata->gpio))
203 gpio_free(drvdata->gpio); 200 gpio_free(drvdata->gpio);
201 kfree(drvdata->desc.name);
202 kfree(drvdata);
204 203
205 return 0; 204 return 0;
206} 205}
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 1d8d9879d3a1..48857008758c 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -167,6 +167,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
167 return -ENOMEM; 167 return -ENOMEM;
168 } 168 }
169 169
170 isink->wm831x = wm831x;
171
170 res = platform_get_resource(pdev, IORESOURCE_IO, 0); 172 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
171 if (res == NULL) { 173 if (res == NULL) {
172 dev_err(&pdev->dev, "No I/O resource\n"); 174 dev_err(&pdev->dev, "No I/O resource\n");
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index bb61aede4801..902db56ce099 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -175,18 +175,18 @@ static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev)
175 struct wm831x *wm831x = ldo->wm831x; 175 struct wm831x *wm831x = ldo->wm831x;
176 int ctrl_reg = ldo->base + WM831X_LDO_CONTROL; 176 int ctrl_reg = ldo->base + WM831X_LDO_CONTROL;
177 int on_reg = ldo->base + WM831X_LDO_ON_CONTROL; 177 int on_reg = ldo->base + WM831X_LDO_ON_CONTROL;
178 unsigned int ret; 178 int ret;
179 179
180 ret = wm831x_reg_read(wm831x, on_reg); 180 ret = wm831x_reg_read(wm831x, on_reg);
181 if (ret < 0) 181 if (ret < 0)
182 return 0; 182 return ret;
183 183
184 if (!(ret & WM831X_LDO1_ON_MODE)) 184 if (!(ret & WM831X_LDO1_ON_MODE))
185 return REGULATOR_MODE_NORMAL; 185 return REGULATOR_MODE_NORMAL;
186 186
187 ret = wm831x_reg_read(wm831x, ctrl_reg); 187 ret = wm831x_reg_read(wm831x, ctrl_reg);
188 if (ret < 0) 188 if (ret < 0)
189 return 0; 189 return ret;
190 190
191 if (ret & WM831X_LDO1_LP_MODE) 191 if (ret & WM831X_LDO1_LP_MODE)
192 return REGULATOR_MODE_STANDBY; 192 return REGULATOR_MODE_STANDBY;
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 7fe1fa26c52c..03ea530981d1 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -58,7 +58,16 @@ static irqreturn_t coh901331_interrupt(int irq, void *data)
58 clk_enable(rtap->clk); 58 clk_enable(rtap->clk);
59 /* Ack IRQ */ 59 /* Ack IRQ */
60 writel(1, rtap->virtbase + COH901331_IRQ_EVENT); 60 writel(1, rtap->virtbase + COH901331_IRQ_EVENT);
61 /*
62 * Disable the interrupt. This is necessary because
63 * the RTC lives on a lower-clocked line and will
64 * not release the IRQ line until after a few (slower)
65 * clock cycles. The interrupt will be re-enabled when
66 * a new alarm is set anyway.
67 */
68 writel(0, rtap->virtbase + COH901331_IRQ_MASK);
61 clk_disable(rtap->clk); 69 clk_disable(rtap->clk);
70
62 /* Set alarm flag */ 71 /* Set alarm flag */
63 rtc_update_irq(rtap->rtc, 1, RTC_AF); 72 rtc_update_irq(rtap->rtc, 1, RTC_AF);
64 73
@@ -128,6 +137,8 @@ static int coh901331_alarm_irq_enable(struct device *dev, unsigned int enabled)
128 else 137 else
129 writel(0, rtap->virtbase + COH901331_IRQ_MASK); 138 writel(0, rtap->virtbase + COH901331_IRQ_MASK);
130 clk_disable(rtap->clk); 139 clk_disable(rtap->clk);
140
141 return 0;
131} 142}
132 143
133static struct rtc_class_ops coh901331_ops = { 144static struct rtc_class_ops coh901331_ops = {
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 33a10c47260e..4c5d5d0c4cfc 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -292,8 +292,9 @@ static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
292 &pcf50633_rtc_ops, THIS_MODULE); 292 &pcf50633_rtc_ops, THIS_MODULE);
293 293
294 if (IS_ERR(rtc->rtc_dev)) { 294 if (IS_ERR(rtc->rtc_dev)) {
295 int ret = PTR_ERR(rtc->rtc_dev);
295 kfree(rtc); 296 kfree(rtc);
296 return PTR_ERR(rtc->rtc_dev); 297 return ret;
297 } 298 }
298 299
299 pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM, 300 pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM,
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 310c10795e9a..6583c1a8b070 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -195,7 +195,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
195 /* year, since the rtc epoch*/ 195 /* year, since the rtc epoch*/
196 buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100); 196 buf[CCR_YEAR] = bin2bcd(tm->tm_year % 100);
197 buf[CCR_WDAY] = tm->tm_wday & 0x07; 197 buf[CCR_WDAY] = tm->tm_wday & 0x07;
198 buf[CCR_Y2K] = bin2bcd(tm->tm_year / 100); 198 buf[CCR_Y2K] = bin2bcd((tm->tm_year + 1900) / 100);
199 } 199 }
200 200
201 /* If writing alarm registers, set compare bits on registers 0-4 */ 201 /* If writing alarm registers, set compare bits on registers 0-4 */
@@ -280,9 +280,9 @@ static int x1205_fix_osc(struct i2c_client *client)
280 int err; 280 int err;
281 struct rtc_time tm; 281 struct rtc_time tm;
282 282
283 tm.tm_hour = tm.tm_min = tm.tm_sec = 0; 283 memset(&tm, 0, sizeof(tm));
284 284
285 err = x1205_set_datetime(client, &tm, 0, X1205_CCR_BASE, 0); 285 err = x1205_set_datetime(client, &tm, 1, X1205_CCR_BASE, 0);
286 if (err < 0) 286 if (err < 0)
287 dev_err(&client->dev, "unable to restart the oscillator\n"); 287 dev_err(&client->dev, "unable to restart the oscillator\n");
288 288
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 89ece1c235aa..66e21dd23154 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -357,6 +357,7 @@ static int mon_close(struct inode *inode, struct file *filp)
357 atomic_set(&monpriv->msglim_count, 0); 357 atomic_set(&monpriv->msglim_count, 0);
358 monpriv->write_index = 0; 358 monpriv->write_index = 0;
359 monpriv->read_index = 0; 359 monpriv->read_index = 0;
360 dev_set_drvdata(monreader_device, NULL);
360 361
361 for (i = 0; i < MON_MSGLIM; i++) 362 for (i = 0; i < MON_MSGLIM; i++)
362 kfree(monpriv->msg_array[i]); 363 kfree(monpriv->msg_array[i]);
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 84c191c1cd62..05909a7df8b3 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -20,9 +20,12 @@
20 20
21#include "sclp.h" 21#include "sclp.h"
22 22
23static void (*old_machine_restart)(char *);
24static void (*old_machine_halt)(void);
25static void (*old_machine_power_off)(void);
26
23/* Shutdown handler. Signal completion of shutdown by loading special PSW. */ 27/* Shutdown handler. Signal completion of shutdown by loading special PSW. */
24static void 28static void do_machine_quiesce(void)
25do_machine_quiesce(void)
26{ 29{
27 psw_t quiesce_psw; 30 psw_t quiesce_psw;
28 31
@@ -33,23 +36,48 @@ do_machine_quiesce(void)
33} 36}
34 37
35/* Handler for quiesce event. Start shutdown procedure. */ 38/* Handler for quiesce event. Start shutdown procedure. */
36static void 39static void sclp_quiesce_handler(struct evbuf_header *evbuf)
37sclp_quiesce_handler(struct evbuf_header *evbuf)
38{ 40{
39 _machine_restart = (void *) do_machine_quiesce; 41 if (_machine_restart != (void *) do_machine_quiesce) {
40 _machine_halt = do_machine_quiesce; 42 old_machine_restart = _machine_restart;
41 _machine_power_off = do_machine_quiesce; 43 old_machine_halt = _machine_halt;
44 old_machine_power_off = _machine_power_off;
45 _machine_restart = (void *) do_machine_quiesce;
46 _machine_halt = do_machine_quiesce;
47 _machine_power_off = do_machine_quiesce;
48 }
42 ctrl_alt_del(); 49 ctrl_alt_del();
43} 50}
44 51
52/* Undo machine restart/halt/power_off modification on resume */
53static void sclp_quiesce_pm_event(struct sclp_register *reg,
54 enum sclp_pm_event sclp_pm_event)
55{
56 switch (sclp_pm_event) {
57 case SCLP_PM_EVENT_RESTORE:
58 if (old_machine_restart) {
59 _machine_restart = old_machine_restart;
60 _machine_halt = old_machine_halt;
61 _machine_power_off = old_machine_power_off;
62 old_machine_restart = NULL;
63 old_machine_halt = NULL;
64 old_machine_power_off = NULL;
65 }
66 break;
67 case SCLP_PM_EVENT_FREEZE:
68 case SCLP_PM_EVENT_THAW:
69 break;
70 }
71}
72
45static struct sclp_register sclp_quiesce_event = { 73static struct sclp_register sclp_quiesce_event = {
46 .receive_mask = EVTYP_SIGQUIESCE_MASK, 74 .receive_mask = EVTYP_SIGQUIESCE_MASK,
47 .receiver_fn = sclp_quiesce_handler 75 .receiver_fn = sclp_quiesce_handler,
76 .pm_event_fn = sclp_quiesce_pm_event
48}; 77};
49 78
50/* Initialize quiesce driver. */ 79/* Initialize quiesce driver. */
51static int __init 80static int __init sclp_quiesce_init(void)
52sclp_quiesce_init(void)
53{ 81{
54 return sclp_register(&sclp_quiesce_event); 82 return sclp_register(&sclp_quiesce_event);
55} 83}
diff --git a/drivers/scsi/bfa/bfad_fwimg.c b/drivers/scsi/bfa/bfad_fwimg.c
index b2f6949bc8d3..bd34b0db2d6b 100644
--- a/drivers/scsi/bfa/bfad_fwimg.c
+++ b/drivers/scsi/bfa/bfad_fwimg.c
@@ -41,6 +41,8 @@ u32 *bfi_image_cb;
41 41
42#define BFAD_FW_FILE_CT "ctfw.bin" 42#define BFAD_FW_FILE_CT "ctfw.bin"
43#define BFAD_FW_FILE_CB "cbfw.bin" 43#define BFAD_FW_FILE_CB "cbfw.bin"
44MODULE_FIRMWARE(BFAD_FW_FILE_CT);
45MODULE_FIRMWARE(BFAD_FW_FILE_CB);
44 46
45u32 * 47u32 *
46bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image, 48bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 158c99243c08..55d012a9a668 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -948,7 +948,7 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
948 if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM)) 948 if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
949 /* For FCP type 0x08 */ 949 /* For FCP type 0x08 */
950 fc_host_supported_fc4s(host)[2] = 1; 950 fc_host_supported_fc4s(host)[2] = 1;
951 if (bfad_supported_fc4s | BFA_PORT_ROLE_FCP_IPFC) 951 if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
952 /* For LLC/SNAP type 0x05 */ 952 /* For LLC/SNAP type 0x05 */
953 fc_host_supported_fc4s(host)[3] = 0x20; 953 fc_host_supported_fc4s(host)[3] = 0x20;
954 /* For fibre channel services type 0x20 */ 954 /* For fibre channel services type 0x20 */
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 185e6bc4dd40..9e8fce0f0c1b 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2900,7 +2900,7 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
2900 eindex = handle; 2900 eindex = handle;
2901 estr->event_source = 0; 2901 estr->event_source = 0;
2902 2902
2903 if (eindex >= MAX_EVENTS) { 2903 if (eindex < 0 || eindex >= MAX_EVENTS) {
2904 spin_unlock_irqrestore(&ha->smp_lock, flags); 2904 spin_unlock_irqrestore(&ha->smp_lock, flags);
2905 return eindex; 2905 return eindex;
2906 } 2906 }
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 5fd2da494d08..c968cc31cd86 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -164,8 +164,8 @@ void scsi_remove_host(struct Scsi_Host *shost)
164 return; 164 return;
165 } 165 }
166 spin_unlock_irqrestore(shost->host_lock, flags); 166 spin_unlock_irqrestore(shost->host_lock, flags);
167 mutex_unlock(&shost->scan_mutex);
168 scsi_forget_host(shost); 167 scsi_forget_host(shost);
168 mutex_unlock(&shost->scan_mutex);
169 scsi_proc_host_rm(shost); 169 scsi_proc_host_rm(shost);
170 170
171 spin_lock_irqsave(shost->host_lock, flags); 171 spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 5f045505a1f4..76d294fc7846 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4189,6 +4189,25 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4189} 4189}
4190 4190
4191/** 4191/**
4192 * ipr_isr_eh - Interrupt service routine error handler
4193 * @ioa_cfg: ioa config struct
4194 * @msg: message to log
4195 *
4196 * Return value:
4197 * none
4198 **/
4199static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4200{
4201 ioa_cfg->errors_logged++;
4202 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4203
4204 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4205 ioa_cfg->sdt_state = GET_DUMP;
4206
4207 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4208}
4209
4210/**
4192 * ipr_isr - Interrupt service routine 4211 * ipr_isr - Interrupt service routine
4193 * @irq: irq number 4212 * @irq: irq number
4194 * @devp: pointer to ioa config struct 4213 * @devp: pointer to ioa config struct
@@ -4203,6 +4222,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4203 volatile u32 int_reg, int_mask_reg; 4222 volatile u32 int_reg, int_mask_reg;
4204 u32 ioasc; 4223 u32 ioasc;
4205 u16 cmd_index; 4224 u16 cmd_index;
4225 int num_hrrq = 0;
4206 struct ipr_cmnd *ipr_cmd; 4226 struct ipr_cmnd *ipr_cmd;
4207 irqreturn_t rc = IRQ_NONE; 4227 irqreturn_t rc = IRQ_NONE;
4208 4228
@@ -4233,13 +4253,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4233 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT; 4253 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4234 4254
4235 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) { 4255 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4236 ioa_cfg->errors_logged++; 4256 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
4237 dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
4238
4239 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4240 ioa_cfg->sdt_state = GET_DUMP;
4241
4242 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4243 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 4257 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4244 return IRQ_HANDLED; 4258 return IRQ_HANDLED;
4245 } 4259 }
@@ -4266,8 +4280,18 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4266 4280
4267 if (ipr_cmd != NULL) { 4281 if (ipr_cmd != NULL) {
4268 /* Clear the PCI interrupt */ 4282 /* Clear the PCI interrupt */
4269 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg); 4283 do {
4270 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; 4284 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
4285 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4286 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4287 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4288
4289 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4290 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4291 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4292 return IRQ_HANDLED;
4293 }
4294
4271 } else 4295 } else
4272 break; 4296 break;
4273 } 4297 }
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 163245a1c3e5..19bbcf39f0c9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -144,6 +144,7 @@
144#define IPR_IOA_MAX_SECTORS 32767 144#define IPR_IOA_MAX_SECTORS 32767
145#define IPR_VSET_MAX_SECTORS 512 145#define IPR_VSET_MAX_SECTORS 512
146#define IPR_MAX_CDB_LEN 16 146#define IPR_MAX_CDB_LEN 16
147#define IPR_MAX_HRRQ_RETRIES 3
147 148
148#define IPR_DEFAULT_BUS_WIDTH 16 149#define IPR_DEFAULT_BUS_WIDTH 16
149#define IPR_80MBs_SCSI_RATE ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) 150#define IPR_80MBs_SCSI_RATE ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8))
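
The ipr change above stops trusting a single write to clear the HRRQ interrupt: it rewrites the clear register and re-reads the sense register up to IPR_MAX_HRRQ_RETRIES times, and hands a persistent failure to the new ipr_isr_eh() helper. A minimal, standalone C sketch of that bounded write-to-clear pattern follows; clear_status(), read_status() and the status bit are hypothetical stand-ins for the driver's writel()/readl() register accesses, and in this toy the clear always takes effect (the retry only exists because on real hardware it may not).

#include <stdio.h>

#define MAX_CLEAR_RETRIES 3      /* plays the role of IPR_MAX_HRRQ_RETRIES */
#define STATUS_PENDING    0x01   /* hypothetical "interrupt still asserted" bit */

static unsigned int fake_status = STATUS_PENDING;

/* stand-ins for the writel()/readl() register accesses */
static void clear_status(void)        { fake_status = 0; }
static unsigned int read_status(void) { return fake_status; }

static int clear_with_retries(void)
{
	int tries = 0;
	unsigned int status;

	do {
		clear_status();
		status = read_status();
	} while ((status & STATUS_PENDING) && tries++ < MAX_CLEAR_RETRIES);

	if (status & STATUS_PENDING)
		return -1;	/* still asserted after the retries: give up, as ipr_isr_eh() does */
	return 0;
}

int main(void)
{
	printf("clear %s\n", clear_with_retries() ? "failed" : "succeeded");
	return 0;
}
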
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index b3381959acce..33cf988c8c8a 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -960,7 +960,6 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
960 960
961 } 961 }
962 } 962 }
963 res = 0;
964 } 963 }
965 964
966 return res; 965 return res;
diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c
index 67cde0138061..528733b4a392 100644
--- a/drivers/scsi/pcmcia/aha152x_stub.c
+++ b/drivers/scsi/pcmcia/aha152x_stub.c
@@ -54,15 +54,6 @@
54#include <pcmcia/cistpl.h> 54#include <pcmcia/cistpl.h>
55#include <pcmcia/ds.h> 55#include <pcmcia/ds.h>
56 56
57#ifdef PCMCIA_DEBUG
58static int pc_debug = PCMCIA_DEBUG;
59module_param(pc_debug, int, 0644);
60#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
61static char *version =
62"aha152x_cs.c 1.54 2000/06/12 21:27:25 (David Hinds)";
63#else
64#define DEBUG(n, args...)
65#endif
66 57
67/*====================================================================*/ 58/*====================================================================*/
68 59
@@ -103,7 +94,7 @@ static int aha152x_probe(struct pcmcia_device *link)
103{ 94{
104 scsi_info_t *info; 95 scsi_info_t *info;
105 96
106 DEBUG(0, "aha152x_attach()\n"); 97 dev_dbg(&link->dev, "aha152x_attach()\n");
107 98
108 /* Create new SCSI device */ 99 /* Create new SCSI device */
109 info = kzalloc(sizeof(*info), GFP_KERNEL); 100 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -115,7 +106,6 @@ static int aha152x_probe(struct pcmcia_device *link)
115 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 106 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
116 link->io.IOAddrLines = 10; 107 link->io.IOAddrLines = 10;
117 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 108 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
118 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
119 link->conf.Attributes = CONF_ENABLE_IRQ; 109 link->conf.Attributes = CONF_ENABLE_IRQ;
120 link->conf.IntType = INT_MEMORY_AND_IO; 110 link->conf.IntType = INT_MEMORY_AND_IO;
121 link->conf.Present = PRESENT_OPTION; 111 link->conf.Present = PRESENT_OPTION;
@@ -127,7 +117,7 @@ static int aha152x_probe(struct pcmcia_device *link)
127 117
128static void aha152x_detach(struct pcmcia_device *link) 118static void aha152x_detach(struct pcmcia_device *link)
129{ 119{
130 DEBUG(0, "aha152x_detach(0x%p)\n", link); 120 dev_dbg(&link->dev, "aha152x_detach\n");
131 121
132 aha152x_release_cs(link); 122 aha152x_release_cs(link);
133 123
@@ -137,9 +127,6 @@ static void aha152x_detach(struct pcmcia_device *link)
137 127
138/*====================================================================*/ 128/*====================================================================*/
139 129
140#define CS_CHECK(fn, ret) \
141do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
142
143static int aha152x_config_check(struct pcmcia_device *p_dev, 130static int aha152x_config_check(struct pcmcia_device *p_dev,
144 cistpl_cftable_entry_t *cfg, 131 cistpl_cftable_entry_t *cfg,
145 cistpl_cftable_entry_t *dflt, 132 cistpl_cftable_entry_t *dflt,
@@ -164,19 +151,22 @@ static int aha152x_config_cs(struct pcmcia_device *link)
164{ 151{
165 scsi_info_t *info = link->priv; 152 scsi_info_t *info = link->priv;
166 struct aha152x_setup s; 153 struct aha152x_setup s;
167 int last_ret, last_fn; 154 int ret;
168 struct Scsi_Host *host; 155 struct Scsi_Host *host;
169 156
170 DEBUG(0, "aha152x_config(0x%p)\n", link); 157 dev_dbg(&link->dev, "aha152x_config\n");
171 158
172 last_ret = pcmcia_loop_config(link, aha152x_config_check, NULL); 159 ret = pcmcia_loop_config(link, aha152x_config_check, NULL);
173 if (last_ret) { 160 if (ret)
174 cs_error(link, RequestIO, last_ret); 161 goto failed;
175 goto failed;
176 }
177 162
178 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 163 ret = pcmcia_request_irq(link, &link->irq);
179 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 164 if (ret)
165 goto failed;
166
167 ret = pcmcia_request_configuration(link, &link->conf);
168 if (ret)
169 goto failed;
180 170
181 /* Set configuration options for the aha152x driver */ 171 /* Set configuration options for the aha152x driver */
182 memset(&s, 0, sizeof(s)); 172 memset(&s, 0, sizeof(s));
@@ -194,7 +184,7 @@ static int aha152x_config_cs(struct pcmcia_device *link)
194 host = aha152x_probe_one(&s); 184 host = aha152x_probe_one(&s);
195 if (host == NULL) { 185 if (host == NULL) {
196 printk(KERN_INFO "aha152x_cs: no SCSI devices found\n"); 186 printk(KERN_INFO "aha152x_cs: no SCSI devices found\n");
197 goto cs_failed; 187 goto failed;
198 } 188 }
199 189
200 sprintf(info->node.dev_name, "scsi%d", host->host_no); 190 sprintf(info->node.dev_name, "scsi%d", host->host_no);
@@ -203,8 +193,6 @@ static int aha152x_config_cs(struct pcmcia_device *link)
203 193
204 return 0; 194 return 0;
205 195
206cs_failed:
207 cs_error(link, last_fn, last_ret);
208failed: 196failed:
209 aha152x_release_cs(link); 197 aha152x_release_cs(link);
210 return -ENODEV; 198 return -ENODEV;
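
The aha152x_stub conversion above is typical of the PCMCIA cleanups in this series: the CS_CHECK() macro and cs_error() reporting are dropped in favour of checking each pcmcia_request_*() call explicitly and funnelling every failure into one failed: label that releases whatever was already set up. A small, self-contained C sketch of that shape, with request_a(), request_b() and release() as hypothetical placeholders for the real configuration calls:

#include <stdio.h>

static int request_a(void) { return 0; }
static int request_b(void) { return -1; }   /* pretend this step fails */
static void release(void)  { puts("released resources"); }

static int config_device(void)
{
	int ret;

	ret = request_a();
	if (ret)
		goto failed;

	ret = request_b();
	if (ret)
		goto failed;

	return 0;

failed:
	release();	/* single teardown path, as in aha152x_config_cs() */
	return -1;
}

int main(void)
{
	printf("config_device() = %d\n", config_device());
	return 0;
}
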
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 06254f46a0dd..914040684079 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -59,16 +59,6 @@ MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
59MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver"); 59MODULE_DESCRIPTION("Future Domain PCMCIA SCSI driver");
60MODULE_LICENSE("Dual MPL/GPL"); 60MODULE_LICENSE("Dual MPL/GPL");
61 61
62#ifdef PCMCIA_DEBUG
63static int pc_debug = PCMCIA_DEBUG;
64module_param(pc_debug, int, 0);
65#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
66static char *version =
67"fdomain_cs.c 1.47 2001/10/13 00:08:52 (David Hinds)";
68#else
69#define DEBUG(n, args...)
70#endif
71
72/*====================================================================*/ 62/*====================================================================*/
73 63
74typedef struct scsi_info_t { 64typedef struct scsi_info_t {
@@ -86,7 +76,7 @@ static int fdomain_probe(struct pcmcia_device *link)
86{ 76{
87 scsi_info_t *info; 77 scsi_info_t *info;
88 78
89 DEBUG(0, "fdomain_attach()\n"); 79 dev_dbg(&link->dev, "fdomain_attach()\n");
90 80
91 /* Create new SCSI device */ 81 /* Create new SCSI device */
92 info = kzalloc(sizeof(*info), GFP_KERNEL); 82 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -99,7 +89,6 @@ static int fdomain_probe(struct pcmcia_device *link)
99 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 89 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
100 link->io.IOAddrLines = 10; 90 link->io.IOAddrLines = 10;
101 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 91 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
102 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
103 link->conf.Attributes = CONF_ENABLE_IRQ; 92 link->conf.Attributes = CONF_ENABLE_IRQ;
104 link->conf.IntType = INT_MEMORY_AND_IO; 93 link->conf.IntType = INT_MEMORY_AND_IO;
105 link->conf.Present = PRESENT_OPTION; 94 link->conf.Present = PRESENT_OPTION;
@@ -111,7 +100,7 @@ static int fdomain_probe(struct pcmcia_device *link)
111 100
112static void fdomain_detach(struct pcmcia_device *link) 101static void fdomain_detach(struct pcmcia_device *link)
113{ 102{
114 DEBUG(0, "fdomain_detach(0x%p)\n", link); 103 dev_dbg(&link->dev, "fdomain_detach\n");
115 104
116 fdomain_release(link); 105 fdomain_release(link);
117 106
@@ -120,9 +109,6 @@ static void fdomain_detach(struct pcmcia_device *link)
120 109
121/*====================================================================*/ 110/*====================================================================*/
122 111
123#define CS_CHECK(fn, ret) \
124do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
125
126static int fdomain_config_check(struct pcmcia_device *p_dev, 112static int fdomain_config_check(struct pcmcia_device *p_dev,
127 cistpl_cftable_entry_t *cfg, 113 cistpl_cftable_entry_t *cfg,
128 cistpl_cftable_entry_t *dflt, 114 cistpl_cftable_entry_t *dflt,
@@ -137,20 +123,22 @@ static int fdomain_config_check(struct pcmcia_device *p_dev,
137static int fdomain_config(struct pcmcia_device *link) 123static int fdomain_config(struct pcmcia_device *link)
138{ 124{
139 scsi_info_t *info = link->priv; 125 scsi_info_t *info = link->priv;
140 int last_ret, last_fn; 126 int ret;
141 char str[22]; 127 char str[22];
142 struct Scsi_Host *host; 128 struct Scsi_Host *host;
143 129
144 DEBUG(0, "fdomain_config(0x%p)\n", link); 130 dev_dbg(&link->dev, "fdomain_config\n");
145 131
146 last_ret = pcmcia_loop_config(link, fdomain_config_check, NULL); 132 ret = pcmcia_loop_config(link, fdomain_config_check, NULL);
147 if (last_ret) { 133 if (ret)
148 cs_error(link, RequestIO, last_ret);
149 goto failed; 134 goto failed;
150 }
151 135
152 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 136 ret = pcmcia_request_irq(link, &link->irq);
153 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 137 if (ret)
138 goto failed;
139 ret = pcmcia_request_configuration(link, &link->conf);
140 if (ret)
141 goto failed;
154 142
155 /* A bad hack... */ 143 /* A bad hack... */
156 release_region(link->io.BasePort1, link->io.NumPorts1); 144 release_region(link->io.BasePort1, link->io.NumPorts1);
@@ -162,11 +150,11 @@ static int fdomain_config(struct pcmcia_device *link)
162 host = __fdomain_16x0_detect(&fdomain_driver_template); 150 host = __fdomain_16x0_detect(&fdomain_driver_template);
163 if (!host) { 151 if (!host) {
164 printk(KERN_INFO "fdomain_cs: no SCSI devices found\n"); 152 printk(KERN_INFO "fdomain_cs: no SCSI devices found\n");
165 goto cs_failed; 153 goto failed;
166 } 154 }
167 155
168 if (scsi_add_host(host, NULL)) 156 if (scsi_add_host(host, NULL))
169 goto cs_failed; 157 goto failed;
170 scsi_scan_host(host); 158 scsi_scan_host(host);
171 159
172 sprintf(info->node.dev_name, "scsi%d", host->host_no); 160 sprintf(info->node.dev_name, "scsi%d", host->host_no);
@@ -175,8 +163,6 @@ static int fdomain_config(struct pcmcia_device *link)
175 163
176 return 0; 164 return 0;
177 165
178cs_failed:
179 cs_error(link, last_fn, last_ret);
180failed: 166failed:
181 fdomain_release(link); 167 fdomain_release(link);
182 return -ENODEV; 168 return -ENODEV;
@@ -188,7 +174,7 @@ static void fdomain_release(struct pcmcia_device *link)
188{ 174{
189 scsi_info_t *info = link->priv; 175 scsi_info_t *info = link->priv;
190 176
191 DEBUG(0, "fdomain_release(0x%p)\n", link); 177 dev_dbg(&link->dev, "fdomain_release\n");
192 178
193 scsi_remove_host(info->host); 179 scsi_remove_host(info->host);
194 pcmcia_disable_device(link); 180 pcmcia_disable_device(link);
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index e32c344d7ad8..c2341af587a3 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1564,12 +1564,10 @@ static int nsp_cs_probe(struct pcmcia_device *link)
1564 link->io.IOAddrLines = 10; /* not used */ 1564 link->io.IOAddrLines = 10; /* not used */
1565 1565
1566 /* Interrupt setup */ 1566 /* Interrupt setup */
1567 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 1567 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
1568 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
1569 1568
1570 /* Interrupt handler */ 1569 /* Interrupt handler */
1571 link->irq.Handler = &nspintr; 1570 link->irq.Handler = &nspintr;
1572 link->irq.Instance = info;
1573 link->irq.Attributes |= IRQF_SHARED; 1571 link->irq.Attributes |= IRQF_SHARED;
1574 1572
1575 /* General socket configuration */ 1573 /* General socket configuration */
@@ -1684,10 +1682,10 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev,
1684 if (cfg_mem->req.Size < 0x1000) 1682 if (cfg_mem->req.Size < 0x1000)
1685 cfg_mem->req.Size = 0x1000; 1683 cfg_mem->req.Size = 0x1000;
1686 cfg_mem->req.AccessSpeed = 0; 1684 cfg_mem->req.AccessSpeed = 0;
1687 if (pcmcia_request_window(&p_dev, &cfg_mem->req, &p_dev->win) != 0) 1685 if (pcmcia_request_window(p_dev, &cfg_mem->req, &p_dev->win) != 0)
1688 goto next_entry; 1686 goto next_entry;
1689 map.Page = 0; map.CardOffset = mem->win[0].card_addr; 1687 map.Page = 0; map.CardOffset = mem->win[0].card_addr;
1690 if (pcmcia_map_mem_page(p_dev->win, &map) != 0) 1688 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map) != 0)
1691 goto next_entry; 1689 goto next_entry;
1692 1690
1693 cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size); 1691 cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size);
diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c
index 20c3e5e6d88a..f85f094870b4 100644
--- a/drivers/scsi/pcmcia/qlogic_stub.c
+++ b/drivers/scsi/pcmcia/qlogic_stub.c
@@ -62,15 +62,6 @@
62 62
63static char qlogic_name[] = "qlogic_cs"; 63static char qlogic_name[] = "qlogic_cs";
64 64
65#ifdef PCMCIA_DEBUG
66static int pc_debug = PCMCIA_DEBUG;
67module_param(pc_debug, int, 0644);
68#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
69static char *version = "qlogic_cs.c 1.79-ac 2002/10/26 (David Hinds)";
70#else
71#define DEBUG(n, args...)
72#endif
73
74static struct scsi_host_template qlogicfas_driver_template = { 65static struct scsi_host_template qlogicfas_driver_template = {
75 .module = THIS_MODULE, 66 .module = THIS_MODULE,
76 .name = qlogic_name, 67 .name = qlogic_name,
@@ -159,7 +150,7 @@ static int qlogic_probe(struct pcmcia_device *link)
159{ 150{
160 scsi_info_t *info; 151 scsi_info_t *info;
161 152
162 DEBUG(0, "qlogic_attach()\n"); 153 dev_dbg(&link->dev, "qlogic_attach()\n");
163 154
164 /* Create new SCSI device */ 155 /* Create new SCSI device */
165 info = kzalloc(sizeof(*info), GFP_KERNEL); 156 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -171,7 +162,6 @@ static int qlogic_probe(struct pcmcia_device *link)
171 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 162 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
172 link->io.IOAddrLines = 10; 163 link->io.IOAddrLines = 10;
173 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 164 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
174 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
175 link->conf.Attributes = CONF_ENABLE_IRQ; 165 link->conf.Attributes = CONF_ENABLE_IRQ;
176 link->conf.IntType = INT_MEMORY_AND_IO; 166 link->conf.IntType = INT_MEMORY_AND_IO;
177 link->conf.Present = PRESENT_OPTION; 167 link->conf.Present = PRESENT_OPTION;
@@ -183,7 +173,7 @@ static int qlogic_probe(struct pcmcia_device *link)
183 173
184static void qlogic_detach(struct pcmcia_device *link) 174static void qlogic_detach(struct pcmcia_device *link)
185{ 175{
186 DEBUG(0, "qlogic_detach(0x%p)\n", link); 176 dev_dbg(&link->dev, "qlogic_detach\n");
187 177
188 qlogic_release(link); 178 qlogic_release(link);
189 kfree(link->priv); 179 kfree(link->priv);
@@ -192,9 +182,6 @@ static void qlogic_detach(struct pcmcia_device *link)
192 182
193/*====================================================================*/ 183/*====================================================================*/
194 184
195#define CS_CHECK(fn, ret) \
196do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
197
198static int qlogic_config_check(struct pcmcia_device *p_dev, 185static int qlogic_config_check(struct pcmcia_device *p_dev,
199 cistpl_cftable_entry_t *cfg, 186 cistpl_cftable_entry_t *cfg,
200 cistpl_cftable_entry_t *dflt, 187 cistpl_cftable_entry_t *dflt,
@@ -213,19 +200,22 @@ static int qlogic_config_check(struct pcmcia_device *p_dev,
213static int qlogic_config(struct pcmcia_device * link) 200static int qlogic_config(struct pcmcia_device * link)
214{ 201{
215 scsi_info_t *info = link->priv; 202 scsi_info_t *info = link->priv;
216 int last_ret, last_fn; 203 int ret;
217 struct Scsi_Host *host; 204 struct Scsi_Host *host;
218 205
219 DEBUG(0, "qlogic_config(0x%p)\n", link); 206 dev_dbg(&link->dev, "qlogic_config\n");
220 207
221 last_ret = pcmcia_loop_config(link, qlogic_config_check, NULL); 208 ret = pcmcia_loop_config(link, qlogic_config_check, NULL);
222 if (last_ret) { 209 if (ret)
223 cs_error(link, RequestIO, last_ret); 210 goto failed;
211
212 ret = pcmcia_request_irq(link, &link->irq);
213 if (ret)
224 goto failed; 214 goto failed;
225 }
226 215
227 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 216 ret = pcmcia_request_configuration(link, &link->conf);
228 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 217 if (ret)
218 goto failed;
229 219
230 if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { 220 if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) {
231 /* set ATAcmd */ 221 /* set ATAcmd */
@@ -244,7 +234,7 @@ static int qlogic_config(struct pcmcia_device * link)
244 234
245 if (!host) { 235 if (!host) {
246 printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name); 236 printk(KERN_INFO "%s: no SCSI devices found\n", qlogic_name);
247 goto cs_failed; 237 goto failed;
248 } 238 }
249 239
250 sprintf(info->node.dev_name, "scsi%d", host->host_no); 240 sprintf(info->node.dev_name, "scsi%d", host->host_no);
@@ -253,12 +243,9 @@ static int qlogic_config(struct pcmcia_device * link)
253 243
254 return 0; 244 return 0;
255 245
256cs_failed:
257 cs_error(link, last_fn, last_ret);
258 pcmcia_disable_device(link);
259failed: 246failed:
247 pcmcia_disable_device(link);
260 return -ENODEV; 248 return -ENODEV;
261
262} /* qlogic_config */ 249} /* qlogic_config */
263 250
264/*====================================================================*/ 251/*====================================================================*/
@@ -267,7 +254,7 @@ static void qlogic_release(struct pcmcia_device *link)
267{ 254{
268 scsi_info_t *info = link->priv; 255 scsi_info_t *info = link->priv;
269 256
270 DEBUG(0, "qlogic_release(0x%p)\n", link); 257 dev_dbg(&link->dev, "qlogic_release\n");
271 258
272 scsi_remove_host(info->host); 259 scsi_remove_host(info->host);
273 260
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index b330c11a1752..e7564d8f0cbf 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -77,17 +77,6 @@
77#include <pcmcia/ds.h> 77#include <pcmcia/ds.h>
78#include <pcmcia/ciscode.h> 78#include <pcmcia/ciscode.h>
79 79
80/* ================================================================== */
81
82#ifdef PCMCIA_DEBUG
83static int pc_debug = PCMCIA_DEBUG;
84module_param(pc_debug, int, 0);
85#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
86static char *version =
87"sym53c500_cs.c 0.9c 2004/10/27 (Bob Tracy)";
88#else
89#define DEBUG(n, args...)
90#endif
91 80
92/* ================================================================== */ 81/* ================================================================== */
93 82
@@ -525,7 +514,7 @@ SYM53C500_release(struct pcmcia_device *link)
525 struct scsi_info_t *info = link->priv; 514 struct scsi_info_t *info = link->priv;
526 struct Scsi_Host *shost = info->host; 515 struct Scsi_Host *shost = info->host;
527 516
528 DEBUG(0, "SYM53C500_release(0x%p)\n", link); 517 dev_dbg(&link->dev, "SYM53C500_release\n");
529 518
530 /* 519 /*
531 * Do this before releasing/freeing resources. 520 * Do this before releasing/freeing resources.
@@ -697,9 +686,6 @@ static struct scsi_host_template sym53c500_driver_template = {
697 .shost_attrs = SYM53C500_shost_attrs 686 .shost_attrs = SYM53C500_shost_attrs
698}; 687};
699 688
700#define CS_CHECK(fn, ret) \
701do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
702
703static int SYM53C500_config_check(struct pcmcia_device *p_dev, 689static int SYM53C500_config_check(struct pcmcia_device *p_dev,
704 cistpl_cftable_entry_t *cfg, 690 cistpl_cftable_entry_t *cfg,
705 cistpl_cftable_entry_t *dflt, 691 cistpl_cftable_entry_t *dflt,
@@ -719,24 +705,27 @@ static int
719SYM53C500_config(struct pcmcia_device *link) 705SYM53C500_config(struct pcmcia_device *link)
720{ 706{
721 struct scsi_info_t *info = link->priv; 707 struct scsi_info_t *info = link->priv;
722 int last_ret, last_fn; 708 int ret;
723 int irq_level, port_base; 709 int irq_level, port_base;
724 struct Scsi_Host *host; 710 struct Scsi_Host *host;
725 struct scsi_host_template *tpnt = &sym53c500_driver_template; 711 struct scsi_host_template *tpnt = &sym53c500_driver_template;
726 struct sym53c500_data *data; 712 struct sym53c500_data *data;
727 713
728 DEBUG(0, "SYM53C500_config(0x%p)\n", link); 714 dev_dbg(&link->dev, "SYM53C500_config\n");
729 715
730 info->manf_id = link->manf_id; 716 info->manf_id = link->manf_id;
731 717
732 last_ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL); 718 ret = pcmcia_loop_config(link, SYM53C500_config_check, NULL);
733 if (last_ret) { 719 if (ret)
734 cs_error(link, RequestIO, last_ret); 720 goto failed;
721
722 ret = pcmcia_request_irq(link, &link->irq);
723 if (ret)
735 goto failed; 724 goto failed;
736 }
737 725
738 CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq)); 726 ret = pcmcia_request_configuration(link, &link->conf);
739 CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf)); 727 if (ret)
728 goto failed;
740 729
741 /* 730 /*
742 * That's the trouble with copying liberally from another driver. 731 * That's the trouble with copying liberally from another driver.
@@ -824,8 +813,6 @@ err_release:
824 printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n"); 813 printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n");
825 return -ENODEV; 814 return -ENODEV;
826 815
827cs_failed:
828 cs_error(link, last_fn, last_ret);
829failed: 816failed:
830 SYM53C500_release(link); 817 SYM53C500_release(link);
831 return -ENODEV; 818 return -ENODEV;
@@ -855,7 +842,7 @@ static int sym53c500_resume(struct pcmcia_device *link)
855static void 842static void
856SYM53C500_detach(struct pcmcia_device *link) 843SYM53C500_detach(struct pcmcia_device *link)
857{ 844{
858 DEBUG(0, "SYM53C500_detach(0x%p)\n", link); 845 dev_dbg(&link->dev, "SYM53C500_detach\n");
859 846
860 SYM53C500_release(link); 847 SYM53C500_release(link);
861 848
@@ -868,7 +855,7 @@ SYM53C500_probe(struct pcmcia_device *link)
868{ 855{
869 struct scsi_info_t *info; 856 struct scsi_info_t *info;
870 857
871 DEBUG(0, "SYM53C500_attach()\n"); 858 dev_dbg(&link->dev, "SYM53C500_attach()\n");
872 859
873 /* Create new SCSI device */ 860 /* Create new SCSI device */
874 info = kzalloc(sizeof(*info), GFP_KERNEL); 861 info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -880,7 +867,6 @@ SYM53C500_probe(struct pcmcia_device *link)
880 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 867 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
881 link->io.IOAddrLines = 10; 868 link->io.IOAddrLines = 10;
882 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 869 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
883 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
884 link->conf.Attributes = CONF_ENABLE_IRQ; 870 link->conf.Attributes = CONF_ENABLE_IRQ;
885 link->conf.IntType = INT_MEMORY_AND_IO; 871 link->conf.IntType = INT_MEMORY_AND_IO;
886 872
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index f7c70e2a8224..0a97bc9074bb 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1071,7 +1071,7 @@ static struct pmcraid_cmd *pmcraid_init_hcam
1071 1071
1072 ioarcb->data_transfer_length = cpu_to_le32(rcb_size); 1072 ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
1073 1073
1074 ioadl[0].flags |= cpu_to_le32(IOADL_FLAGS_READ_LAST); 1074 ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
1075 ioadl[0].data_len = cpu_to_le32(rcb_size); 1075 ioadl[0].data_len = cpu_to_le32(rcb_size);
1076 ioadl[0].address = cpu_to_le32(dma); 1076 ioadl[0].address = cpu_to_le32(dma);
1077 1077
@@ -2251,7 +2251,7 @@ static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
2251 2251
2252 ioadl->address = cpu_to_le64(cmd->sense_buffer_dma); 2252 ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
2253 ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); 2253 ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
2254 ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); 2254 ioadl->flags = IOADL_FLAGS_LAST_DESC;
2255 2255
2256 /* request sense might be called as part of error response processing 2256 /* request sense might be called as part of error response processing
2257 * which runs in tasklets context. It is possible that mid-layer might 2257 * which runs in tasklets context. It is possible that mid-layer might
@@ -3017,7 +3017,7 @@ static int pmcraid_build_ioadl(
3017 ioadl[i].flags = 0; 3017 ioadl[i].flags = 0;
3018 } 3018 }
3019 /* setup last descriptor */ 3019 /* setup last descriptor */
3020 ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); 3020 ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
3021 3021
3022 return 0; 3022 return 0;
3023} 3023}
@@ -3387,7 +3387,7 @@ static int pmcraid_build_passthrough_ioadls(
3387 } 3387 }
3388 3388
3389 /* setup the last descriptor */ 3389 /* setup the last descriptor */
3390 ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); 3390 ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
3391 3391
3392 return 0; 3392 return 0;
3393} 3393}
@@ -5314,7 +5314,7 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
5314 cpu_to_le32(sizeof(struct pmcraid_config_table)); 5314 cpu_to_le32(sizeof(struct pmcraid_config_table));
5315 5315
5316 ioadl = &(ioarcb->add_data.u.ioadl[0]); 5316 ioadl = &(ioarcb->add_data.u.ioadl[0]);
5317 ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC); 5317 ioadl->flags = IOADL_FLAGS_LAST_DESC;
5318 ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr); 5318 ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
5319 ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table)); 5319 ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
5320 5320
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 0547a7f44d42..47291bcff0d5 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -952,16 +952,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
952 return SCSI_SCAN_LUN_PRESENT; 952 return SCSI_SCAN_LUN_PRESENT;
953} 953}
954 954
955static inline void scsi_destroy_sdev(struct scsi_device *sdev)
956{
957 scsi_device_set_state(sdev, SDEV_DEL);
958 if (sdev->host->hostt->slave_destroy)
959 sdev->host->hostt->slave_destroy(sdev);
960 transport_destroy_device(&sdev->sdev_gendev);
961 put_device(&sdev->sdev_dev);
962 put_device(&sdev->sdev_gendev);
963}
964
965#ifdef CONFIG_SCSI_LOGGING 955#ifdef CONFIG_SCSI_LOGGING
966/** 956/**
967 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace 957 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
@@ -1139,7 +1129,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
1139 } 1129 }
1140 } 1130 }
1141 } else 1131 } else
1142 scsi_destroy_sdev(sdev); 1132 __scsi_remove_device(sdev);
1143 out: 1133 out:
1144 return res; 1134 return res;
1145} 1135}
@@ -1500,7 +1490,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1500 /* 1490 /*
1501 * the sdev we used didn't appear in the report luns scan 1491 * the sdev we used didn't appear in the report luns scan
1502 */ 1492 */
1503 scsi_destroy_sdev(sdev); 1493 __scsi_remove_device(sdev);
1504 return ret; 1494 return ret;
1505} 1495}
1506 1496
@@ -1710,7 +1700,7 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1710 shost_for_each_device(sdev, shost) { 1700 shost_for_each_device(sdev, shost) {
1711 if (!scsi_host_scan_allowed(shost) || 1701 if (!scsi_host_scan_allowed(shost) ||
1712 scsi_sysfs_add_sdev(sdev) != 0) 1702 scsi_sysfs_add_sdev(sdev) != 0)
1713 scsi_destroy_sdev(sdev); 1703 __scsi_remove_device(sdev);
1714 } 1704 }
1715} 1705}
1716 1706
@@ -1943,7 +1933,7 @@ void scsi_free_host_dev(struct scsi_device *sdev)
1943{ 1933{
1944 BUG_ON(sdev->id != sdev->host->this_id); 1934 BUG_ON(sdev->id != sdev->host->this_id);
1945 1935
1946 scsi_destroy_sdev(sdev); 1936 __scsi_remove_device(sdev);
1947} 1937}
1948EXPORT_SYMBOL(scsi_free_host_dev); 1938EXPORT_SYMBOL(scsi_free_host_dev);
1949 1939
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 5c7eb63a19d1..392d8db33905 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -854,82 +854,73 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
854 transport_configure_device(&starget->dev); 854 transport_configure_device(&starget->dev);
855 error = device_add(&sdev->sdev_gendev); 855 error = device_add(&sdev->sdev_gendev);
856 if (error) { 856 if (error) {
857 put_device(sdev->sdev_gendev.parent);
858 printk(KERN_INFO "error 1\n"); 857 printk(KERN_INFO "error 1\n");
859 return error; 858 goto out_remove;
860 } 859 }
861 error = device_add(&sdev->sdev_dev); 860 error = device_add(&sdev->sdev_dev);
862 if (error) { 861 if (error) {
863 printk(KERN_INFO "error 2\n"); 862 printk(KERN_INFO "error 2\n");
864 goto clean_device; 863 device_del(&sdev->sdev_gendev);
864 goto out_remove;
865 } 865 }
866 transport_add_device(&sdev->sdev_gendev);
867 sdev->is_visible = 1;
866 868
867 /* create queue files, which may be writable, depending on the host */ 869 /* create queue files, which may be writable, depending on the host */
868 if (sdev->host->hostt->change_queue_depth) 870 if (sdev->host->hostt->change_queue_depth)
869 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw); 871 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw);
870 else 872 else
871 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); 873 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
872 if (error) { 874 if (error)
873 __scsi_remove_device(sdev); 875 goto out_remove;
874 goto out; 876
875 }
876 if (sdev->host->hostt->change_queue_type) 877 if (sdev->host->hostt->change_queue_type)
877 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw); 878 error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
878 else 879 else
879 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type); 880 error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
880 if (error) { 881 if (error)
881 __scsi_remove_device(sdev); 882 goto out_remove;
882 goto out;
883 }
884 883
885 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL); 884 error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
886 885
887 if (error) 886 if (error)
887 /* we're treating error on bsg register as non-fatal,
888 * so pretend nothing went wrong */
888 sdev_printk(KERN_INFO, sdev, 889 sdev_printk(KERN_INFO, sdev,
889 "Failed to register bsg queue, errno=%d\n", error); 890 "Failed to register bsg queue, errno=%d\n", error);
890 891
891 /* we're treating error on bsg register as non-fatal, so pretend
892 * nothing went wrong */
893 error = 0;
894
895 /* add additional host specific attributes */ 892 /* add additional host specific attributes */
896 if (sdev->host->hostt->sdev_attrs) { 893 if (sdev->host->hostt->sdev_attrs) {
897 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) { 894 for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
898 error = device_create_file(&sdev->sdev_gendev, 895 error = device_create_file(&sdev->sdev_gendev,
899 sdev->host->hostt->sdev_attrs[i]); 896 sdev->host->hostt->sdev_attrs[i]);
900 if (error) { 897 if (error)
901 __scsi_remove_device(sdev); 898 goto out_remove;
902 goto out;
903 }
904 } 899 }
905 } 900 }
906 901
907 transport_add_device(&sdev->sdev_gendev); 902 return 0;
908 out:
909 return error;
910
911 clean_device:
912 scsi_device_set_state(sdev, SDEV_CANCEL);
913
914 device_del(&sdev->sdev_gendev);
915 transport_destroy_device(&sdev->sdev_gendev);
916 put_device(&sdev->sdev_dev);
917 put_device(&sdev->sdev_gendev);
918 903
904 out_remove:
905 __scsi_remove_device(sdev);
919 return error; 906 return error;
907
920} 908}
921 909
922void __scsi_remove_device(struct scsi_device *sdev) 910void __scsi_remove_device(struct scsi_device *sdev)
923{ 911{
924 struct device *dev = &sdev->sdev_gendev; 912 struct device *dev = &sdev->sdev_gendev;
925 913
926 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) 914 if (sdev->is_visible) {
927 return; 915 if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
916 return;
928 917
929 bsg_unregister_queue(sdev->request_queue); 918 bsg_unregister_queue(sdev->request_queue);
930 device_unregister(&sdev->sdev_dev); 919 device_unregister(&sdev->sdev_dev);
931 transport_remove_device(dev); 920 transport_remove_device(dev);
932 device_del(dev); 921 device_del(dev);
922 } else
923 put_device(&sdev->sdev_dev);
933 scsi_device_set_state(sdev, SDEV_DEL); 924 scsi_device_set_state(sdev, SDEV_DEL);
934 if (sdev->host->hostt->slave_destroy) 925 if (sdev->host->hostt->slave_destroy)
935 sdev->host->hostt->slave_destroy(sdev); 926 sdev->host->hostt->slave_destroy(sdev);
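
The scsi_sysfs change above collapses the partly duplicated error paths of scsi_sysfs_add_sdev() into a single out_remove: label and teaches __scsi_remove_device() to handle devices whose sysfs registration never completed, keyed off the new is_visible flag. A simplified, standalone sketch of that single-removal-path idea; the struct and helpers are hypothetical reductions of the real scsi_device handling, not its API:

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool visible;	/* set once sysfs registration has fully succeeded */
};

static void unregister_sysfs(struct dev *d) { (void)d; puts("unregister sysfs entries"); }
static void drop_reference(struct dev *d)   { (void)d; puts("drop pending reference"); }
static void destroy(struct dev *d)          { (void)d; puts("final destroy"); }

/* one remove routine usable from both the error path and normal teardown */
static void remove_device(struct dev *d)
{
	if (d->visible)
		unregister_sysfs(d);	/* full teardown of registered state */
	else
		drop_reference(d);	/* only undo what add actually did */
	destroy(d);
}

int main(void)
{
	struct dev registered = { .visible = true };
	struct dev half_added = { .visible = false };

	remove_device(&registered);
	remove_device(&half_added);
	return 0;
}
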
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a67fed10598a..c6f70dae9b2e 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3656,6 +3656,7 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3656fail_host_msg: 3656fail_host_msg:
3657 /* return the errno failure code as the only status */ 3657 /* return the errno failure code as the only status */
3658 BUG_ON(job->reply_len < sizeof(uint32_t)); 3658 BUG_ON(job->reply_len < sizeof(uint32_t));
3659 job->reply->reply_payload_rcv_len = 0;
3659 job->reply->result = ret; 3660 job->reply->result = ret;
3660 job->reply_len = sizeof(uint32_t); 3661 job->reply_len = sizeof(uint32_t);
3661 fc_bsg_jobdone(job); 3662 fc_bsg_jobdone(job);
@@ -3741,6 +3742,7 @@ check_bidi:
3741fail_rport_msg: 3742fail_rport_msg:
3742 /* return the errno failure code as the only status */ 3743 /* return the errno failure code as the only status */
3743 BUG_ON(job->reply_len < sizeof(uint32_t)); 3744 BUG_ON(job->reply_len < sizeof(uint32_t));
3745 job->reply->reply_payload_rcv_len = 0;
3744 job->reply->result = ret; 3746 job->reply->result = ret;
3745 job->reply_len = sizeof(uint32_t); 3747 job->reply_len = sizeof(uint32_t);
3746 fc_bsg_jobdone(job); 3748 fc_bsg_jobdone(job);
@@ -3797,6 +3799,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3797 /* check if we have the msgcode value at least */ 3799 /* check if we have the msgcode value at least */
3798 if (job->request_len < sizeof(uint32_t)) { 3800 if (job->request_len < sizeof(uint32_t)) {
3799 BUG_ON(job->reply_len < sizeof(uint32_t)); 3801 BUG_ON(job->reply_len < sizeof(uint32_t));
3802 job->reply->reply_payload_rcv_len = 0;
3800 job->reply->result = -ENOMSG; 3803 job->reply->result = -ENOMSG;
3801 job->reply_len = sizeof(uint32_t); 3804 job->reply_len = sizeof(uint32_t);
3802 fc_bsg_jobdone(job); 3805 fc_bsg_jobdone(job);
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 88da97745710..84be62149c6c 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -418,7 +418,7 @@ error:
418 __func__, virt, phys, be32_to_cpu(sdt->ref_tag), 418 __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
419 be16_to_cpu(sdt->app_tag)); 419 be16_to_cpu(sdt->app_tag));
420 420
421 return -EIO; 421 return -EILSEQ;
422} 422}
423 423
424/* 424/*
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c
index beddaa6e9069..37ad0c449937 100644
--- a/drivers/serial/bcm63xx_uart.c
+++ b/drivers/serial/bcm63xx_uart.c
@@ -242,7 +242,7 @@ static void bcm_uart_do_rx(struct uart_port *port)
242 * higher than fifo size anyway since we're much faster than 242 * higher than fifo size anyway since we're much faster than
243 * serial port */ 243 * serial port */
244 max_count = 32; 244 max_count = 32;
245 tty = port->info->port.tty; 245 tty = port->state->port.tty;
246 do { 246 do {
247 unsigned int iestat, c, cstat; 247 unsigned int iestat, c, cstat;
248 char flag; 248 char flag;
@@ -318,7 +318,7 @@ static void bcm_uart_do_tx(struct uart_port *port)
318 return; 318 return;
319 } 319 }
320 320
321 xmit = &port->info->xmit; 321 xmit = &port->state->xmit;
322 if (uart_circ_empty(xmit)) 322 if (uart_circ_empty(xmit))
323 goto txq_empty; 323 goto txq_empty;
324 324
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c
index 02406ba6da1c..cdf172eda2e3 100644
--- a/drivers/serial/of_serial.c
+++ b/drivers/serial/of_serial.c
@@ -161,6 +161,7 @@ static int of_platform_serial_remove(struct of_device *ofdev)
161static struct of_device_id __devinitdata of_platform_serial_table[] = { 161static struct of_device_id __devinitdata of_platform_serial_table[] = {
162 { .type = "serial", .compatible = "ns8250", .data = (void *)PORT_8250, }, 162 { .type = "serial", .compatible = "ns8250", .data = (void *)PORT_8250, },
163 { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, }, 163 { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, },
164 { .type = "serial", .compatible = "ns16550a", .data = (void *)PORT_16550A, },
164 { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, }, 165 { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, },
165 { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, }, 166 { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, },
166 { .type = "serial", .compatible = "ns16850", .data = (void *)PORT_16850, }, 167 { .type = "serial", .compatible = "ns16850", .data = (void *)PORT_16850, },
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 7c7914f5fa02..fc413f0f8dd2 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -54,14 +54,6 @@
54 54
55#include "8250.h" 55#include "8250.h"
56 56
57#ifdef PCMCIA_DEBUG
58static int pc_debug = PCMCIA_DEBUG;
59module_param(pc_debug, int, 0644);
60#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
61static char *version = "serial_cs.c 1.134 2002/05/04 05:48:53 (David Hinds)";
62#else
63#define DEBUG(n, args...)
64#endif
65 57
66/*====================================================================*/ 58/*====================================================================*/
67 59
@@ -121,24 +113,20 @@ static void quirk_setup_brainboxes_0104(struct pcmcia_device *link, struct uart_
121static int quirk_post_ibm(struct pcmcia_device *link) 113static int quirk_post_ibm(struct pcmcia_device *link)
122{ 114{
123 conf_reg_t reg = { 0, CS_READ, 0x800, 0 }; 115 conf_reg_t reg = { 0, CS_READ, 0x800, 0 };
124 int last_ret, last_fn; 116 int ret;
117
118 ret = pcmcia_access_configuration_register(link, &reg);
119 if (ret)
120 goto failed;
125 121
126 last_ret = pcmcia_access_configuration_register(link, &reg);
127 if (last_ret) {
128 last_fn = AccessConfigurationRegister;
129 goto cs_failed;
130 }
131 reg.Action = CS_WRITE; 122 reg.Action = CS_WRITE;
132 reg.Value = reg.Value | 1; 123 reg.Value = reg.Value | 1;
133 last_ret = pcmcia_access_configuration_register(link, &reg); 124 ret = pcmcia_access_configuration_register(link, &reg);
134 if (last_ret) { 125 if (ret)
135 last_fn = AccessConfigurationRegister; 126 goto failed;
136 goto cs_failed;
137 }
138 return 0; 127 return 0;
139 128
140 cs_failed: 129 failed:
141 cs_error(link, last_fn, last_ret);
142 return -ENODEV; 130 return -ENODEV;
143} 131}
144 132
@@ -283,7 +271,7 @@ static void serial_remove(struct pcmcia_device *link)
283 struct serial_info *info = link->priv; 271 struct serial_info *info = link->priv;
284 int i; 272 int i;
285 273
286 DEBUG(0, "serial_release(0x%p)\n", link); 274 dev_dbg(&link->dev, "serial_release\n");
287 275
288 /* 276 /*
289 * Recheck to see if the device is still configured. 277 * Recheck to see if the device is still configured.
@@ -334,7 +322,7 @@ static int serial_probe(struct pcmcia_device *link)
334{ 322{
335 struct serial_info *info; 323 struct serial_info *info;
336 324
337 DEBUG(0, "serial_attach()\n"); 325 dev_dbg(&link->dev, "serial_attach()\n");
338 326
339 /* Create new serial device */ 327 /* Create new serial device */
340 info = kzalloc(sizeof (*info), GFP_KERNEL); 328 info = kzalloc(sizeof (*info), GFP_KERNEL);
@@ -346,7 +334,6 @@ static int serial_probe(struct pcmcia_device *link)
346 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 334 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
347 link->io.NumPorts1 = 8; 335 link->io.NumPorts1 = 8;
348 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 336 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
349 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
350 link->conf.Attributes = CONF_ENABLE_IRQ; 337 link->conf.Attributes = CONF_ENABLE_IRQ;
351 if (do_sound) { 338 if (do_sound) {
352 link->conf.Attributes |= CONF_ENABLE_SPKR; 339 link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -370,7 +357,7 @@ static void serial_detach(struct pcmcia_device *link)
370{ 357{
371 struct serial_info *info = link->priv; 358 struct serial_info *info = link->priv;
372 359
373 DEBUG(0, "serial_detach(0x%p)\n", link); 360 dev_dbg(&link->dev, "serial_detach\n");
374 361
375 /* 362 /*
376 * Ensure any outstanding scheduled tasks are completed. 363 * Ensure any outstanding scheduled tasks are completed.
@@ -399,7 +386,7 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
399 port.irq = irq; 386 port.irq = irq;
400 port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; 387 port.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
401 port.uartclk = 1843200; 388 port.uartclk = 1843200;
402 port.dev = &handle_to_dev(handle); 389 port.dev = &handle->dev;
403 if (buggy_uart) 390 if (buggy_uart)
404 port.flags |= UPF_BUGGY_UART; 391 port.flags |= UPF_BUGGY_UART;
405 392
@@ -426,21 +413,6 @@ static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
426 413
427/*====================================================================*/ 414/*====================================================================*/
428 415
429static int
430first_tuple(struct pcmcia_device *handle, tuple_t * tuple, cisparse_t * parse)
431{
432 int i;
433 i = pcmcia_get_first_tuple(handle, tuple);
434 if (i != 0)
435 return i;
436 i = pcmcia_get_tuple_data(handle, tuple);
437 if (i != 0)
438 return i;
439 return pcmcia_parse_tuple(tuple, parse);
440}
441
442/*====================================================================*/
443
444static int simple_config_check(struct pcmcia_device *p_dev, 416static int simple_config_check(struct pcmcia_device *p_dev,
445 cistpl_cftable_entry_t *cf, 417 cistpl_cftable_entry_t *cf,
446 cistpl_cftable_entry_t *dflt, 418 cistpl_cftable_entry_t *dflt,
@@ -522,15 +494,13 @@ static int simple_config(struct pcmcia_device *link)
522 494
523 printk(KERN_NOTICE 495 printk(KERN_NOTICE
524 "serial_cs: no usable port range found, giving up\n"); 496 "serial_cs: no usable port range found, giving up\n");
525 cs_error(link, RequestIO, i);
526 return -1; 497 return -1;
527 498
528found_port: 499found_port:
529 i = pcmcia_request_irq(link, &link->irq); 500 i = pcmcia_request_irq(link, &link->irq);
530 if (i != 0) { 501 if (i != 0)
531 cs_error(link, RequestIRQ, i);
532 link->irq.AssignedIRQ = 0; 502 link->irq.AssignedIRQ = 0;
533 } 503
534 if (info->multi && (info->manfid == MANFID_3COM)) 504 if (info->multi && (info->manfid == MANFID_3COM))
535 link->conf.ConfigIndex &= ~(0x08); 505 link->conf.ConfigIndex &= ~(0x08);
536 506
@@ -541,10 +511,8 @@ found_port:
541 info->quirk->config(link); 511 info->quirk->config(link);
542 512
543 i = pcmcia_request_configuration(link, &link->conf); 513 i = pcmcia_request_configuration(link, &link->conf);
544 if (i != 0) { 514 if (i != 0)
545 cs_error(link, RequestConfiguration, i);
546 return -1; 515 return -1;
547 }
548 return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ); 516 return setup_serial(link, info, link->io.BasePort1, link->irq.AssignedIRQ);
549} 517}
550 518
@@ -613,7 +581,6 @@ static int multi_config(struct pcmcia_device *link)
613 /* FIXME: comment does not fit, error handling does not fit */ 581 /* FIXME: comment does not fit, error handling does not fit */
614 printk(KERN_NOTICE 582 printk(KERN_NOTICE
615 "serial_cs: no usable port range found, giving up\n"); 583 "serial_cs: no usable port range found, giving up\n");
616 cs_error(link, RequestIRQ, i);
617 link->irq.AssignedIRQ = 0; 584 link->irq.AssignedIRQ = 0;
618 } 585 }
619 586
@@ -624,10 +591,8 @@ static int multi_config(struct pcmcia_device *link)
624 info->quirk->config(link); 591 info->quirk->config(link);
625 592
626 i = pcmcia_request_configuration(link, &link->conf); 593 i = pcmcia_request_configuration(link, &link->conf);
627 if (i != 0) { 594 if (i != 0)
628 cs_error(link, RequestConfiguration, i);
629 return -ENODEV; 595 return -ENODEV;
630 }
631 596
632 /* The Oxford Semiconductor OXCF950 cards are in fact single-port: 597 /* The Oxford Semiconductor OXCF950 cards are in fact single-port:
633 * 8 registers are for the UART, the others are extra registers. 598 * 8 registers are for the UART, the others are extra registers.
@@ -665,6 +630,25 @@ static int multi_config(struct pcmcia_device *link)
665 return 0; 630 return 0;
666} 631}
667 632
633static int serial_check_for_multi(struct pcmcia_device *p_dev,
634 cistpl_cftable_entry_t *cf,
635 cistpl_cftable_entry_t *dflt,
636 unsigned int vcc,
637 void *priv_data)
638{
639 struct serial_info *info = p_dev->priv;
640
641 if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0))
642 info->multi = cf->io.win[0].len >> 3;
643
644 if ((cf->io.nwin == 2) && (cf->io.win[0].len == 8) &&
645 (cf->io.win[1].len == 8))
646 info->multi = 2;
647
648 return 0; /* break */
649}
650
651
668/*====================================================================== 652/*======================================================================
669 653
670 serial_config() is scheduled to run after a CARD_INSERTION event 654 serial_config() is scheduled to run after a CARD_INSERTION event
@@ -676,46 +660,14 @@ static int multi_config(struct pcmcia_device *link)
676static int serial_config(struct pcmcia_device * link) 660static int serial_config(struct pcmcia_device * link)
677{ 661{
678 struct serial_info *info = link->priv; 662 struct serial_info *info = link->priv;
679 struct serial_cfg_mem *cfg_mem; 663 int i;
680 tuple_t *tuple;
681 u_char *buf;
682 cisparse_t *parse;
683 cistpl_cftable_entry_t *cf;
684 int i, last_ret, last_fn;
685
686 DEBUG(0, "serial_config(0x%p)\n", link);
687
688 cfg_mem = kmalloc(sizeof(struct serial_cfg_mem), GFP_KERNEL);
689 if (!cfg_mem)
690 goto failed;
691 664
692 tuple = &cfg_mem->tuple; 665 dev_dbg(&link->dev, "serial_config\n");
693 parse = &cfg_mem->parse;
694 cf = &parse->cftable_entry;
695 buf = cfg_mem->buf;
696
697 tuple->TupleData = (cisdata_t *) buf;
698 tuple->TupleOffset = 0;
699 tuple->TupleDataMax = 255;
700 tuple->Attributes = 0;
701
702 /* Get configuration register information */
703 tuple->DesiredTuple = CISTPL_CONFIG;
704 last_ret = first_tuple(link, tuple, parse);
705 if (last_ret != 0) {
706 last_fn = ParseTuple;
707 goto cs_failed;
708 }
709 link->conf.ConfigBase = parse->config.base;
710 link->conf.Present = parse->config.rmask[0];
711 666
712 /* Is this a compliant multifunction card? */ 667 /* Is this a compliant multifunction card? */
713 tuple->DesiredTuple = CISTPL_LONGLINK_MFC; 668 info->multi = (link->socket->functions > 1);
714 tuple->Attributes = TUPLE_RETURN_COMMON | TUPLE_RETURN_LINK;
715 info->multi = (first_tuple(link, tuple, parse) == 0);
716 669
717 /* Is this a multiport card? */ 670 /* Is this a multiport card? */
718 tuple->DesiredTuple = CISTPL_MANFID;
719 info->manfid = link->manf_id; 671 info->manfid = link->manf_id;
720 info->prodid = link->card_id; 672 info->prodid = link->card_id;
721 673
@@ -730,20 +682,11 @@ static int serial_config(struct pcmcia_device * link)
730 682
731 /* Another check for dual-serial cards: look for either serial or 683 /* Another check for dual-serial cards: look for either serial or
732 multifunction cards that ask for appropriate IO port ranges */ 684 multifunction cards that ask for appropriate IO port ranges */
733 tuple->DesiredTuple = CISTPL_FUNCID;
734 if ((info->multi == 0) && 685 if ((info->multi == 0) &&
735 (link->has_func_id) && 686 (link->has_func_id) &&
736 ((link->func_id == CISTPL_FUNCID_MULTI) || 687 ((link->func_id == CISTPL_FUNCID_MULTI) ||
737 (link->func_id == CISTPL_FUNCID_SERIAL))) { 688 (link->func_id == CISTPL_FUNCID_SERIAL)))
738 tuple->DesiredTuple = CISTPL_CFTABLE_ENTRY; 689 pcmcia_loop_config(link, serial_check_for_multi, info);
739 if (first_tuple(link, tuple, parse) == 0) {
740 if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0))
741 info->multi = cf->io.win[0].len >> 3;
742 if ((cf->io.nwin == 2) && (cf->io.win[0].len == 8) &&
743 (cf->io.win[1].len == 8))
744 info->multi = 2;
745 }
746 }
747 690
748 /* 691 /*
749 * Apply any multi-port quirk. 692 * Apply any multi-port quirk.
@@ -768,14 +711,10 @@ static int serial_config(struct pcmcia_device * link)
768 goto failed; 711 goto failed;
769 712
770 link->dev_node = &info->node[0]; 713 link->dev_node = &info->node[0];
771 kfree(cfg_mem);
772 return 0; 714 return 0;
773 715
774cs_failed:
775 cs_error(link, last_fn, last_ret);
776failed: 716failed:
777 serial_remove(link); 717 serial_remove(link);
778 kfree(cfg_mem);
779 return -ENODEV; 718 return -ENODEV;
780} 719}
781 720
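
serial_cs above replaces the hand-rolled tuple walking with pcmcia_loop_config() and a serial_check_for_multi() callback that inspects each config table entry for multi-port I/O windows. A rough, self-contained sketch of that callback-iteration shape; the entry layout, loop helper and field names below are simplifications for illustration, not the real PCMCIA API:

#include <stdio.h>
#include <stddef.h>

struct cfg_entry { int nwin; int win_len[2]; };

struct serial_info_sketch { int multi; };

typedef int (*cfg_check_t)(const struct cfg_entry *e, void *priv);

/* call the check on each entry; a zero return means "match found, stop" */
static int loop_config(const struct cfg_entry *entries, size_t n,
		       cfg_check_t check, void *priv)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (check(&entries[i], priv) == 0)
			return 0;
	return -1;
}

static int check_for_multi(const struct cfg_entry *e, void *priv)
{
	struct serial_info_sketch *info = priv;

	if (e->nwin == 1 && e->win_len[0] % 8 == 0)
		info->multi = e->win_len[0] >> 3;
	if (e->nwin == 2 && e->win_len[0] == 8 && e->win_len[1] == 8)
		info->multi = 2;
	return 0;		/* break, as the real callback does */
}

int main(void)
{
	struct cfg_entry entries[] = { { 1, { 16, 0 } } };
	struct serial_info_sketch info = { 0 };

	loop_config(entries, 1, check_for_multi, &info);
	printf("multi = %d\n", info.multi);
	return 0;
}
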
diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c
index a2d4a19550ab..ed7d958b0a01 100644
--- a/drivers/serial/suncore.c
+++ b/drivers/serial/suncore.c
@@ -53,20 +53,21 @@ void sunserial_unregister_minors(struct uart_driver *drv, int count)
53EXPORT_SYMBOL(sunserial_unregister_minors); 53EXPORT_SYMBOL(sunserial_unregister_minors);
54 54
55int sunserial_console_match(struct console *con, struct device_node *dp, 55int sunserial_console_match(struct console *con, struct device_node *dp,
56 struct uart_driver *drv, int line) 56 struct uart_driver *drv, int line, bool ignore_line)
57{ 57{
58 int off;
59
60 if (!con || of_console_device != dp) 58 if (!con || of_console_device != dp)
61 return 0; 59 return 0;
62 60
63 off = 0; 61 if (!ignore_line) {
64 if (of_console_options && 62 int off = 0;
65 *of_console_options == 'b')
66 off = 1;
67 63
68 if ((line & 1) != off) 64 if (of_console_options &&
69 return 0; 65 *of_console_options == 'b')
66 off = 1;
67
68 if ((line & 1) != off)
69 return 0;
70 }
70 71
71 con->index = line; 72 con->index = line;
72 drv->cons = con; 73 drv->cons = con;
@@ -76,23 +77,24 @@ int sunserial_console_match(struct console *con, struct device_node *dp,
76} 77}
77EXPORT_SYMBOL(sunserial_console_match); 78EXPORT_SYMBOL(sunserial_console_match);
78 79
79void 80void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
80sunserial_console_termios(struct console *con)
81{ 81{
82 struct device_node *dp; 82 const char *mode, *s;
83 const char *od, *mode, *s;
84 char mode_prop[] = "ttyX-mode"; 83 char mode_prop[] = "ttyX-mode";
85 int baud, bits, stop, cflag; 84 int baud, bits, stop, cflag;
86 char parity; 85 char parity;
87 86
88 dp = of_find_node_by_path("/options"); 87 if (!strcmp(uart_dp->name, "rsc") ||
89 od = of_get_property(dp, "output-device", NULL); 88 !strcmp(uart_dp->name, "rsc-console") ||
90 if (!strcmp(od, "rsc")) { 89 !strcmp(uart_dp->name, "rsc-control")) {
91 mode = of_get_property(of_console_device, 90 mode = of_get_property(uart_dp,
92 "ssp-console-modes", NULL); 91 "ssp-console-modes", NULL);
93 if (!mode) 92 if (!mode)
94 mode = "115200,8,n,1,-"; 93 mode = "115200,8,n,1,-";
94 } else if (!strcmp(uart_dp->name, "lom-console")) {
95 mode = "9600,8,n,1,-";
95 } else { 96 } else {
97 struct device_node *dp;
96 char c; 98 char c;
97 99
98 c = 'a'; 100 c = 'a';
@@ -101,6 +103,7 @@ sunserial_console_termios(struct console *con)
101 103
102 mode_prop[3] = c; 104 mode_prop[3] = c;
103 105
106 dp = of_find_node_by_path("/options");
104 mode = of_get_property(dp, mode_prop, NULL); 107 mode = of_get_property(dp, mode_prop, NULL);
105 if (!mode) 108 if (!mode)
106 mode = "9600,8,n,1,-"; 109 mode = "9600,8,n,1,-";
diff --git a/drivers/serial/suncore.h b/drivers/serial/suncore.h
index 042668aa602e..db2057936c31 100644
--- a/drivers/serial/suncore.h
+++ b/drivers/serial/suncore.h
@@ -26,7 +26,8 @@ extern int sunserial_register_minors(struct uart_driver *, int);
26extern void sunserial_unregister_minors(struct uart_driver *, int); 26extern void sunserial_unregister_minors(struct uart_driver *, int);
27 27
28extern int sunserial_console_match(struct console *, struct device_node *, 28extern int sunserial_console_match(struct console *, struct device_node *,
29 struct uart_driver *, int); 29 struct uart_driver *, int, bool);
30extern void sunserial_console_termios(struct console *); 30extern void sunserial_console_termios(struct console *,
31 struct device_node *);
31 32
32#endif /* !(_SERIAL_SUN_H) */ 33#endif /* !(_SERIAL_SUN_H) */
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index d548652dee50..d14cca7fb88d 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -566,7 +566,7 @@ static int __devinit hv_probe(struct of_device *op, const struct of_device_id *m
566 goto out_free_con_read_page; 566 goto out_free_con_read_page;
567 567
568 sunserial_console_match(&sunhv_console, op->node, 568 sunserial_console_match(&sunhv_console, op->node,
569 &sunhv_reg, port->line); 569 &sunhv_reg, port->line, false);
570 570
571 err = uart_add_one_port(&sunhv_reg, port); 571 err = uart_add_one_port(&sunhv_reg, port);
572 if (err) 572 if (err)
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index d1ad34128635..d514e28d0755 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -883,7 +883,7 @@ static int sunsab_console_setup(struct console *con, char *options)
883 printk("Console: ttyS%d (SAB82532)\n", 883 printk("Console: ttyS%d (SAB82532)\n",
884 (sunsab_reg.minor - 64) + con->index); 884 (sunsab_reg.minor - 64) + con->index);
885 885
886 sunserial_console_termios(con); 886 sunserial_console_termios(con, to_of_device(up->port.dev)->node);
887 887
888 switch (con->cflag & CBAUD) { 888 switch (con->cflag & CBAUD) {
889 case B150: baud = 150; break; 889 case B150: baud = 150; break;
@@ -1027,10 +1027,12 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id *
1027 goto out1; 1027 goto out1;
1028 1028
1029 sunserial_console_match(SUNSAB_CONSOLE(), op->node, 1029 sunserial_console_match(SUNSAB_CONSOLE(), op->node,
1030 &sunsab_reg, up[0].port.line); 1030 &sunsab_reg, up[0].port.line,
1031 false);
1031 1032
1032 sunserial_console_match(SUNSAB_CONSOLE(), op->node, 1033 sunserial_console_match(SUNSAB_CONSOLE(), op->node,
1033 &sunsab_reg, up[1].port.line); 1034 &sunsab_reg, up[1].port.line,
1035 false);
1034 1036
1035 err = uart_add_one_port(&sunsab_reg, &up[0].port); 1037 err = uart_add_one_port(&sunsab_reg, &up[0].port);
1036 if (err) 1038 if (err)
@@ -1116,7 +1118,6 @@ static int __init sunsab_init(void)
1116 if (!sunsab_ports) 1118 if (!sunsab_ports)
1117 return -ENOMEM; 1119 return -ENOMEM;
1118 1120
1119 sunsab_reg.cons = SUNSAB_CONSOLE();
1120 err = sunserial_register_minors(&sunsab_reg, num_channels); 1121 err = sunserial_register_minors(&sunsab_reg, num_channels);
1121 if (err) { 1122 if (err) {
1122 kfree(sunsab_ports); 1123 kfree(sunsab_ports);
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 68d262b15749..170d3d68c8f0 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1329,11 +1329,9 @@ static void sunsu_console_write(struct console *co, const char *s,
1329 */ 1329 */
1330static int __init sunsu_console_setup(struct console *co, char *options) 1330static int __init sunsu_console_setup(struct console *co, char *options)
1331{ 1331{
1332 static struct ktermios dummy;
1333 struct ktermios termios;
1332 struct uart_port *port; 1334 struct uart_port *port;
1333 int baud = 9600;
1334 int bits = 8;
1335 int parity = 'n';
1336 int flow = 'n';
1337 1335
1338 printk("Console: ttyS%d (SU)\n", 1336 printk("Console: ttyS%d (SU)\n",
1339 (sunsu_reg.minor - 64) + co->index); 1337 (sunsu_reg.minor - 64) + co->index);
@@ -1352,10 +1350,15 @@ static int __init sunsu_console_setup(struct console *co, char *options)
1352 */ 1350 */
1353 spin_lock_init(&port->lock); 1351 spin_lock_init(&port->lock);
1354 1352
1355 if (options) 1353 /* Get firmware console settings. */
1356 uart_parse_options(options, &baud, &parity, &bits, &flow); 1354 sunserial_console_termios(co, to_of_device(port->dev)->node);
1357 1355
1358 return uart_set_options(port, co, baud, parity, bits, flow); 1356 memset(&termios, 0, sizeof(struct ktermios));
1357 termios.c_cflag = co->cflag;
1358 port->mctrl |= TIOCM_DTR;
1359 port->ops->set_termios(port, &termios, &dummy);
1360
1361 return 0;
1359} 1362}
1360 1363
1361static struct console sunsu_console = { 1364static struct console sunsu_console = {
@@ -1409,6 +1412,7 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m
1409 struct uart_sunsu_port *up; 1412 struct uart_sunsu_port *up;
1410 struct resource *rp; 1413 struct resource *rp;
1411 enum su_type type; 1414 enum su_type type;
1415 bool ignore_line;
1412 int err; 1416 int err;
1413 1417
1414 type = su_get_type(dp); 1418 type = su_get_type(dp);
@@ -1467,8 +1471,14 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m
1467 1471
1468 up->port.ops = &sunsu_pops; 1472 up->port.ops = &sunsu_pops;
1469 1473
1474 ignore_line = false;
1475 if (!strcmp(dp->name, "rsc-console") ||
1476 !strcmp(dp->name, "lom-console"))
1477 ignore_line = true;
1478
1470 sunserial_console_match(SUNSU_CONSOLE(), dp, 1479 sunserial_console_match(SUNSU_CONSOLE(), dp,
1471 &sunsu_reg, up->port.line); 1480 &sunsu_reg, up->port.line,
1481 ignore_line);
1472 err = uart_add_one_port(&sunsu_reg, &up->port); 1482 err = uart_add_one_port(&sunsu_reg, &up->port);
1473 if (err) 1483 if (err)
1474 goto out_unmap; 1484 goto out_unmap;
@@ -1517,6 +1527,10 @@ static const struct of_device_id su_match[] = {
1517 .name = "serial", 1527 .name = "serial",
1518 .compatible = "su", 1528 .compatible = "su",
1519 }, 1529 },
1530 {
1531 .type = "serial",
1532 .compatible = "su",
1533 },
1520 {}, 1534 {},
1521}; 1535};
1522MODULE_DEVICE_TABLE(of, su_match); 1536MODULE_DEVICE_TABLE(of, su_match);
@@ -1548,6 +1562,12 @@ static int __init sunsu_init(void)
1548 num_uart++; 1562 num_uart++;
1549 } 1563 }
1550 } 1564 }
1565 for_each_node_by_type(dp, "serial") {
1566 if (of_device_is_compatible(dp, "su")) {
1567 if (su_get_type(dp) == SU_PORT_PORT)
1568 num_uart++;
1569 }
1570 }
1551 1571
1552 if (num_uart) { 1572 if (num_uart) {
1553 err = sunserial_register_minors(&sunsu_reg, num_uart); 1573 err = sunserial_register_minors(&sunsu_reg, num_uart);
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index ef693ae22e7f..2c7a66af4f52 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -1180,7 +1180,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
1180 (sunzilog_reg.minor - 64) + con->index, con->index); 1180 (sunzilog_reg.minor - 64) + con->index, con->index);
1181 1181
1182 /* Get firmware console settings. */ 1182 /* Get firmware console settings. */
1183 sunserial_console_termios(con); 1183 sunserial_console_termios(con, to_of_device(up->port.dev)->node);
1184 1184
1185 /* Firmware console speed is limited to 150-->38400 baud so 1185 /* Firmware console speed is limited to 150-->38400 baud so
1186 * this hackish cflag thing is OK. 1186 * this hackish cflag thing is OK.
@@ -1416,7 +1416,8 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
1416 1416
1417 if (!keyboard_mouse) { 1417 if (!keyboard_mouse) {
1418 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node, 1418 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node,
1419 &sunzilog_reg, up[0].port.line)) 1419 &sunzilog_reg, up[0].port.line,
1420 false))
1420 up->flags |= SUNZILOG_FLAG_IS_CONS; 1421 up->flags |= SUNZILOG_FLAG_IS_CONS;
1421 err = uart_add_one_port(&sunzilog_reg, &up[0].port); 1422 err = uart_add_one_port(&sunzilog_reg, &up[0].port);
1422 if (err) { 1423 if (err) {
@@ -1425,7 +1426,8 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
1425 return err; 1426 return err;
1426 } 1427 }
1427 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node, 1428 if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node,
1428 &sunzilog_reg, up[1].port.line)) 1429 &sunzilog_reg, up[1].port.line,
1430 false))
1429 up->flags |= SUNZILOG_FLAG_IS_CONS; 1431 up->flags |= SUNZILOG_FLAG_IS_CONS;
1430 err = uart_add_one_port(&sunzilog_reg, &up[1].port); 1432 err = uart_add_one_port(&sunzilog_reg, &up[1].port);
1431 if (err) { 1433 if (err) {
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c
index d871dc23909c..2552bb364005 100644
--- a/drivers/spi/spi_stmp.c
+++ b/drivers/spi/spi_stmp.c
@@ -242,7 +242,7 @@ static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs,
242 wait_for_completion(&ss->done); 242 wait_for_completion(&ss->done);
243 243
244 if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN)) 244 if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN))
245 status = ETIMEDOUT; 245 status = -ETIMEDOUT;
246 246
247 if (!dma_buf) 247 if (!dma_buf)
248 dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir); 248 dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir);
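Editor's note: the one-character spi_stmp.c fix above matters because in-kernel status codes are negative errno values; a positive ETIMEDOUT would look like success to the usual "status < 0" checks. A tiny standalone demonstration, using userspace errno.h purely to pull in the constant:

#include <errno.h>
#include <stdio.h>

int main(void)
{
	int wrong = ETIMEDOUT;	/* what the old code stored */
	int right = -ETIMEDOUT;	/* what callers actually test for */

	printf("wrong treated as error: %s\n", wrong < 0 ? "yes" : "no");
	printf("right treated as error: %s\n", right < 0 ? "yes" : "no");
	return 0;
}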
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index 96057de133ad..19f75627c3de 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -29,6 +29,8 @@
29 29
30 30
31#define SPI_FIFO_SIZE 4 31#define SPI_FIFO_SIZE 4
32#define SPI_MAX_DIVIDER 0xff /* Max. value for SPCR1.SER */
33#define SPI_MIN_DIVIDER 1 /* Min. value for SPCR1.SER */
32 34
33#define TXx9_SPMCR 0x00 35#define TXx9_SPMCR 0x00
34#define TXx9_SPCR0 0x04 36#define TXx9_SPCR0 0x04
@@ -193,11 +195,8 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
193 195
194 if (prev_speed_hz != speed_hz 196 if (prev_speed_hz != speed_hz
195 || prev_bits_per_word != bits_per_word) { 197 || prev_bits_per_word != bits_per_word) {
196 u32 n = (c->baseclk + speed_hz - 1) / speed_hz; 198 int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1;
197 if (n < 1) 199 n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER);
198 n = 1;
199 else if (n > 0xff)
200 n = 0xff;
201 /* enter config mode */ 200 /* enter config mode */
202 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, 201 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
203 TXx9_SPMCR); 202 TXx9_SPMCR);
@@ -370,8 +369,8 @@ static int __init txx9spi_probe(struct platform_device *dev)
370 goto exit; 369 goto exit;
371 } 370 }
372 c->baseclk = clk_get_rate(c->clk); 371 c->baseclk = clk_get_rate(c->clk);
373 c->min_speed_hz = (c->baseclk + 0xff - 1) / 0xff; 372 c->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
374 c->max_speed_hz = c->baseclk; 373 c->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);
375 374
376 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 375 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
377 if (!res) 376 if (!res)
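Editor's note: the spi_txx9.c hunk above replaces open-coded clamping with DIV_ROUND_UP() and clamp(), and derives the speed limits from the new SPI_MIN_DIVIDER/SPI_MAX_DIVIDER constants. A standalone sketch of just that arithmetic follows; div_round_up() and clamp_int() are local re-implementations so the snippet compiles on its own, not the kernel macros.

#include <stdio.h>

#define SPI_MAX_DIVIDER 0xff	/* Max. value for SPCR1.SER */
#define SPI_MIN_DIVIDER 1	/* Min. value for SPCR1.SER */

static int div_round_up(int n, int d) { return (n + d - 1) / d; }
static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int baseclk = 50000000;		/* example base clock, Hz */
	int speed_hz = 1000000;		/* example requested speed, Hz */

	/* per-transfer divider, as in txx9spi_work_one() */
	int n = div_round_up(baseclk, speed_hz) - 1;
	n = clamp_int(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER);

	/* controller limits, as in txx9spi_probe() */
	int min_speed_hz = div_round_up(baseclk, SPI_MAX_DIVIDER + 1);
	int max_speed_hz = baseclk / (SPI_MIN_DIVIDER + 1);

	printf("divider=%d min=%d Hz max=%d Hz\n", n, min_speed_hz, max_speed_hz);
	return 0;
}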
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index 100e7a5c5ea1..e72f4046a5e0 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -617,136 +617,140 @@ static int ssb_pcmcia_sprom_check_crc(const u16 *sprom, size_t size)
617 } \ 617 } \
618 } while (0) 618 } while (0)
619 619
620int ssb_pcmcia_get_invariants(struct ssb_bus *bus, 620static int ssb_pcmcia_get_mac(struct pcmcia_device *p_dev,
621 struct ssb_init_invariants *iv) 621 tuple_t *tuple,
622 void *priv)
622{ 623{
623 tuple_t tuple; 624 struct ssb_sprom *sprom = priv;
624 int res; 625
625 unsigned char buf[32]; 626 if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
627 return -EINVAL;
628 if (tuple->TupleDataLen != ETH_ALEN + 2)
629 return -EINVAL;
630 if (tuple->TupleData[1] != ETH_ALEN)
631 return -EINVAL;
632 memcpy(sprom->il0mac, &tuple->TupleData[2], ETH_ALEN);
633 return 0;
634};
635
636static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev,
637 tuple_t *tuple,
638 void *priv)
639{
640 struct ssb_init_invariants *iv = priv;
626 struct ssb_sprom *sprom = &iv->sprom; 641 struct ssb_sprom *sprom = &iv->sprom;
627 struct ssb_boardinfo *bi = &iv->boardinfo; 642 struct ssb_boardinfo *bi = &iv->boardinfo;
628 const char *error_description; 643 const char *error_description;
629 644
645 GOTO_ERROR_ON(tuple->TupleDataLen < 1, "VEN tpl < 1");
646 switch (tuple->TupleData[0]) {
647 case SSB_PCMCIA_CIS_ID:
648 GOTO_ERROR_ON((tuple->TupleDataLen != 5) &&
649 (tuple->TupleDataLen != 7),
650 "id tpl size");
651 bi->vendor = tuple->TupleData[1] |
652 ((u16)tuple->TupleData[2] << 8);
653 break;
654 case SSB_PCMCIA_CIS_BOARDREV:
655 GOTO_ERROR_ON(tuple->TupleDataLen != 2,
656 "boardrev tpl size");
657 sprom->board_rev = tuple->TupleData[1];
658 break;
659 case SSB_PCMCIA_CIS_PA:
660 GOTO_ERROR_ON((tuple->TupleDataLen != 9) &&
661 (tuple->TupleDataLen != 10),
662 "pa tpl size");
663 sprom->pa0b0 = tuple->TupleData[1] |
664 ((u16)tuple->TupleData[2] << 8);
665 sprom->pa0b1 = tuple->TupleData[3] |
666 ((u16)tuple->TupleData[4] << 8);
667 sprom->pa0b2 = tuple->TupleData[5] |
668 ((u16)tuple->TupleData[6] << 8);
669 sprom->itssi_a = tuple->TupleData[7];
670 sprom->itssi_bg = tuple->TupleData[7];
671 sprom->maxpwr_a = tuple->TupleData[8];
672 sprom->maxpwr_bg = tuple->TupleData[8];
673 break;
674 case SSB_PCMCIA_CIS_OEMNAME:
675 /* We ignore this. */
676 break;
677 case SSB_PCMCIA_CIS_CCODE:
678 GOTO_ERROR_ON(tuple->TupleDataLen != 2,
679 "ccode tpl size");
680 sprom->country_code = tuple->TupleData[1];
681 break;
682 case SSB_PCMCIA_CIS_ANTENNA:
683 GOTO_ERROR_ON(tuple->TupleDataLen != 2,
684 "ant tpl size");
685 sprom->ant_available_a = tuple->TupleData[1];
686 sprom->ant_available_bg = tuple->TupleData[1];
687 break;
688 case SSB_PCMCIA_CIS_ANTGAIN:
689 GOTO_ERROR_ON(tuple->TupleDataLen != 2,
690 "antg tpl size");
691 sprom->antenna_gain.ghz24.a0 = tuple->TupleData[1];
692 sprom->antenna_gain.ghz24.a1 = tuple->TupleData[1];
693 sprom->antenna_gain.ghz24.a2 = tuple->TupleData[1];
694 sprom->antenna_gain.ghz24.a3 = tuple->TupleData[1];
695 sprom->antenna_gain.ghz5.a0 = tuple->TupleData[1];
696 sprom->antenna_gain.ghz5.a1 = tuple->TupleData[1];
697 sprom->antenna_gain.ghz5.a2 = tuple->TupleData[1];
698 sprom->antenna_gain.ghz5.a3 = tuple->TupleData[1];
699 break;
700 case SSB_PCMCIA_CIS_BFLAGS:
701 GOTO_ERROR_ON((tuple->TupleDataLen != 3) &&
702 (tuple->TupleDataLen != 5),
703 "bfl tpl size");
704 sprom->boardflags_lo = tuple->TupleData[1] |
705 ((u16)tuple->TupleData[2] << 8);
706 break;
707 case SSB_PCMCIA_CIS_LEDS:
708 GOTO_ERROR_ON(tuple->TupleDataLen != 5,
709 "leds tpl size");
710 sprom->gpio0 = tuple->TupleData[1];
711 sprom->gpio1 = tuple->TupleData[2];
712 sprom->gpio2 = tuple->TupleData[3];
713 sprom->gpio3 = tuple->TupleData[4];
714 break;
715 }
716 return -ENOSPC; /* continue with next entry */
717
718error:
719 ssb_printk(KERN_ERR PFX
720 "PCMCIA: Failed to fetch device invariants: %s\n",
721 error_description);
722 return -ENODEV;
723}
724
725
726int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
727 struct ssb_init_invariants *iv)
728{
729 struct ssb_sprom *sprom = &iv->sprom;
730 int res;
731
630 memset(sprom, 0xFF, sizeof(*sprom)); 732 memset(sprom, 0xFF, sizeof(*sprom));
631 sprom->revision = 1; 733 sprom->revision = 1;
632 sprom->boardflags_lo = 0; 734 sprom->boardflags_lo = 0;
633 sprom->boardflags_hi = 0; 735 sprom->boardflags_hi = 0;
634 736
635 /* First fetch the MAC address. */ 737 /* First fetch the MAC address. */
636 memset(&tuple, 0, sizeof(tuple)); 738 res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE,
637 tuple.DesiredTuple = CISTPL_FUNCE; 739 ssb_pcmcia_get_mac, sprom);
638 tuple.TupleData = buf; 740 if (res != 0) {
639 tuple.TupleDataMax = sizeof(buf); 741 ssb_printk(KERN_ERR PFX
640 res = pcmcia_get_first_tuple(bus->host_pcmcia, &tuple); 742 "PCMCIA: Failed to fetch MAC address\n");
641 GOTO_ERROR_ON(res != 0, "MAC first tpl"); 743 return -ENODEV;
642 res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple);
643 GOTO_ERROR_ON(res != 0, "MAC first tpl data");
644 while (1) {
645 GOTO_ERROR_ON(tuple.TupleDataLen < 1, "MAC tpl < 1");
646 if (tuple.TupleData[0] == CISTPL_FUNCE_LAN_NODE_ID)
647 break;
648 res = pcmcia_get_next_tuple(bus->host_pcmcia, &tuple);
649 GOTO_ERROR_ON(res != 0, "MAC next tpl");
650 res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple);
651 GOTO_ERROR_ON(res != 0, "MAC next tpl data");
652 } 744 }
653 GOTO_ERROR_ON(tuple.TupleDataLen != ETH_ALEN + 2, "MAC tpl size");
654 memcpy(sprom->il0mac, &tuple.TupleData[2], ETH_ALEN);
655 745
656 /* Fetch the vendor specific tuples. */ 746 /* Fetch the vendor specific tuples. */
657 memset(&tuple, 0, sizeof(tuple)); 747 res = pcmcia_loop_tuple(bus->host_pcmcia, SSB_PCMCIA_CIS,
658 tuple.DesiredTuple = SSB_PCMCIA_CIS; 748 ssb_pcmcia_do_get_invariants, sprom);
659 tuple.TupleData = buf; 749 if ((res == 0) || (res == -ENOSPC))
660 tuple.TupleDataMax = sizeof(buf); 750 return 0;
661 res = pcmcia_get_first_tuple(bus->host_pcmcia, &tuple);
662 GOTO_ERROR_ON(res != 0, "VEN first tpl");
663 res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple);
664 GOTO_ERROR_ON(res != 0, "VEN first tpl data");
665 while (1) {
666 GOTO_ERROR_ON(tuple.TupleDataLen < 1, "VEN tpl < 1");
667 switch (tuple.TupleData[0]) {
668 case SSB_PCMCIA_CIS_ID:
669 GOTO_ERROR_ON((tuple.TupleDataLen != 5) &&
670 (tuple.TupleDataLen != 7),
671 "id tpl size");
672 bi->vendor = tuple.TupleData[1] |
673 ((u16)tuple.TupleData[2] << 8);
674 break;
675 case SSB_PCMCIA_CIS_BOARDREV:
676 GOTO_ERROR_ON(tuple.TupleDataLen != 2,
677 "boardrev tpl size");
678 sprom->board_rev = tuple.TupleData[1];
679 break;
680 case SSB_PCMCIA_CIS_PA:
681 GOTO_ERROR_ON((tuple.TupleDataLen != 9) &&
682 (tuple.TupleDataLen != 10),
683 "pa tpl size");
684 sprom->pa0b0 = tuple.TupleData[1] |
685 ((u16)tuple.TupleData[2] << 8);
686 sprom->pa0b1 = tuple.TupleData[3] |
687 ((u16)tuple.TupleData[4] << 8);
688 sprom->pa0b2 = tuple.TupleData[5] |
689 ((u16)tuple.TupleData[6] << 8);
690 sprom->itssi_a = tuple.TupleData[7];
691 sprom->itssi_bg = tuple.TupleData[7];
692 sprom->maxpwr_a = tuple.TupleData[8];
693 sprom->maxpwr_bg = tuple.TupleData[8];
694 break;
695 case SSB_PCMCIA_CIS_OEMNAME:
696 /* We ignore this. */
697 break;
698 case SSB_PCMCIA_CIS_CCODE:
699 GOTO_ERROR_ON(tuple.TupleDataLen != 2,
700 "ccode tpl size");
701 sprom->country_code = tuple.TupleData[1];
702 break;
703 case SSB_PCMCIA_CIS_ANTENNA:
704 GOTO_ERROR_ON(tuple.TupleDataLen != 2,
705 "ant tpl size");
706 sprom->ant_available_a = tuple.TupleData[1];
707 sprom->ant_available_bg = tuple.TupleData[1];
708 break;
709 case SSB_PCMCIA_CIS_ANTGAIN:
710 GOTO_ERROR_ON(tuple.TupleDataLen != 2,
711 "antg tpl size");
712 sprom->antenna_gain.ghz24.a0 = tuple.TupleData[1];
713 sprom->antenna_gain.ghz24.a1 = tuple.TupleData[1];
714 sprom->antenna_gain.ghz24.a2 = tuple.TupleData[1];
715 sprom->antenna_gain.ghz24.a3 = tuple.TupleData[1];
716 sprom->antenna_gain.ghz5.a0 = tuple.TupleData[1];
717 sprom->antenna_gain.ghz5.a1 = tuple.TupleData[1];
718 sprom->antenna_gain.ghz5.a2 = tuple.TupleData[1];
719 sprom->antenna_gain.ghz5.a3 = tuple.TupleData[1];
720 break;
721 case SSB_PCMCIA_CIS_BFLAGS:
722 GOTO_ERROR_ON((tuple.TupleDataLen != 3) &&
723 (tuple.TupleDataLen != 5),
724 "bfl tpl size");
725 sprom->boardflags_lo = tuple.TupleData[1] |
726 ((u16)tuple.TupleData[2] << 8);
727 break;
728 case SSB_PCMCIA_CIS_LEDS:
729 GOTO_ERROR_ON(tuple.TupleDataLen != 5,
730 "leds tpl size");
731 sprom->gpio0 = tuple.TupleData[1];
732 sprom->gpio1 = tuple.TupleData[2];
733 sprom->gpio2 = tuple.TupleData[3];
734 sprom->gpio3 = tuple.TupleData[4];
735 break;
736 }
737 res = pcmcia_get_next_tuple(bus->host_pcmcia, &tuple);
738 if (res == -ENOSPC)
739 break;
740 GOTO_ERROR_ON(res != 0, "VEN next tpl");
741 res = pcmcia_get_tuple_data(bus->host_pcmcia, &tuple);
742 GOTO_ERROR_ON(res != 0, "VEN next tpl data");
743 }
744 751
745 return 0;
746error:
747 ssb_printk(KERN_ERR PFX 752 ssb_printk(KERN_ERR PFX
748 "PCMCIA: Failed to fetch device invariants: %s\n", 753 "PCMCIA: Failed to fetch device invariants\n");
749 error_description);
750 return -ENODEV; 754 return -ENODEV;
751} 755}
752 756
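Editor's note: the ssb/pcmcia.c rework above splits the old hand-rolled tuple walk into callbacks driven by pcmcia_loop_tuple(); ssb_pcmcia_get_mac() now just validates one CISTPL_FUNCE tuple and copies the MAC. A standalone sketch of those validation checks follows; struct fake_tuple is a simplified stand-in for tuple_t, not the real pcmcia structure.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define CISTPL_FUNCE_LAN_NODE_ID 0x04

struct fake_tuple {
	unsigned char data[32];
	unsigned int len;
};

static int get_mac(const struct fake_tuple *t, unsigned char mac[ETH_ALEN])
{
	if (t->data[0] != CISTPL_FUNCE_LAN_NODE_ID)
		return -1;		/* not the LAN node ID sub-tuple */
	if (t->len != ETH_ALEN + 2)
		return -1;		/* unexpected tuple length */
	if (t->data[1] != ETH_ALEN)
		return -1;		/* embedded length mismatch */
	memcpy(mac, &t->data[2], ETH_ALEN);
	return 0;
}

int main(void)
{
	struct fake_tuple t = { { CISTPL_FUNCE_LAN_NODE_ID, ETH_ALEN,
				  0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
				ETH_ALEN + 2 };
	unsigned char mac[ETH_ALEN];

	if (!get_mac(&t, mac))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}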
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index b74212d698c7..e8b89e8ac9bd 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -162,6 +162,8 @@ static u8 chipid_to_nrcores(u16 chipid)
162static u32 scan_read32(struct ssb_bus *bus, u8 current_coreidx, 162static u32 scan_read32(struct ssb_bus *bus, u8 current_coreidx,
163 u16 offset) 163 u16 offset)
164{ 164{
165 u32 lo, hi;
166
165 switch (bus->bustype) { 167 switch (bus->bustype) {
166 case SSB_BUSTYPE_SSB: 168 case SSB_BUSTYPE_SSB:
167 offset += current_coreidx * SSB_CORE_SIZE; 169 offset += current_coreidx * SSB_CORE_SIZE;
@@ -174,7 +176,9 @@ static u32 scan_read32(struct ssb_bus *bus, u8 current_coreidx,
174 offset -= 0x800; 176 offset -= 0x800;
175 } else 177 } else
176 ssb_pcmcia_switch_segment(bus, 0); 178 ssb_pcmcia_switch_segment(bus, 0);
177 break; 179 lo = readw(bus->mmio + offset);
180 hi = readw(bus->mmio + offset + 2);
181 return lo | (hi << 16);
178 case SSB_BUSTYPE_SDIO: 182 case SSB_BUSTYPE_SDIO:
179 offset += current_coreidx * SSB_CORE_SIZE; 183 offset += current_coreidx * SSB_CORE_SIZE;
180 return ssb_sdio_scan_read32(bus, offset); 184 return ssb_sdio_scan_read32(bus, offset);
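Editor's note: in the ssb/scan.c hunk above, the PCMCIA case of scan_read32() now assembles the 32-bit value from two halfword reads instead of falling through. A trivial standalone sketch of that combination; an array read stands in for readw().

#include <stdint.h>
#include <stdio.h>

static uint32_t read32_from_halves(const uint16_t *mmio, unsigned int idx)
{
	uint32_t lo = mmio[idx];
	uint32_t hi = mmio[idx + 1];

	return lo | (hi << 16);
}

int main(void)
{
	uint16_t regs[2] = { 0x5678, 0x1234 };

	printf("0x%08x\n", (unsigned int)read32_from_halves(regs, 0)); /* 0x12345678 */
	return 0;
}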
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index 80c0df8656f3..39923cb388be 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -141,37 +141,14 @@ static int das16cs_timer_insn_config(struct comedi_device *dev,
141 struct comedi_insn *insn, 141 struct comedi_insn *insn,
142 unsigned int *data); 142 unsigned int *data);
143 143
144static int get_prodid(struct comedi_device *dev, struct pcmcia_device *link)
145{
146 tuple_t tuple;
147 u_short buf[128];
148 int prodid = 0;
149
150 tuple.TupleData = (cisdata_t *) buf;
151 tuple.TupleOffset = 0;
152 tuple.TupleDataMax = 255;
153 tuple.DesiredTuple = CISTPL_MANFID;
154 tuple.Attributes = TUPLE_RETURN_COMMON;
155 if ((pcmcia_get_first_tuple(link, &tuple) == 0) &&
156 (pcmcia_get_tuple_data(link, &tuple) == 0)) {
157 prodid = le16_to_cpu(buf[1]);
158 }
159
160 return prodid;
161}
162
163static const struct das16cs_board *das16cs_probe(struct comedi_device *dev, 144static const struct das16cs_board *das16cs_probe(struct comedi_device *dev,
164 struct pcmcia_device *link) 145 struct pcmcia_device *link)
165{ 146{
166 int id;
167 int i; 147 int i;
168 148
169 id = get_prodid(dev, link);
170
171 for (i = 0; i < n_boards; i++) { 149 for (i = 0; i < n_boards; i++) {
172 if (das16cs_boards[i].device_id == id) { 150 if (das16cs_boards[i].device_id == link->card_id)
173 return das16cs_boards + i; 151 return das16cs_boards + i;
174 }
175 } 152 }
176 153
177 printk("unknown board!\n"); 154 printk("unknown board!\n");
@@ -660,27 +637,8 @@ static int das16cs_timer_insn_config(struct comedi_device *dev,
660 637
661======================================================================*/ 638======================================================================*/
662 639
663/*
664 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
665 you do not define PCMCIA_DEBUG at all, all the debug code will be
666 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
667 be present but disabled -- but it can then be enabled for specific
668 modules at load time with a 'pc_debug=#' option to insmod.
669*/
670#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) 640#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
671 641
672#ifdef PCMCIA_DEBUG
673static int pc_debug = PCMCIA_DEBUG;
674module_param(pc_debug, int, 0644);
675#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
676static char *version =
677 "cb_das16_cs.c pcmcia code (David Schleef), modified from dummy_cs.c 1.31 2001/08/24 12:13:13 (David Hinds)";
678#else
679#define DEBUG(n, args...)
680#endif
681
682/*====================================================================*/
683
684static void das16cs_pcmcia_config(struct pcmcia_device *link); 642static void das16cs_pcmcia_config(struct pcmcia_device *link);
685static void das16cs_pcmcia_release(struct pcmcia_device *link); 643static void das16cs_pcmcia_release(struct pcmcia_device *link);
686static int das16cs_pcmcia_suspend(struct pcmcia_device *p_dev); 644static int das16cs_pcmcia_suspend(struct pcmcia_device *p_dev);
@@ -733,7 +691,7 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link)
733{ 691{
734 struct local_info_t *local; 692 struct local_info_t *local;
735 693
736 DEBUG(0, "das16cs_pcmcia_attach()\n"); 694 dev_dbg(&link->dev, "das16cs_pcmcia_attach()\n");
737 695
738 /* Allocate space for private device-specific data */ 696 /* Allocate space for private device-specific data */
739 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); 697 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
@@ -745,7 +703,6 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link)
745 /* Initialize the pcmcia_device structure */ 703 /* Initialize the pcmcia_device structure */
746 /* Interrupt setup */ 704 /* Interrupt setup */
747 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 705 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
748 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
749 link->irq.Handler = NULL; 706 link->irq.Handler = NULL;
750 707
751 link->conf.Attributes = 0; 708 link->conf.Attributes = 0;
@@ -760,7 +717,7 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link)
760 717
761static void das16cs_pcmcia_detach(struct pcmcia_device *link) 718static void das16cs_pcmcia_detach(struct pcmcia_device *link)
762{ 719{
763 DEBUG(0, "das16cs_pcmcia_detach(0x%p)\n", link); 720 dev_dbg(&link->dev, "das16cs_pcmcia_detach\n");
764 721
765 if (link->dev_node) { 722 if (link->dev_node) {
766 ((struct local_info_t *)link->priv)->stop = 1; 723 ((struct local_info_t *)link->priv)->stop = 1;
@@ -771,118 +728,55 @@ static void das16cs_pcmcia_detach(struct pcmcia_device *link)
771 kfree(link->priv); 728 kfree(link->priv);
772} /* das16cs_pcmcia_detach */ 729} /* das16cs_pcmcia_detach */
773 730
774static void das16cs_pcmcia_config(struct pcmcia_device *link)
775{
776 struct local_info_t *dev = link->priv;
777 tuple_t tuple;
778 cisparse_t parse;
779 int last_fn, last_ret;
780 u_char buf[64];
781 cistpl_cftable_entry_t dflt = { 0 };
782 731
783 DEBUG(0, "das16cs_pcmcia_config(0x%p)\n", link); 732static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev,
784 733 cistpl_cftable_entry_t *cfg,
785 /* 734 cistpl_cftable_entry_t *dflt,
786 This reads the card's CONFIG tuple to find its configuration 735 unsigned int vcc,
787 registers. 736 void *priv_data)
788 */ 737{
789 tuple.DesiredTuple = CISTPL_CONFIG; 738 if (cfg->index == 0)
790 tuple.Attributes = 0; 739 return -EINVAL;
791 tuple.TupleData = buf;
792 tuple.TupleDataMax = sizeof(buf);
793 tuple.TupleOffset = 0;
794
795 last_fn = GetFirstTuple;
796 last_ret = pcmcia_get_first_tuple(link, &tuple);
797 if (last_ret != 0)
798 goto cs_failed;
799
800 last_fn = GetTupleData;
801 last_ret = pcmcia_get_tuple_data(link, &tuple);
802 if (last_ret != 0)
803 goto cs_failed;
804
805 last_fn = ParseTuple;
806 last_ret = pcmcia_parse_tuple(&tuple, &parse);
807 if (last_ret != 0)
808 goto cs_failed;
809
810 link->conf.ConfigBase = parse.config.base;
811 link->conf.Present = parse.config.rmask[0];
812 740
813 /* 741 /* Do we need to allocate an interrupt? */
814 In this loop, we scan the CIS for configuration table entries, 742 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
815 each of which describes a valid card configuration, including 743 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
816 voltage, IO window, memory window, and interrupt settings. 744
817 745 /* IO window settings */
818 We make no assumptions about the card to be configured: we use 746 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
819 just the information available in the CIS. In an ideal world, 747 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
820 this would work for any PCMCIA card, but it requires a complete 748 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
821 and accurate CIS. In practice, a driver usually "knows" most of 749 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
822 these things without consulting the CIS, and most client drivers 750 if (!(io->flags & CISTPL_IO_8BIT))
823 will only use the CIS to fill in implementation-defined details. 751 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
824 */ 752 if (!(io->flags & CISTPL_IO_16BIT))
825 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 753 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
826 last_fn = GetFirstTuple; 754 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
827 755 p_dev->io.BasePort1 = io->win[0].base;
828 last_ret = pcmcia_get_first_tuple(link, &tuple); 756 p_dev->io.NumPorts1 = io->win[0].len;
829 if (last_ret) 757 if (io->nwin > 1) {
830 goto cs_failed; 758 p_dev->io.Attributes2 = p_dev->io.Attributes1;
831 759 p_dev->io.BasePort2 = io->win[1].base;
832 while (1) { 760 p_dev->io.NumPorts2 = io->win[1].len;
833 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
834 if (pcmcia_get_tuple_data(link, &tuple))
835 goto next_entry;
836 if (pcmcia_parse_tuple(&tuple, &parse))
837 goto next_entry;
838
839 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
840 dflt = *cfg;
841 if (cfg->index == 0)
842 goto next_entry;
843 link->conf.ConfigIndex = cfg->index;
844
845 /* Does this card need audio output? */
846/* if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
847 link->conf.Attributes |= CONF_ENABLE_SPKR;
848 link->conf.Status = CCSR_AUDIO_ENA;
849 }
850*/
851 /* Do we need to allocate an interrupt? */
852 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
853 link->conf.Attributes |= CONF_ENABLE_IRQ;
854
855 /* IO window settings */
856 link->io.NumPorts1 = link->io.NumPorts2 = 0;
857 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
858 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
859 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
860 if (!(io->flags & CISTPL_IO_8BIT))
861 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
862 if (!(io->flags & CISTPL_IO_16BIT))
863 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
864 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
865 link->io.BasePort1 = io->win[0].base;
866 link->io.NumPorts1 = io->win[0].len;
867 if (io->nwin > 1) {
868 link->io.Attributes2 = link->io.Attributes1;
869 link->io.BasePort2 = io->win[1].base;
870 link->io.NumPorts2 = io->win[1].len;
871 }
872 /* This reserves IO space but doesn't actually enable it */
873 if (pcmcia_request_io(link, &link->io))
874 goto next_entry;
875 } 761 }
762 /* This reserves IO space but doesn't actually enable it */
763 return pcmcia_request_io(p_dev, &p_dev->io);
764 }
876 765
877 /* If we got this far, we're cool! */ 766 return 0;
878 break; 767}
768
769static void das16cs_pcmcia_config(struct pcmcia_device *link)
770{
771 struct local_info_t *dev = link->priv;
772 int ret;
879 773
880next_entry: 774 dev_dbg(&link->dev, "das16cs_pcmcia_config\n");
881 last_fn = GetNextTuple;
882 775
883 last_ret = pcmcia_get_next_tuple(link, &tuple); 776 ret = pcmcia_loop_config(link, das16cs_pcmcia_config_loop, NULL);
884 if (last_ret) 777 if (ret) {
885 goto cs_failed; 778 dev_warn(&link->dev, "no configuration found\n");
779 goto failed;
886 } 780 }
887 781
888 /* 782 /*
@@ -891,21 +785,18 @@ next_entry:
891 irq structure is initialized. 785 irq structure is initialized.
892 */ 786 */
893 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 787 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
894 last_fn = RequestIRQ; 788 ret = pcmcia_request_irq(link, &link->irq);
895 789 if (ret)
896 last_ret = pcmcia_request_irq(link, &link->irq); 790 goto failed;
897 if (last_ret)
898 goto cs_failed;
899 } 791 }
900 /* 792 /*
901 This actually configures the PCMCIA socket -- setting up 793 This actually configures the PCMCIA socket -- setting up
902 the I/O windows and the interrupt mapping, and putting the 794 the I/O windows and the interrupt mapping, and putting the
903 card and host interface into "Memory and IO" mode. 795 card and host interface into "Memory and IO" mode.
904 */ 796 */
905 last_fn = RequestConfiguration; 797 ret = pcmcia_request_configuration(link, &link->conf);
906 last_ret = pcmcia_request_configuration(link, &link->conf); 798 if (ret)
907 if (last_ret) 799 goto failed;
908 goto cs_failed;
909 800
910 /* 801 /*
911 At this point, the dev_node_t structure(s) need to be 802 At this point, the dev_node_t structure(s) need to be
@@ -930,14 +821,13 @@ next_entry:
930 821
931 return; 822 return;
932 823
933cs_failed: 824failed:
934 cs_error(link, last_fn, last_ret);
935 das16cs_pcmcia_release(link); 825 das16cs_pcmcia_release(link);
936} /* das16cs_pcmcia_config */ 826} /* das16cs_pcmcia_config */
937 827
938static void das16cs_pcmcia_release(struct pcmcia_device *link) 828static void das16cs_pcmcia_release(struct pcmcia_device *link)
939{ 829{
940 DEBUG(0, "das16cs_pcmcia_release(0x%p)\n", link); 830 dev_dbg(&link->dev, "das16cs_pcmcia_release\n");
941 pcmcia_disable_device(link); 831 pcmcia_disable_device(link);
942} /* das16cs_pcmcia_release */ 832} /* das16cs_pcmcia_release */
943 833
@@ -983,14 +873,13 @@ struct pcmcia_driver das16cs_driver = {
983 873
984static int __init init_das16cs_pcmcia_cs(void) 874static int __init init_das16cs_pcmcia_cs(void)
985{ 875{
986 DEBUG(0, "%s\n", version);
987 pcmcia_register_driver(&das16cs_driver); 876 pcmcia_register_driver(&das16cs_driver);
988 return 0; 877 return 0;
989} 878}
990 879
991static void __exit exit_das16cs_pcmcia_cs(void) 880static void __exit exit_das16cs_pcmcia_cs(void)
992{ 881{
993 DEBUG(0, "das16cs_pcmcia_cs: unloading\n"); 882 pr_debug("das16cs_pcmcia_cs: unloading\n");
994 pcmcia_unregister_driver(&das16cs_driver); 883 pcmcia_unregister_driver(&das16cs_driver);
995} 884}
996 885
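Editor's note: the cb_das16_cs.c conversion above (and the das08_cs/ni_daq_700/ni_daq_dio24 ones that follow) replaces the manual CIS walk with pcmcia_loop_config() plus a per-driver callback that is tried against each candidate configuration until one succeeds. A standalone sketch of that overall shape follows; the entry struct, try_entry() and loop_config() are simplified stand-ins, not the real pcmcia API.

#include <stdio.h>

struct cfg_entry {
	int index;
	int nports;
};

static int try_entry(const struct cfg_entry *cfg, void *priv)
{
	int *chosen = priv;

	if (cfg->index == 0)
		return -1;		/* skip unusable entries */
	*chosen = cfg->index;		/* the real callback requests IO here */
	return 0;
}

static int loop_config(const struct cfg_entry *tbl, int n,
		       int (*fn)(const struct cfg_entry *, void *), void *priv)
{
	for (int i = 0; i < n; i++)
		if (fn(&tbl[i], priv) == 0)
			return 0;
	return -1;			/* "no configuration found" */
}

int main(void)
{
	struct cfg_entry tbl[] = { { 0, 0 }, { 1, 16 } };
	int chosen = -1;

	if (loop_config(tbl, 2, try_entry, &chosen) == 0)
		printf("using config index %d\n", chosen);
	return 0;
}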
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 9cab21eaaa18..9b945e5fdd32 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -110,25 +110,6 @@ static int das08_cs_attach(struct comedi_device *dev,
110 110
111======================================================================*/ 111======================================================================*/
112 112
113/*
114 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
115 you do not define PCMCIA_DEBUG at all, all the debug code will be
116 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
117 be present but disabled -- but it can then be enabled for specific
118 modules at load time with a 'pc_debug=#' option to insmod.
119*/
120
121#ifdef PCMCIA_DEBUG
122static int pc_debug = PCMCIA_DEBUG;
123module_param(pc_debug, int, 0644);
124#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
125static const char *version =
126 "das08.c pcmcia code (Frank Hess), modified from dummy_cs.c 1.31 2001/08/24 12:13:13 (David Hinds)";
127#else
128#define DEBUG(n, args...)
129#endif
130
131/*====================================================================*/
132static void das08_pcmcia_config(struct pcmcia_device *link); 113static void das08_pcmcia_config(struct pcmcia_device *link);
133static void das08_pcmcia_release(struct pcmcia_device *link); 114static void das08_pcmcia_release(struct pcmcia_device *link);
134static int das08_pcmcia_suspend(struct pcmcia_device *p_dev); 115static int das08_pcmcia_suspend(struct pcmcia_device *p_dev);
@@ -181,7 +162,7 @@ static int das08_pcmcia_attach(struct pcmcia_device *link)
181{ 162{
182 struct local_info_t *local; 163 struct local_info_t *local;
183 164
184 DEBUG(0, "das08_pcmcia_attach()\n"); 165 dev_dbg(&link->dev, "das08_pcmcia_attach()\n");
185 166
186 /* Allocate space for private device-specific data */ 167 /* Allocate space for private device-specific data */
187 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); 168 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
@@ -192,7 +173,6 @@ static int das08_pcmcia_attach(struct pcmcia_device *link)
192 173
193 /* Interrupt setup */ 174 /* Interrupt setup */
194 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 175 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
195 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
196 link->irq.Handler = NULL; 176 link->irq.Handler = NULL;
197 177
198 /* 178 /*
@@ -224,7 +204,7 @@ static int das08_pcmcia_attach(struct pcmcia_device *link)
224static void das08_pcmcia_detach(struct pcmcia_device *link) 204static void das08_pcmcia_detach(struct pcmcia_device *link)
225{ 205{
226 206
227 DEBUG(0, "das08_pcmcia_detach(0x%p)\n", link); 207 dev_dbg(&link->dev, "das08_pcmcia_detach\n");
228 208
229 if (link->dev_node) { 209 if (link->dev_node) {
230 ((struct local_info_t *)link->priv)->stop = 1; 210 ((struct local_info_t *)link->priv)->stop = 1;
@@ -237,6 +217,44 @@ static void das08_pcmcia_detach(struct pcmcia_device *link)
237 217
238} /* das08_pcmcia_detach */ 218} /* das08_pcmcia_detach */
239 219
220
221static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev,
222 cistpl_cftable_entry_t *cfg,
223 cistpl_cftable_entry_t *dflt,
224 unsigned int vcc,
225 void *priv_data)
226{
227 if (cfg->index == 0)
228 return -ENODEV;
229
230 /* Do we need to allocate an interrupt? */
231 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
232 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
233
234 /* IO window settings */
235 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
236 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
237 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
238 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
239 if (!(io->flags & CISTPL_IO_8BIT))
240 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
241 if (!(io->flags & CISTPL_IO_16BIT))
242 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
243 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
244 p_dev->io.BasePort1 = io->win[0].base;
245 p_dev->io.NumPorts1 = io->win[0].len;
246 if (io->nwin > 1) {
247 p_dev->io.Attributes2 = p_dev->io.Attributes1;
248 p_dev->io.BasePort2 = io->win[1].base;
249 p_dev->io.NumPorts2 = io->win[1].len;
250 }
251 /* This reserves IO space but doesn't actually enable it */
252 return pcmcia_request_io(p_dev, &p_dev->io);
253 }
254 return 0;
255}
256
257
240/*====================================================================== 258/*======================================================================
241 259
242 das08_pcmcia_config() is scheduled to run after a CARD_INSERTION event 260 das08_pcmcia_config() is scheduled to run after a CARD_INSERTION event
@@ -248,128 +266,20 @@ static void das08_pcmcia_detach(struct pcmcia_device *link)
248static void das08_pcmcia_config(struct pcmcia_device *link) 266static void das08_pcmcia_config(struct pcmcia_device *link)
249{ 267{
250 struct local_info_t *dev = link->priv; 268 struct local_info_t *dev = link->priv;
251 tuple_t tuple; 269 int ret;
252 cisparse_t parse;
253 int last_fn, last_ret;
254 u_char buf[64];
255 cistpl_cftable_entry_t dflt = { 0 };
256
257 DEBUG(0, "das08_pcmcia_config(0x%p)\n", link);
258
259 /*
260 This reads the card's CONFIG tuple to find its configuration
261 registers.
262 */
263 tuple.DesiredTuple = CISTPL_CONFIG;
264 tuple.Attributes = 0;
265 tuple.TupleData = buf;
266 tuple.TupleDataMax = sizeof(buf);
267 tuple.TupleOffset = 0;
268 last_fn = GetFirstTuple;
269
270 last_ret = pcmcia_get_first_tuple(link, &tuple);
271 if (last_ret)
272 goto cs_failed;
273
274 last_fn = GetTupleData;
275
276 last_ret = pcmcia_get_tuple_data(link, &tuple);
277 if (last_ret)
278 goto cs_failed;
279
280 last_fn = ParseTuple;
281
282 last_ret = pcmcia_parse_tuple(&tuple, &parse);
283 if (last_ret)
284 goto cs_failed;
285
286 link->conf.ConfigBase = parse.config.base;
287 link->conf.Present = parse.config.rmask[0];
288
289 /*
290 In this loop, we scan the CIS for configuration table entries,
291 each of which describes a valid card configuration, including
292 voltage, IO window, memory window, and interrupt settings.
293
294 We make no assumptions about the card to be configured: we use
295 just the information available in the CIS. In an ideal world,
296 this would work for any PCMCIA card, but it requires a complete
297 and accurate CIS. In practice, a driver usually "knows" most of
298 these things without consulting the CIS, and most client drivers
299 will only use the CIS to fill in implementation-defined details.
300 */
301 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
302 last_fn = GetFirstTuple;
303
304 last_ret = pcmcia_get_first_tuple(link, &tuple);
305 if (last_ret)
306 goto cs_failed;
307
308 while (1) {
309 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
310
311 last_ret = pcmcia_get_tuple_data(link, &tuple);
312 if (last_ret)
313 goto next_entry;
314
315 last_ret = pcmcia_parse_tuple(&tuple, &parse);
316 if (last_ret)
317 goto next_entry;
318
319 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
320 dflt = *cfg;
321 if (cfg->index == 0)
322 goto next_entry;
323 link->conf.ConfigIndex = cfg->index;
324
325 /* Does this card need audio output? */
326/* if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
327 link->conf.Attributes |= CONF_ENABLE_SPKR;
328 link->conf.Status = CCSR_AUDIO_ENA;
329 }
330*/
331 /* Do we need to allocate an interrupt? */
332 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
333 link->conf.Attributes |= CONF_ENABLE_IRQ;
334
335 /* IO window settings */
336 link->io.NumPorts1 = link->io.NumPorts2 = 0;
337 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
338 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
339 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
340 if (!(io->flags & CISTPL_IO_8BIT))
341 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
342 if (!(io->flags & CISTPL_IO_16BIT))
343 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
344 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
345 link->io.BasePort1 = io->win[0].base;
346 link->io.NumPorts1 = io->win[0].len;
347 if (io->nwin > 1) {
348 link->io.Attributes2 = link->io.Attributes1;
349 link->io.BasePort2 = io->win[1].base;
350 link->io.NumPorts2 = io->win[1].len;
351 }
352 /* This reserves IO space but doesn't actually enable it */
353 if (pcmcia_request_io(link, &link->io) != 0)
354 goto next_entry;
355 }
356
357 /* If we got this far, we're cool! */
358 break;
359 270
360next_entry: 271 dev_dbg(&link->dev, "das08_pcmcia_config\n");
361 last_fn = GetNextTuple;
362 272
363 last_ret = pcmcia_get_next_tuple(link, &tuple); 273 ret = pcmcia_loop_config(link, das08_pcmcia_config_loop, NULL);
364 if (last_ret) 274 if (ret) {
365 goto cs_failed; 275 dev_warn(&link->dev, "no configuration found\n");
276 goto failed;
366 } 277 }
367 278
368 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 279 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
369 last_fn = RequestIRQ; 280 ret = pcmcia_request_irq(link, &link->irq);
370 last_ret = pcmcia_request_irq(link, &link->irq); 281 if (ret)
371 if (last_ret) 282 goto failed;
372 goto cs_failed;
373 } 283 }
374 284
375 /* 285 /*
@@ -377,10 +287,9 @@ next_entry:
377 the I/O windows and the interrupt mapping, and putting the 287 the I/O windows and the interrupt mapping, and putting the
378 card and host interface into "Memory and IO" mode. 288 card and host interface into "Memory and IO" mode.
379 */ 289 */
380 last_fn = RequestConfiguration; 290 ret = pcmcia_request_configuration(link, &link->conf);
381 last_ret = pcmcia_request_configuration(link, &link->conf); 291 if (ret)
382 if (last_ret) 292 goto failed;
383 goto cs_failed;
384 293
385 /* 294 /*
386 At this point, the dev_node_t structure(s) need to be 295 At this point, the dev_node_t structure(s) need to be
@@ -405,8 +314,7 @@ next_entry:
405 314
406 return; 315 return;
407 316
408cs_failed: 317failed:
409 cs_error(link, last_fn, last_ret);
410 das08_pcmcia_release(link); 318 das08_pcmcia_release(link);
411 319
412} /* das08_pcmcia_config */ 320} /* das08_pcmcia_config */
@@ -421,7 +329,7 @@ cs_failed:
421 329
422static void das08_pcmcia_release(struct pcmcia_device *link) 330static void das08_pcmcia_release(struct pcmcia_device *link)
423{ 331{
424 DEBUG(0, "das08_pcmcia_release(0x%p)\n", link); 332 dev_dbg(&link->dev, "das08_pcmcia_release\n");
425 pcmcia_disable_device(link); 333 pcmcia_disable_device(link);
426} /* das08_pcmcia_release */ 334} /* das08_pcmcia_release */
427 335
@@ -477,14 +385,13 @@ struct pcmcia_driver das08_cs_driver = {
477 385
478static int __init init_das08_pcmcia_cs(void) 386static int __init init_das08_pcmcia_cs(void)
479{ 387{
480 DEBUG(0, "%s\n", version);
481 pcmcia_register_driver(&das08_cs_driver); 388 pcmcia_register_driver(&das08_cs_driver);
482 return 0; 389 return 0;
483} 390}
484 391
485static void __exit exit_das08_pcmcia_cs(void) 392static void __exit exit_das08_pcmcia_cs(void)
486{ 393{
487 DEBUG(0, "das08_pcmcia_cs: unloading\n"); 394 pr_debug("das08_pcmcia_cs: unloading\n");
488 pcmcia_unregister_driver(&das08_cs_driver); 395 pcmcia_unregister_driver(&das08_cs_driver);
489} 396}
490 397
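Editor's note: each of the new *_config_loop() helpers repeats the same IO data-path width selection seen in the das08_cs.c hunk above: start from AUTO, prefer the 16-bit path, and fall back to 8-bit depending on the CIS IO flags. A standalone sketch of that chain of checks; the WIDTH and FLAG values are local stand-ins for the cistpl.h constants, not the real ones.

#include <stdio.h>

enum { WIDTH_AUTO, WIDTH_8, WIDTH_16 };
#define FLAG_IO_8BIT  0x1
#define FLAG_IO_16BIT 0x2

static int pick_width(unsigned int io_flags)
{
	int width = WIDTH_AUTO;

	if (!(io_flags & FLAG_IO_8BIT))
		width = WIDTH_16;
	if (!(io_flags & FLAG_IO_16BIT))
		width = WIDTH_8;
	return width;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_width(FLAG_IO_8BIT | FLAG_IO_16BIT),	/* AUTO */
	       pick_width(FLAG_IO_16BIT),			/* 16-bit */
	       pick_width(FLAG_IO_8BIT));			/* 8-bit */
	return 0;
}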
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index ec31a3970664..ef5e1183d47d 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -436,25 +436,7 @@ static int dio700_detach(struct comedi_device *dev)
436 return 0; 436 return 0;
437}; 437};
438 438
439/* PCMCIA crap */ 439/* PCMCIA crap -- watch your words, please! */
440
441/*
442 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
443 you do not define PCMCIA_DEBUG at all, all the debug code will be
444 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
445 be present but disabled -- but it can then be enabled for specific
446 modules at load time with a 'pc_debug=#' option to insmod.
447*/
448#ifdef PCMCIA_DEBUG
449static int pc_debug = PCMCIA_DEBUG;
450module_param(pc_debug, int, 0644);
451#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
452static char *version = "ni_daq_700.c, based on dummy_cs.c";
453#else
454#define DEBUG(n, args...)
455#endif
456
457/*====================================================================*/
458 440
459static void dio700_config(struct pcmcia_device *link); 441static void dio700_config(struct pcmcia_device *link);
460static void dio700_release(struct pcmcia_device *link); 442static void dio700_release(struct pcmcia_device *link);
@@ -510,7 +492,7 @@ static int dio700_cs_attach(struct pcmcia_device *link)
510 492
511 printk(KERN_INFO "ni_daq_700: cs-attach\n"); 493 printk(KERN_INFO "ni_daq_700: cs-attach\n");
512 494
513 DEBUG(0, "dio700_cs_attach()\n"); 495 dev_dbg(&link->dev, "dio700_cs_attach()\n");
514 496
515 /* Allocate space for private device-specific data */ 497 /* Allocate space for private device-specific data */
516 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); 498 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
@@ -521,7 +503,6 @@ static int dio700_cs_attach(struct pcmcia_device *link)
521 503
522 /* Interrupt setup */ 504 /* Interrupt setup */
523 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 505 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
524 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
525 link->irq.Handler = NULL; 506 link->irq.Handler = NULL;
526 507
527 /* 508 /*
@@ -555,7 +536,7 @@ static void dio700_cs_detach(struct pcmcia_device *link)
555 536
556 printk(KERN_INFO "ni_daq_700: cs-detach!\n"); 537 printk(KERN_INFO "ni_daq_700: cs-detach!\n");
557 538
558 DEBUG(0, "dio700_cs_detach(0x%p)\n", link); 539 dev_dbg(&link->dev, "dio700_cs_detach\n");
559 540
560 if (link->dev_node) { 541 if (link->dev_node) {
561 ((struct local_info_t *)link->priv)->stop = 1; 542 ((struct local_info_t *)link->priv)->stop = 1;
@@ -576,141 +557,85 @@ static void dio700_cs_detach(struct pcmcia_device *link)
576 557
577======================================================================*/ 558======================================================================*/
578 559
579static void dio700_config(struct pcmcia_device *link) 560static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev,
561 cistpl_cftable_entry_t *cfg,
562 cistpl_cftable_entry_t *dflt,
563 unsigned int vcc,
564 void *priv_data)
580{ 565{
581 struct local_info_t *dev = link->priv; 566 win_req_t *req = priv_data;
582 tuple_t tuple;
583 cisparse_t parse;
584 int last_ret;
585 u_char buf[64];
586 win_req_t req;
587 memreq_t map; 567 memreq_t map;
588 cistpl_cftable_entry_t dflt = { 0 };
589 568
590 printk(KERN_INFO "ni_daq_700: cs-config\n"); 569 if (cfg->index == 0)
591 570 return -ENODEV;
592 DEBUG(0, "dio700_config(0x%p)\n", link);
593 571
594 /* 572 /* Does this card need audio output? */
595 This reads the card's CONFIG tuple to find its configuration 573 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
596 registers. 574 p_dev->conf.Attributes |= CONF_ENABLE_SPKR;
597 */ 575 p_dev->conf.Status = CCSR_AUDIO_ENA;
598 tuple.DesiredTuple = CISTPL_CONFIG;
599 tuple.Attributes = 0;
600 tuple.TupleData = buf;
601 tuple.TupleDataMax = sizeof(buf);
602 tuple.TupleOffset = 0;
603
604 last_ret = pcmcia_get_first_tuple(link, &tuple);
605 if (last_ret) {
606 cs_error(link, GetFirstTuple, last_ret);
607 goto cs_failed;
608 } 576 }
609 577
610 last_ret = pcmcia_get_tuple_data(link, &tuple); 578 /* Do we need to allocate an interrupt? */
611 if (last_ret) { 579 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
612 cs_error(link, GetTupleData, last_ret); 580 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
613 goto cs_failed; 581
582 /* IO window settings */
583 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
584 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
585 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
586 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
587 if (!(io->flags & CISTPL_IO_8BIT))
588 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
589 if (!(io->flags & CISTPL_IO_16BIT))
590 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
591 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
592 p_dev->io.BasePort1 = io->win[0].base;
593 p_dev->io.NumPorts1 = io->win[0].len;
594 if (io->nwin > 1) {
595 p_dev->io.Attributes2 = p_dev->io.Attributes1;
596 p_dev->io.BasePort2 = io->win[1].base;
597 p_dev->io.NumPorts2 = io->win[1].len;
598 }
599 /* This reserves IO space but doesn't actually enable it */
600 if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
601 return -ENODEV;
614 } 602 }
615 603
616 last_ret = pcmcia_parse_tuple(&tuple, &parse); 604 if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
617 if (last_ret) { 605 cistpl_mem_t *mem =
618 cs_error(link, ParseTuple, last_ret); 606 (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
619 goto cs_failed; 607 req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
608 req->Attributes |= WIN_ENABLE;
609 req->Base = mem->win[0].host_addr;
610 req->Size = mem->win[0].len;
611 if (req->Size < 0x1000)
612 req->Size = 0x1000;
613 req->AccessSpeed = 0;
614 if (pcmcia_request_window(p_dev, req, &p_dev->win))
615 return -ENODEV;
616 map.Page = 0;
617 map.CardOffset = mem->win[0].card_addr;
618 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map))
619 return -ENODEV;
620 } 620 }
621 link->conf.ConfigBase = parse.config.base; 621 /* If we got this far, we're cool! */
622 link->conf.Present = parse.config.rmask[0]; 622 return 0;
623 623}
624 /*
625 In this loop, we scan the CIS for configuration table entries,
626 each of which describes a valid card configuration, including
627 voltage, IO window, memory window, and interrupt settings.
628
629 We make no assumptions about the card to be configured: we use
630 just the information available in the CIS. In an ideal world,
631 this would work for any PCMCIA card, but it requires a complete
632 and accurate CIS. In practice, a driver usually "knows" most of
633 these things without consulting the CIS, and most client drivers
634 will only use the CIS to fill in implementation-defined details.
635 */
636 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
637 last_ret = pcmcia_get_first_tuple(link, &tuple);
638 if (last_ret != 0) {
639 cs_error(link, GetFirstTuple, last_ret);
640 goto cs_failed;
641 }
642 while (1) {
643 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
644 if (pcmcia_get_tuple_data(link, &tuple) != 0)
645 goto next_entry;
646 if (pcmcia_parse_tuple(&tuple, &parse) != 0)
647 goto next_entry;
648
649 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
650 dflt = *cfg;
651 if (cfg->index == 0)
652 goto next_entry;
653 link->conf.ConfigIndex = cfg->index;
654
655 /* Does this card need audio output? */
656 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
657 link->conf.Attributes |= CONF_ENABLE_SPKR;
658 link->conf.Status = CCSR_AUDIO_ENA;
659 }
660 624
661 /* Do we need to allocate an interrupt? */ 625static void dio700_config(struct pcmcia_device *link)
662 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) 626{
663 link->conf.Attributes |= CONF_ENABLE_IRQ; 627 struct local_info_t *dev = link->priv;
664 628 win_req_t req;
665 /* IO window settings */ 629 int ret;
666 link->io.NumPorts1 = link->io.NumPorts2 = 0;
667 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
668 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
669 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
670 if (!(io->flags & CISTPL_IO_8BIT))
671 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
672 if (!(io->flags & CISTPL_IO_16BIT))
673 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
674 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
675 link->io.BasePort1 = io->win[0].base;
676 link->io.NumPorts1 = io->win[0].len;
677 if (io->nwin > 1) {
678 link->io.Attributes2 = link->io.Attributes1;
679 link->io.BasePort2 = io->win[1].base;
680 link->io.NumPorts2 = io->win[1].len;
681 }
682 /* This reserves IO space but doesn't actually enable it */
683 if (pcmcia_request_io(link, &link->io) != 0)
684 goto next_entry;
685 }
686 630
687 if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) { 631 printk(KERN_INFO "ni_daq_700: cs-config\n");
688 cistpl_mem_t *mem =
689 (cfg->mem.nwin) ? &cfg->mem : &dflt.mem;
690 req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
691 req.Attributes |= WIN_ENABLE;
692 req.Base = mem->win[0].host_addr;
693 req.Size = mem->win[0].len;
694 if (req.Size < 0x1000)
695 req.Size = 0x1000;
696 req.AccessSpeed = 0;
697 if (pcmcia_request_window(&link, &req, &link->win))
698 goto next_entry;
699 map.Page = 0;
700 map.CardOffset = mem->win[0].card_addr;
701 if (pcmcia_map_mem_page(link->win, &map))
702 goto next_entry;
703 }
704 /* If we got this far, we're cool! */
705 break;
706 632
707next_entry: 633 dev_dbg(&link->dev, "dio700_config\n");
708 634
709 last_ret = pcmcia_get_next_tuple(link, &tuple); 635 ret = pcmcia_loop_config(link, dio700_pcmcia_config_loop, &req);
710 if (last_ret) { 636 if (ret) {
711 cs_error(link, GetNextTuple, last_ret); 637 dev_warn(&link->dev, "no configuration found\n");
712 goto cs_failed; 638 goto failed;
713 }
714 } 639 }
715 640
716 /* 641 /*
@@ -719,11 +644,9 @@ next_entry:
719 irq structure is initialized. 644 irq structure is initialized.
720 */ 645 */
721 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 646 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
722 last_ret = pcmcia_request_irq(link, &link->irq); 647 ret = pcmcia_request_irq(link, &link->irq);
723 if (last_ret) { 648 if (ret)
724 cs_error(link, RequestIRQ, last_ret); 649 goto failed;
725 goto cs_failed;
726 }
727 } 650 }
728 651
729 /* 652 /*
@@ -731,11 +654,9 @@ next_entry:
731 the I/O windows and the interrupt mapping, and putting the 654 the I/O windows and the interrupt mapping, and putting the
732 card and host interface into "Memory and IO" mode. 655 card and host interface into "Memory and IO" mode.
733 */ 656 */
734 last_ret = pcmcia_request_configuration(link, &link->conf); 657 ret = pcmcia_request_configuration(link, &link->conf);
735 if (last_ret != 0) { 658 if (ret != 0)
736 cs_error(link, RequestConfiguration, last_ret); 659 goto failed;
737 goto cs_failed;
738 }
739 660
740 /* 661 /*
741 At this point, the dev_node_t structure(s) need to be 662 At this point, the dev_node_t structure(s) need to be
@@ -763,7 +684,7 @@ next_entry:
763 684
764 return; 685 return;
765 686
766cs_failed: 687failed:
767 printk(KERN_INFO "ni_daq_700 cs failed"); 688 printk(KERN_INFO "ni_daq_700 cs failed");
768 dio700_release(link); 689 dio700_release(link);
769 690
@@ -771,7 +692,7 @@ cs_failed:
771 692
772static void dio700_release(struct pcmcia_device *link) 693static void dio700_release(struct pcmcia_device *link)
773{ 694{
774 DEBUG(0, "dio700_release(0x%p)\n", link); 695 dev_dbg(&link->dev, "dio700_release\n");
775 696
776 pcmcia_disable_device(link); 697 pcmcia_disable_device(link);
777} /* dio700_release */ 698} /* dio700_release */
@@ -830,15 +751,13 @@ struct pcmcia_driver dio700_cs_driver = {
830 751
831static int __init init_dio700_cs(void) 752static int __init init_dio700_cs(void)
832{ 753{
833 printk("ni_daq_700: cs-init \n");
834 DEBUG(0, "%s\n", version);
835 pcmcia_register_driver(&dio700_cs_driver); 754 pcmcia_register_driver(&dio700_cs_driver);
836 return 0; 755 return 0;
837} 756}
838 757
839static void __exit exit_dio700_cs(void) 758static void __exit exit_dio700_cs(void)
840{ 759{
841 DEBUG(0, "ni_daq_700: unloading\n"); 760 pr_debug("ni_daq_700: unloading\n");
842 pcmcia_unregister_driver(&dio700_cs_driver); 761 pcmcia_unregister_driver(&dio700_cs_driver);
843} 762}
844 763
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 0700a8bddd1e..9017be3a92f1 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -187,25 +187,7 @@ static int dio24_detach(struct comedi_device *dev)
187 return 0; 187 return 0;
188}; 188};
189 189
190/* PCMCIA crap */ 190/* PCMCIA crap -- watch your words! */
191
192/*
193 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
194 you do not define PCMCIA_DEBUG at all, all the debug code will be
195 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
196 be present but disabled -- but it can then be enabled for specific
197 modules at load time with a 'pc_debug=#' option to insmod.
198*/
199#ifdef PCMCIA_DEBUG
200static int pc_debug = PCMCIA_DEBUG;
201module_param(pc_debug, int, 0644);
202#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
203static char *version = "ni_daq_dio24.c, based on dummy_cs.c";
204#else
205#define DEBUG(n, args...)
206#endif
207
208/*====================================================================*/
209 191
210static void dio24_config(struct pcmcia_device *link); 192static void dio24_config(struct pcmcia_device *link);
211static void dio24_release(struct pcmcia_device *link); 193static void dio24_release(struct pcmcia_device *link);
@@ -261,7 +243,7 @@ static int dio24_cs_attach(struct pcmcia_device *link)
261 243
262 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - CS-attach!\n"); 244 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - CS-attach!\n");
263 245
264 DEBUG(0, "dio24_cs_attach()\n"); 246 dev_dbg(&link->dev, "dio24_cs_attach()\n");
265 247
266 /* Allocate space for private device-specific data */ 248 /* Allocate space for private device-specific data */
267 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); 249 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
@@ -272,7 +254,6 @@ static int dio24_cs_attach(struct pcmcia_device *link)
272 254
273 /* Interrupt setup */ 255 /* Interrupt setup */
274 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 256 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
275 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
276 link->irq.Handler = NULL; 257 link->irq.Handler = NULL;
277 258
278 /* 259 /*
@@ -306,7 +287,7 @@ static void dio24_cs_detach(struct pcmcia_device *link)
306 287
307 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - cs-detach!\n"); 288 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO - cs-detach!\n");
308 289
309 DEBUG(0, "dio24_cs_detach(0x%p)\n", link); 290 dev_dbg(&link->dev, "dio24_cs_detach\n");
310 291
311 if (link->dev_node) { 292 if (link->dev_node) {
312 ((struct local_info_t *)link->priv)->stop = 1; 293 ((struct local_info_t *)link->priv)->stop = 1;
@@ -327,142 +308,85 @@ static void dio24_cs_detach(struct pcmcia_device *link)
327 308
328======================================================================*/ 309======================================================================*/
329 310
330static void dio24_config(struct pcmcia_device *link) 311static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev,
312 cistpl_cftable_entry_t *cfg,
313 cistpl_cftable_entry_t *dflt,
314 unsigned int vcc,
315 void *priv_data)
331{ 316{
332 struct local_info_t *dev = link->priv; 317 win_req_t *req = priv_data;
333 tuple_t tuple;
334 cisparse_t parse;
335 int last_ret;
336 u_char buf[64];
337 win_req_t req;
338 memreq_t map; 318 memreq_t map;
339 cistpl_cftable_entry_t dflt = { 0 };
340 319
341 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO! - config\n"); 320 if (cfg->index == 0)
342 321 return -ENODEV;
343 DEBUG(0, "dio24_config(0x%p)\n", link);
344
345 /*
346 This reads the card's CONFIG tuple to find its configuration
347 registers.
348 */
349 tuple.DesiredTuple = CISTPL_CONFIG;
350 tuple.Attributes = 0;
351 tuple.TupleData = buf;
352 tuple.TupleDataMax = sizeof(buf);
353 tuple.TupleOffset = 0;
354
355 last_ret = pcmcia_get_first_tuple(link, &tuple);
356 if (last_ret) {
357 cs_error(link, GetFirstTuple, last_ret);
358 goto cs_failed;
359 }
360 322
361 last_ret = pcmcia_get_tuple_data(link, &tuple); 323 /* Does this card need audio output? */
362 if (last_ret) { 324 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
363 cs_error(link, GetTupleData, last_ret); 325 p_dev->conf.Attributes |= CONF_ENABLE_SPKR;
364 goto cs_failed; 326 p_dev->conf.Status = CCSR_AUDIO_ENA;
365 } 327 }
366 328
367 last_ret = pcmcia_parse_tuple(&tuple, &parse); 329 /* Do we need to allocate an interrupt? */
368 if (last_ret) { 330 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
369 cs_error(link, ParseTuple, last_ret); 331 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
370 goto cs_failed; 332
333 /* IO window settings */
334 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
335 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
336 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
337 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
338 if (!(io->flags & CISTPL_IO_8BIT))
339 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
340 if (!(io->flags & CISTPL_IO_16BIT))
341 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
342 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
343 p_dev->io.BasePort1 = io->win[0].base;
344 p_dev->io.NumPorts1 = io->win[0].len;
345 if (io->nwin > 1) {
346 p_dev->io.Attributes2 = p_dev->io.Attributes1;
347 p_dev->io.BasePort2 = io->win[1].base;
348 p_dev->io.NumPorts2 = io->win[1].len;
349 }
350 /* This reserves IO space but doesn't actually enable it */
351 if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
352 return -ENODEV;
371 } 353 }
372 link->conf.ConfigBase = parse.config.base;
373 link->conf.Present = parse.config.rmask[0];
374
375 /*
376 In this loop, we scan the CIS for configuration table entries,
377 each of which describes a valid card configuration, including
378 voltage, IO window, memory window, and interrupt settings.
379
380 We make no assumptions about the card to be configured: we use
381 just the information available in the CIS. In an ideal world,
382 this would work for any PCMCIA card, but it requires a complete
383 and accurate CIS. In practice, a driver usually "knows" most of
384 these things without consulting the CIS, and most client drivers
385 will only use the CIS to fill in implementation-defined details.
386 */
387 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
388 354
389 last_ret = pcmcia_get_first_tuple(link, &tuple); 355 if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
390 if (last_ret) { 356 cistpl_mem_t *mem =
391 cs_error(link, GetFirstTuple, last_ret); 357 (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
392 goto cs_failed; 358 req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
359 req->Attributes |= WIN_ENABLE;
360 req->Base = mem->win[0].host_addr;
361 req->Size = mem->win[0].len;
362 if (req->Size < 0x1000)
363 req->Size = 0x1000;
364 req->AccessSpeed = 0;
365 if (pcmcia_request_window(p_dev, req, &p_dev->win))
366 return -ENODEV;
367 map.Page = 0;
368 map.CardOffset = mem->win[0].card_addr;
369 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map))
370 return -ENODEV;
393 } 371 }
394 while (1) { 372 /* If we got this far, we're cool! */
395 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 373 return 0;
396 if (pcmcia_get_tuple_data(link, &tuple) != 0) 374}
397 goto next_entry;
398 if (pcmcia_parse_tuple(&tuple, &parse) != 0)
399 goto next_entry;
400
401 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
402 dflt = *cfg;
403 if (cfg->index == 0)
404 goto next_entry;
405 link->conf.ConfigIndex = cfg->index;
406
407 /* Does this card need audio output? */
408 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
409 link->conf.Attributes |= CONF_ENABLE_SPKR;
410 link->conf.Status = CCSR_AUDIO_ENA;
411 }
412 375
413 /* Do we need to allocate an interrupt? */ 376static void dio24_config(struct pcmcia_device *link)
414 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) 377{
415 link->conf.Attributes |= CONF_ENABLE_IRQ; 378 struct local_info_t *dev = link->priv;
416 379 int ret;
417 /* IO window settings */ 380 win_req_t req;
418 link->io.NumPorts1 = link->io.NumPorts2 = 0;
419 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
420 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
421 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
422 if (!(io->flags & CISTPL_IO_8BIT))
423 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
424 if (!(io->flags & CISTPL_IO_16BIT))
425 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
426 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
427 link->io.BasePort1 = io->win[0].base;
428 link->io.NumPorts1 = io->win[0].len;
429 if (io->nwin > 1) {
430 link->io.Attributes2 = link->io.Attributes1;
431 link->io.BasePort2 = io->win[1].base;
432 link->io.NumPorts2 = io->win[1].len;
433 }
434 /* This reserves IO space but doesn't actually enable it */
435 if (pcmcia_request_io(link, &link->io) != 0)
436 goto next_entry;
437 }
438 381
439 if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) { 382 printk(KERN_INFO "ni_daq_dio24: HOLA SOY YO! - config\n");
440 cistpl_mem_t *mem =
441 (cfg->mem.nwin) ? &cfg->mem : &dflt.mem;
442 req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
443 req.Attributes |= WIN_ENABLE;
444 req.Base = mem->win[0].host_addr;
445 req.Size = mem->win[0].len;
446 if (req.Size < 0x1000)
447 req.Size = 0x1000;
448 req.AccessSpeed = 0;
449 if (pcmcia_request_window(&link, &req, &link->win))
450 goto next_entry;
451 map.Page = 0;
452 map.CardOffset = mem->win[0].card_addr;
453 if (pcmcia_map_mem_page(link->win, &map))
454 goto next_entry;
455 }
456 /* If we got this far, we're cool! */
457 break;
458 383
459next_entry: 384 dev_dbg(&link->dev, "dio24_config\n");
460 385
461 last_ret = pcmcia_get_next_tuple(link, &tuple); 386 ret = pcmcia_loop_config(link, dio24_pcmcia_config_loop, &req);
462 if (last_ret) { 387 if (ret) {
463 cs_error(link, GetNextTuple, last_ret); 388 dev_warn(&link->dev, "no configuration found\n");
464 goto cs_failed; 389 goto failed;
465 }
466 } 390 }
467 391
468 /* 392 /*
@@ -471,11 +395,9 @@ next_entry:
471 irq structure is initialized. 395 irq structure is initialized.
472 */ 396 */
473 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 397 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
474 last_ret = pcmcia_request_irq(link, &link->irq); 398 ret = pcmcia_request_irq(link, &link->irq);
475 if (last_ret) { 399 if (ret)
476 cs_error(link, RequestIRQ, last_ret); 400 goto failed;
477 goto cs_failed;
478 }
479 } 401 }
480 402
481 /* 403 /*
@@ -483,11 +405,9 @@ next_entry:
483 the I/O windows and the interrupt mapping, and putting the 405 the I/O windows and the interrupt mapping, and putting the
484 card and host interface into "Memory and IO" mode. 406 card and host interface into "Memory and IO" mode.
485 */ 407 */
486 last_ret = pcmcia_request_configuration(link, &link->conf); 408 ret = pcmcia_request_configuration(link, &link->conf);
487 if (last_ret) { 409 if (ret)
488 cs_error(link, RequestConfiguration, last_ret); 410 goto failed;
489 goto cs_failed;
490 }
491 411
492 /* 412 /*
493 At this point, the dev_node_t structure(s) need to be 413 At this point, the dev_node_t structure(s) need to be
@@ -515,7 +435,7 @@ next_entry:
515 435
516 return; 436 return;
517 437
518cs_failed: 438failed:
519 printk(KERN_INFO "Fallo"); 439 printk(KERN_INFO "Fallo");
520 dio24_release(link); 440 dio24_release(link);
521 441
@@ -523,7 +443,7 @@ cs_failed:
523 443
524static void dio24_release(struct pcmcia_device *link) 444static void dio24_release(struct pcmcia_device *link)
525{ 445{
526 DEBUG(0, "dio24_release(0x%p)\n", link); 446 dev_dbg(&link->dev, "dio24_release\n");
527 447
528 pcmcia_disable_device(link); 448 pcmcia_disable_device(link);
529} /* dio24_release */ 449} /* dio24_release */
@@ -582,14 +502,12 @@ struct pcmcia_driver dio24_cs_driver = {
582static int __init init_dio24_cs(void) 502static int __init init_dio24_cs(void)
583{ 503{
584 printk("ni_daq_dio24: HOLA SOY YO!\n"); 504 printk("ni_daq_dio24: HOLA SOY YO!\n");
585 DEBUG(0, "%s\n", version);
586 pcmcia_register_driver(&dio24_cs_driver); 505 pcmcia_register_driver(&dio24_cs_driver);
587 return 0; 506 return 0;
588} 507}
589 508
590static void __exit exit_dio24_cs(void) 509static void __exit exit_dio24_cs(void)
591{ 510{
592 DEBUG(0, "ni_dio24: unloading\n");
593 pcmcia_unregister_driver(&dio24_cs_driver); 511 pcmcia_unregister_driver(&dio24_cs_driver);
594} 512}
595 513
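Note on the dio24 conversion above: the hand-rolled CIS walk (pcmcia_get_first_tuple / pcmcia_get_tuple_data / pcmcia_parse_tuple / pcmcia_get_next_tuple with the next_entry label) is replaced by a callback handed to pcmcia_loop_config(), which iterates the CISTPL_CFTABLE_ENTRY tuples itself. A condensed sketch of the shape, with the IO- and memory-window handling trimmed; it mirrors dio24_pcmcia_config_loop() rather than adding anything new, and assumes the same pcmcia headers the drivers above already include:

#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Called once per configuration table entry.  Returning 0 accepts the
 * entry and stops the loop; a negative errno moves on to the next one. */
static int example_config_loop(struct pcmcia_device *p_dev,
			       cistpl_cftable_entry_t *cfg,
			       cistpl_cftable_entry_t *dflt,
			       unsigned int vcc,
			       void *priv_data)
{
	if (cfg->index == 0)
		return -ENODEV;

	if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
		p_dev->conf.Attributes |= CONF_ENABLE_IRQ;

	/* ...IO and memory window setup as in dio24_pcmcia_config_loop()... */

	return pcmcia_request_io(p_dev, &p_dev->io);
}

static void example_config(struct pcmcia_device *link)
{
	int ret = pcmcia_loop_config(link, example_config_loop, NULL);

	if (ret) {
		dev_warn(&link->dev, "no configuration found\n");
		return;
	}
	/* In the real driver, pcmcia_request_irq() and
	 * pcmcia_request_configuration() follow here, each reduced to
	 * "if (ret) goto failed" as in the hunks above. */
}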
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index a3053b8da1c6..7d514b3ee754 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -153,23 +153,6 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
153 return labpc_common_attach(dev, iobase, irq, 0); 153 return labpc_common_attach(dev, iobase, irq, 0);
154} 154}
155 155
156/*
157 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
158 you do not define PCMCIA_DEBUG at all, all the debug code will be
159 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
160 be present but disabled -- but it can then be enabled for specific
161 modules at load time with a 'pc_debug=#' option to insmod.
162*/
163#ifdef PCMCIA_DEBUG
164static int pc_debug = PCMCIA_DEBUG;
165module_param(pc_debug, int, 0644);
166#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
167static const char *version =
168 "ni_labpc.c, based on dummy_cs.c 1.31 2001/08/24 12:13:13";
169#else
170#define DEBUG(n, args...)
171#endif
172
173/*====================================================================*/ 156/*====================================================================*/
174 157
175/* 158/*
@@ -236,7 +219,7 @@ static int labpc_cs_attach(struct pcmcia_device *link)
236{ 219{
237 struct local_info_t *local; 220 struct local_info_t *local;
238 221
239 DEBUG(0, "labpc_cs_attach()\n"); 222 dev_dbg(&link->dev, "labpc_cs_attach()\n");
240 223
241 /* Allocate space for private device-specific data */ 224 /* Allocate space for private device-specific data */
242 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL); 225 local = kzalloc(sizeof(struct local_info_t), GFP_KERNEL);
@@ -247,7 +230,6 @@ static int labpc_cs_attach(struct pcmcia_device *link)
247 230
248 /* Interrupt setup */ 231 /* Interrupt setup */
249 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE; 232 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_FORCED_PULSE;
250 link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_PULSE_ID;
251 link->irq.Handler = NULL; 233 link->irq.Handler = NULL;
252 234
253 /* 235 /*
@@ -278,7 +260,7 @@ static int labpc_cs_attach(struct pcmcia_device *link)
278 260
279static void labpc_cs_detach(struct pcmcia_device *link) 261static void labpc_cs_detach(struct pcmcia_device *link)
280{ 262{
281 DEBUG(0, "labpc_cs_detach(0x%p)\n", link); 263 dev_dbg(&link->dev, "labpc_cs_detach\n");
282 264
283 /* 265 /*
284 If the device is currently configured and active, we won't 266 If the device is currently configured and active, we won't
@@ -305,135 +287,84 @@ static void labpc_cs_detach(struct pcmcia_device *link)
305 287
306======================================================================*/ 288======================================================================*/
307 289
308static void labpc_config(struct pcmcia_device *link) 290static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev,
291 cistpl_cftable_entry_t *cfg,
292 cistpl_cftable_entry_t *dflt,
293 unsigned int vcc,
294 void *priv_data)
309{ 295{
310 struct local_info_t *dev = link->priv; 296 win_req_t *req = priv_data;
311 tuple_t tuple;
312 cisparse_t parse;
313 int last_ret;
314 u_char buf[64];
315 win_req_t req;
316 memreq_t map; 297 memreq_t map;
317 cistpl_cftable_entry_t dflt = { 0 };
318 298
319 DEBUG(0, "labpc_config(0x%p)\n", link); 299 if (cfg->index == 0)
300 return -ENODEV;
320 301
321 /* 302 /* Does this card need audio output? */
322 This reads the card's CONFIG tuple to find its configuration 303 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
323 registers. 304 p_dev->conf.Attributes |= CONF_ENABLE_SPKR;
324 */ 305 p_dev->conf.Status = CCSR_AUDIO_ENA;
325 tuple.DesiredTuple = CISTPL_CONFIG;
326 tuple.Attributes = 0;
327 tuple.TupleData = buf;
328 tuple.TupleDataMax = sizeof(buf);
329 tuple.TupleOffset = 0;
330
331 last_ret = pcmcia_get_first_tuple(link, &tuple);
332 if (last_ret) {
333 cs_error(link, GetFirstTuple, last_ret);
334 goto cs_failed;
335 } 306 }
336 307
337 last_ret = pcmcia_get_tuple_data(link, &tuple); 308 /* Do we need to allocate an interrupt? */
338 if (last_ret) { 309 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
339 cs_error(link, GetTupleData, last_ret); 310 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
340 goto cs_failed; 311
312 /* IO window settings */
313 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
314 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
315 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
316 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
317 if (!(io->flags & CISTPL_IO_8BIT))
318 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
319 if (!(io->flags & CISTPL_IO_16BIT))
320 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
321 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
322 p_dev->io.BasePort1 = io->win[0].base;
323 p_dev->io.NumPorts1 = io->win[0].len;
324 if (io->nwin > 1) {
325 p_dev->io.Attributes2 = p_dev->io.Attributes1;
326 p_dev->io.BasePort2 = io->win[1].base;
327 p_dev->io.NumPorts2 = io->win[1].len;
328 }
329 /* This reserves IO space but doesn't actually enable it */
330 if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
331 return -ENODEV;
341 } 332 }
342 333
343 last_ret = pcmcia_parse_tuple(&tuple, &parse); 334 if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
344 if (last_ret) { 335 cistpl_mem_t *mem =
345 cs_error(link, ParseTuple, last_ret); 336 (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
346 goto cs_failed; 337 req->Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
338 req->Attributes |= WIN_ENABLE;
339 req->Base = mem->win[0].host_addr;
340 req->Size = mem->win[0].len;
341 if (req->Size < 0x1000)
342 req->Size = 0x1000;
343 req->AccessSpeed = 0;
344 if (pcmcia_request_window(p_dev, req, &p_dev->win))
345 return -ENODEV;
346 map.Page = 0;
347 map.CardOffset = mem->win[0].card_addr;
348 if (pcmcia_map_mem_page(p_dev, p_dev->win, &map))
349 return -ENODEV;
347 } 350 }
348 link->conf.ConfigBase = parse.config.base; 351 /* If we got this far, we're cool! */
349 link->conf.Present = parse.config.rmask[0]; 352 return 0;
353}
350 354
351 /*
352 In this loop, we scan the CIS for configuration table entries,
353 each of which describes a valid card configuration, including
354 voltage, IO window, memory window, and interrupt settings.
355
356 We make no assumptions about the card to be configured: we use
357 just the information available in the CIS. In an ideal world,
358 this would work for any PCMCIA card, but it requires a complete
359 and accurate CIS. In practice, a driver usually "knows" most of
360 these things without consulting the CIS, and most client drivers
361 will only use the CIS to fill in implementation-defined details.
362 */
363 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
364 last_ret = pcmcia_get_first_tuple(link, &tuple);
365 if (last_ret) {
366 cs_error(link, GetFirstTuple, last_ret);
367 goto cs_failed;
368 }
369 while (1) {
370 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
371 if (pcmcia_get_tuple_data(link, &tuple))
372 goto next_entry;
373 if (pcmcia_parse_tuple(&tuple, &parse))
374 goto next_entry;
375
376 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
377 dflt = *cfg;
378 if (cfg->index == 0)
379 goto next_entry;
380 link->conf.ConfigIndex = cfg->index;
381
382 /* Does this card need audio output? */
383 if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
384 link->conf.Attributes |= CONF_ENABLE_SPKR;
385 link->conf.Status = CCSR_AUDIO_ENA;
386 }
387 355
388 /* Do we need to allocate an interrupt? */ 356static void labpc_config(struct pcmcia_device *link)
389 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1) 357{
390 link->conf.Attributes |= CONF_ENABLE_IRQ; 358 struct local_info_t *dev = link->priv;
391 359 int ret;
392 /* IO window settings */ 360 win_req_t req;
393 link->io.NumPorts1 = link->io.NumPorts2 = 0;
394 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
395 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
396 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
397 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
398 link->io.BasePort1 = io->win[0].base;
399 link->io.NumPorts1 = io->win[0].len;
400 if (io->nwin > 1) {
401 link->io.Attributes2 = link->io.Attributes1;
402 link->io.BasePort2 = io->win[1].base;
403 link->io.NumPorts2 = io->win[1].len;
404 }
405 /* This reserves IO space but doesn't actually enable it */
406 if (pcmcia_request_io(link, &link->io))
407 goto next_entry;
408 }
409 361
410 if ((cfg->mem.nwin > 0) || (dflt.mem.nwin > 0)) { 362 dev_dbg(&link->dev, "labpc_config\n");
411 cistpl_mem_t *mem =
412 (cfg->mem.nwin) ? &cfg->mem : &dflt.mem;
413 req.Attributes = WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM;
414 req.Attributes |= WIN_ENABLE;
415 req.Base = mem->win[0].host_addr;
416 req.Size = mem->win[0].len;
417 if (req.Size < 0x1000)
418 req.Size = 0x1000;
419 req.AccessSpeed = 0;
420 link->win = (window_handle_t) link;
421 if (pcmcia_request_window(&link, &req, &link->win))
422 goto next_entry;
423 map.Page = 0;
424 map.CardOffset = mem->win[0].card_addr;
425 if (pcmcia_map_mem_page(link->win, &map))
426 goto next_entry;
427 }
428 /* If we got this far, we're cool! */
429 break;
430 363
431next_entry: 364 ret = pcmcia_loop_config(link, labpc_pcmcia_config_loop, &req);
432 last_ret = pcmcia_get_next_tuple(link, &tuple); 365 if (ret) {
433 if (last_ret) { 366 dev_warn(&link->dev, "no configuration found\n");
434 cs_error(link, GetNextTuple, last_ret); 367 goto failed;
435 goto cs_failed;
436 }
437 } 368 }
438 369
439 /* 370 /*
@@ -442,11 +373,9 @@ next_entry:
442 irq structure is initialized. 373 irq structure is initialized.
443 */ 374 */
444 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 375 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
445 last_ret = pcmcia_request_irq(link, &link->irq); 376 ret = pcmcia_request_irq(link, &link->irq);
446 if (last_ret) { 377 if (ret)
447 cs_error(link, RequestIRQ, last_ret); 378 goto failed;
448 goto cs_failed;
449 }
450 } 379 }
451 380
452 /* 381 /*
@@ -454,11 +383,9 @@ next_entry:
454 the I/O windows and the interrupt mapping, and putting the 383 the I/O windows and the interrupt mapping, and putting the
455 card and host interface into "Memory and IO" mode. 384 card and host interface into "Memory and IO" mode.
456 */ 385 */
457 last_ret = pcmcia_request_configuration(link, &link->conf); 386 ret = pcmcia_request_configuration(link, &link->conf);
458 if (last_ret) { 387 if (ret)
459 cs_error(link, RequestConfiguration, last_ret); 388 goto failed;
460 goto cs_failed;
461 }
462 389
463 /* 390 /*
464 At this point, the dev_node_t structure(s) need to be 391 At this point, the dev_node_t structure(s) need to be
@@ -486,14 +413,14 @@ next_entry:
486 413
487 return; 414 return;
488 415
489cs_failed: 416failed:
490 labpc_release(link); 417 labpc_release(link);
491 418
492} /* labpc_config */ 419} /* labpc_config */
493 420
494static void labpc_release(struct pcmcia_device *link) 421static void labpc_release(struct pcmcia_device *link)
495{ 422{
496 DEBUG(0, "labpc_release(0x%p)\n", link); 423 dev_dbg(&link->dev, "labpc_release\n");
497 424
498 pcmcia_disable_device(link); 425 pcmcia_disable_device(link);
499} /* labpc_release */ 426} /* labpc_release */
@@ -551,14 +478,12 @@ struct pcmcia_driver labpc_cs_driver = {
551 478
552static int __init init_labpc_cs(void) 479static int __init init_labpc_cs(void)
553{ 480{
554 DEBUG(0, "%s\n", version);
555 pcmcia_register_driver(&labpc_cs_driver); 481 pcmcia_register_driver(&labpc_cs_driver);
556 return 0; 482 return 0;
557} 483}
558 484
559static void __exit exit_labpc_cs(void) 485static void __exit exit_labpc_cs(void)
560{ 486{
561 DEBUG(0, "ni_labpc: unloading\n");
562 pcmcia_unregister_driver(&labpc_cs_driver); 487 pcmcia_unregister_driver(&labpc_cs_driver);
563} 488}
564 489
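The same conversion lands in ni_labpc_cs.c, and with it every one of these drivers drops its private PCMCIA_DEBUG / pc_debug / DEBUG() machinery in favor of the stock dev_dbg() and pr_debug() helpers, which compile out unless DEBUG (or dynamic debug) is enabled and need no per-module pc_debug parameter. A small sketch of the replacement, assuming a device pointer is at hand the way &link->dev is above:

#include <linux/kernel.h>
#include <linux/device.h>

/* Old style (removed above):
 *   #define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args)
 *   DEBUG(0, "labpc_config(0x%p)\n", link);
 * New style: */
static void example_debug(struct device *dev)
{
	dev_dbg(dev, "labpc_config\n");      /* tied to the device, no pc_debug knob */
	pr_debug("module-level message\n");  /* where no device is available */
}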
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index 9aef87fc81dc..d692f4bb47ea 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -274,7 +274,6 @@ static int cs_attach(struct pcmcia_device *link)
274 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 274 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
275 link->io.NumPorts1 = 16; 275 link->io.NumPorts1 = 16;
276 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 276 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
277 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
278 link->conf.Attributes = CONF_ENABLE_IRQ; 277 link->conf.Attributes = CONF_ENABLE_IRQ;
279 link->conf.IntType = INT_MEMORY_AND_IO; 278 link->conf.IntType = INT_MEMORY_AND_IO;
280 279
@@ -312,96 +311,47 @@ static int mio_cs_resume(struct pcmcia_device *link)
312 return 0; 311 return 0;
313} 312}
314 313
315static void mio_cs_config(struct pcmcia_device *link)
316{
317 tuple_t tuple;
318 u_short buf[128];
319 cisparse_t parse;
320 int manfid = 0, prodid = 0;
321 int ret;
322
323 DPRINTK("mio_cs_config(link=%p)\n", link);
324 314
325 tuple.TupleData = (cisdata_t *) buf; 315static int mio_pcmcia_config_loop(struct pcmcia_device *p_dev,
326 tuple.TupleOffset = 0; 316 cistpl_cftable_entry_t *cfg,
327 tuple.TupleDataMax = 255; 317 cistpl_cftable_entry_t *dflt,
328 tuple.Attributes = 0; 318 unsigned int vcc,
319 void *priv_data)
320{
321 int base, ret;
329 322
330 tuple.DesiredTuple = CISTPL_CONFIG; 323 p_dev->io.NumPorts1 = cfg->io.win[0].len;
331 ret = pcmcia_get_first_tuple(link, &tuple); 324 p_dev->io.IOAddrLines = cfg->io.flags & CISTPL_IO_LINES_MASK;
332 ret = pcmcia_get_tuple_data(link, &tuple); 325 p_dev->io.NumPorts2 = 0;
333 ret = pcmcia_parse_tuple(&tuple, &parse);
334 link->conf.ConfigBase = parse.config.base;
335 link->conf.Present = parse.config.rmask[0];
336 326
337#if 0 327 for (base = 0x000; base < 0x400; base += 0x20) {
338 tuple.DesiredTuple = CISTPL_LONGLINK_MFC; 328 p_dev->io.BasePort1 = base;
339 tuple.Attributes = TUPLE_RETURN_COMMON | TUPLE_RETURN_LINK; 329 ret = pcmcia_request_io(p_dev, &p_dev->io);
340 info->multi(first_tuple(link, &tuple, &parse) == 0); 330 if (!ret)
341#endif 331 return 0;
342
343 tuple.DesiredTuple = CISTPL_MANFID;
344 tuple.Attributes = TUPLE_RETURN_COMMON;
345 if ((pcmcia_get_first_tuple(link, &tuple) == 0) &&
346 (pcmcia_get_tuple_data(link, &tuple) == 0)) {
347 manfid = le16_to_cpu(buf[0]);
348 prodid = le16_to_cpu(buf[1]);
349 } 332 }
350 /* printk("manfid = 0x%04x, 0x%04x\n",manfid,prodid); */ 333 return -ENODEV;
334}
351 335
352 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
353 tuple.Attributes = 0;
354 ret = pcmcia_get_first_tuple(link, &tuple);
355 ret = pcmcia_get_tuple_data(link, &tuple);
356 ret = pcmcia_parse_tuple(&tuple, &parse);
357 336
358#if 0 337static void mio_cs_config(struct pcmcia_device *link)
359 printk(" index: 0x%x\n", parse.cftable_entry.index); 338{
360 printk(" flags: 0x%x\n", parse.cftable_entry.flags); 339 int ret;
361 printk(" io flags: 0x%x\n", parse.cftable_entry.io.flags);
362 printk(" io nwin: 0x%x\n", parse.cftable_entry.io.nwin);
363 printk(" io base: 0x%x\n", parse.cftable_entry.io.win[0].base);
364 printk(" io len: 0x%x\n", parse.cftable_entry.io.win[0].len);
365 printk(" irq1: 0x%x\n", parse.cftable_entry.irq.IRQInfo1);
366 printk(" irq2: 0x%x\n", parse.cftable_entry.irq.IRQInfo2);
367 printk(" mem flags: 0x%x\n", parse.cftable_entry.mem.flags);
368 printk(" mem nwin: 0x%x\n", parse.cftable_entry.mem.nwin);
369 printk(" subtuples: 0x%x\n", parse.cftable_entry.subtuples);
370#endif
371 340
372#if 0 341 DPRINTK("mio_cs_config(link=%p)\n", link);
373 link->io.NumPorts1 = 0x20;
374 link->io.IOAddrLines = 5;
375 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
376#endif
377 link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
378 link->io.IOAddrLines =
379 parse.cftable_entry.io.flags & CISTPL_IO_LINES_MASK;
380 link->io.NumPorts2 = 0;
381 342
382 { 343 ret = pcmcia_loop_config(link, mio_pcmcia_config_loop, NULL);
383 int base; 344 if (ret) {
384 for (base = 0x000; base < 0x400; base += 0x20) { 345 dev_warn(&link->dev, "no configuration found\n");
385 link->io.BasePort1 = base; 346 return;
386 ret = pcmcia_request_io(link, &link->io);
387 /* printk("RequestIO 0x%02x\n",ret); */
388 if (!ret)
389 break;
390 }
391 } 347 }
392 348
393 link->irq.IRQInfo1 = parse.cftable_entry.irq.IRQInfo1;
394 link->irq.IRQInfo2 = parse.cftable_entry.irq.IRQInfo2;
395 ret = pcmcia_request_irq(link, &link->irq); 349 ret = pcmcia_request_irq(link, &link->irq);
396 if (ret) { 350 if (ret) {
397 printk("pcmcia_request_irq() returned error: %i\n", ret); 351 printk("pcmcia_request_irq() returned error: %i\n", ret);
398 } 352 }
399 /* printk("RequestIRQ 0x%02x\n",ret); */
400
401 link->conf.ConfigIndex = 1;
402 353
403 ret = pcmcia_request_configuration(link, &link->conf); 354 ret = pcmcia_request_configuration(link, &link->conf);
404 /* printk("RequestConfiguration %d\n",ret); */
405 355
406 link->dev_node = &dev_node; 356 link->dev_node = &dev_node;
407} 357}
@@ -475,40 +425,17 @@ static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it)
475 return 0; 425 return 0;
476} 426}
477 427
478static int get_prodid(struct comedi_device *dev, struct pcmcia_device *link)
479{
480 tuple_t tuple;
481 u_short buf[128];
482 int prodid = 0;
483
484 tuple.TupleData = (cisdata_t *) buf;
485 tuple.TupleOffset = 0;
486 tuple.TupleDataMax = 255;
487 tuple.DesiredTuple = CISTPL_MANFID;
488 tuple.Attributes = TUPLE_RETURN_COMMON;
489 if ((pcmcia_get_first_tuple(link, &tuple) == 0) &&
490 (pcmcia_get_tuple_data(link, &tuple) == 0)) {
491 prodid = le16_to_cpu(buf[1]);
492 }
493
494 return prodid;
495}
496
497static int ni_getboardtype(struct comedi_device *dev, 428static int ni_getboardtype(struct comedi_device *dev,
498 struct pcmcia_device *link) 429 struct pcmcia_device *link)
499{ 430{
500 int id;
501 int i; 431 int i;
502 432
503 id = get_prodid(dev, link);
504
505 for (i = 0; i < n_ni_boards; i++) { 433 for (i = 0; i < n_ni_boards; i++) {
506 if (ni_boards[i].device_id == id) { 434 if (ni_boards[i].device_id == link->card_id)
507 return i; 435 return i;
508 }
509 } 436 }
510 437
511 printk("unknown board 0x%04x -- pretend it is a ", id); 438 printk("unknown board 0x%04x -- pretend it is a ", link->card_id);
512 439
513 return 0; 440 return 0;
514} 441}
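Two things stand out in the ni_mio_cs.c rework: the new config-loop callback keeps the driver's old habit of probing candidate I/O bases in 0x20 steps until pcmcia_request_io() succeeds, and get_prodid()'s CISTPL_MANFID parsing is dropped in favor of the link->card_id value the PCMCIA core already provides. A userspace illustration of the probing loop; try_base() is a made-up stand-in for pcmcia_request_io(), and the "free" base is arbitrary:

#include <stdio.h>

/* Pretend only base 0x260 is unclaimed, the way pcmcia_request_io()
 * would only succeed for a free I/O window. */
static int try_base(unsigned int base)
{
	return base == 0x260 ? 0 : -1;
}

int main(void)
{
	unsigned int base;

	for (base = 0x000; base < 0x400; base += 0x20) {
		if (!try_base(base)) {
			printf("claimed I/O window at 0x%03x\n", base);
			return 0;
		}
	}
	printf("no configuration found\n");
	return 1;
}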
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index 344b82353e08..5256fd933162 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -55,23 +55,6 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308
55#include <pcmcia/cisreg.h> 55#include <pcmcia/cisreg.h>
56#include <pcmcia/ds.h> 56#include <pcmcia/ds.h>
57 57
58/*
59 All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
60 you do not define PCMCIA_DEBUG at all, all the debug code will be
61 left out. If you compile with PCMCIA_DEBUG=0, the debug code will
62 be present but disabled -- but it can then be enabled for specific
63 modules at load time with a 'pc_debug=#' option to insmod.
64*/
65
66#ifdef PCMCIA_DEBUG
67static int pc_debug = PCMCIA_DEBUG;
68module_param(pc_debug, int, 0644);
69#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
70static char *version = "quatech_daqp_cs.c 1.10 2003/04/21 (Brent Baccala)";
71#else
72#define DEBUG(n, args...)
73#endif
74
75/* Maximum number of separate DAQP devices we'll allow */ 58/* Maximum number of separate DAQP devices we'll allow */
76#define MAX_DEV 4 59#define MAX_DEV 4
77 60
@@ -863,8 +846,6 @@ static int daqp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
863{ 846{
864 int ret; 847 int ret;
865 struct local_info_t *local = dev_table[it->options[0]]; 848 struct local_info_t *local = dev_table[it->options[0]];
866 tuple_t tuple;
867 int i;
868 struct comedi_subdevice *s; 849 struct comedi_subdevice *s;
869 850
870 if (it->options[0] < 0 || it->options[0] >= MAX_DEV || !local) { 851 if (it->options[0] < 0 || it->options[0] >= MAX_DEV || !local) {
@@ -883,29 +864,10 @@ static int daqp_attach(struct comedi_device *dev, struct comedi_devconfig *it)
883 864
884 strcpy(local->board_name, "DAQP"); 865 strcpy(local->board_name, "DAQP");
885 dev->board_name = local->board_name; 866 dev->board_name = local->board_name;
886 867 if (local->link->prod_id[2]) {
887 tuple.DesiredTuple = CISTPL_VERS_1; 868 if (strncmp(local->link->prod_id[2], "DAQP", 4) == 0) {
888 if (pcmcia_get_first_tuple(local->link, &tuple) == 0) { 869 strncpy(local->board_name, local->link->prod_id[2],
889 u_char buf[128]; 870 sizeof(local->board_name));
890
891 buf[0] = buf[sizeof(buf) - 1] = 0;
892 tuple.TupleData = buf;
893 tuple.TupleDataMax = sizeof(buf);
894 tuple.TupleOffset = 2;
895 if (pcmcia_get_tuple_data(local->link, &tuple) == 0) {
896
897 for (i = 0; i < tuple.TupleDataLen - 4; i++)
898 if (buf[i] == 0)
899 break;
900 for (i++; i < tuple.TupleDataLen - 4; i++)
901 if (buf[i] == 0)
902 break;
903 i++;
904 if ((i < tuple.TupleDataLen - 4)
905 && (strncmp(buf + i, "DAQP", 4) == 0)) {
906 strncpy(local->board_name, buf + i,
907 sizeof(local->board_name));
908 }
909 } 871 }
910 } 872 }
911 873
@@ -1058,7 +1020,7 @@ static int daqp_cs_attach(struct pcmcia_device *link)
1058 struct local_info_t *local; 1020 struct local_info_t *local;
1059 int i; 1021 int i;
1060 1022
1061 DEBUG(0, "daqp_cs_attach()\n"); 1023 dev_dbg(&link->dev, "daqp_cs_attach()\n");
1062 1024
1063 for (i = 0; i < MAX_DEV; i++) 1025 for (i = 0; i < MAX_DEV; i++)
1064 if (dev_table[i] == NULL) 1026 if (dev_table[i] == NULL)
@@ -1079,10 +1041,8 @@ static int daqp_cs_attach(struct pcmcia_device *link)
1079 link->priv = local; 1041 link->priv = local;
1080 1042
1081 /* Interrupt setup */ 1043 /* Interrupt setup */
1082 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT; 1044 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
1083 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
1084 link->irq.Handler = daqp_interrupt; 1045 link->irq.Handler = daqp_interrupt;
1085 link->irq.Instance = local;
1086 1046
1087 /* 1047 /*
1088 General socket configuration defaults can go here. In this 1048 General socket configuration defaults can go here. In this
@@ -1112,7 +1072,7 @@ static void daqp_cs_detach(struct pcmcia_device *link)
1112{ 1072{
1113 struct local_info_t *dev = link->priv; 1073 struct local_info_t *dev = link->priv;
1114 1074
1115 DEBUG(0, "daqp_cs_detach(0x%p)\n", link); 1075 dev_dbg(&link->dev, "daqp_cs_detach\n");
1116 1076
1117 if (link->dev_node) { 1077 if (link->dev_node) {
1118 dev->stop = 1; 1078 dev->stop = 1;
@@ -1134,115 +1094,54 @@ static void daqp_cs_detach(struct pcmcia_device *link)
1134 1094
1135======================================================================*/ 1095======================================================================*/
1136 1096
1137static void daqp_cs_config(struct pcmcia_device *link)
1138{
1139 struct local_info_t *dev = link->priv;
1140 tuple_t tuple;
1141 cisparse_t parse;
1142 int last_ret;
1143 u_char buf[64];
1144
1145 DEBUG(0, "daqp_cs_config(0x%p)\n", link);
1146
1147 /*
1148 This reads the card's CONFIG tuple to find its configuration
1149 registers.
1150 */
1151 tuple.DesiredTuple = CISTPL_CONFIG;
1152 tuple.Attributes = 0;
1153 tuple.TupleData = buf;
1154 tuple.TupleDataMax = sizeof(buf);
1155 tuple.TupleOffset = 0;
1156
1157 last_ret = pcmcia_get_first_tuple(link, &tuple);
1158 if (last_ret) {
1159 cs_error(link, GetFirstTuple, last_ret);
1160 goto cs_failed;
1161 }
1162 1097
1163 last_ret = pcmcia_get_tuple_data(link, &tuple); 1098static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev,
1164 if (last_ret) { 1099 cistpl_cftable_entry_t *cfg,
1165 cs_error(link, GetTupleData, last_ret); 1100 cistpl_cftable_entry_t *dflt,
1166 goto cs_failed; 1101 unsigned int vcc,
1167 } 1102 void *priv_data)
1168 1103{
1169 last_ret = pcmcia_parse_tuple(&tuple, &parse); 1104 if (cfg->index == 0)
1170 if (last_ret) { 1105 return -ENODEV;
1171 cs_error(link, ParseTuple, last_ret);
1172 goto cs_failed;
1173 }
1174 link->conf.ConfigBase = parse.config.base;
1175 link->conf.Present = parse.config.rmask[0];
1176 1106
1177 /* 1107 /* Do we need to allocate an interrupt? */
1178 In this loop, we scan the CIS for configuration table entries, 1108 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
1179 each of which describes a valid card configuration, including 1109 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
1180 voltage, IO window, memory window, and interrupt settings. 1110
1181 1111 /* IO window settings */
1182 We make no assumptions about the card to be configured: we use 1112 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
1183 just the information available in the CIS. In an ideal world, 1113 if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
1184 this would work for any PCMCIA card, but it requires a complete 1114 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
1185 and accurate CIS. In practice, a driver usually "knows" most of 1115 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
1186 these things without consulting the CIS, and most client drivers 1116 if (!(io->flags & CISTPL_IO_8BIT))
1187 will only use the CIS to fill in implementation-defined details. 1117 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
1188 */ 1118 if (!(io->flags & CISTPL_IO_16BIT))
1189 tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; 1119 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
1190 last_ret = pcmcia_get_first_tuple(link, &tuple); 1120 p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
1191 if (last_ret) { 1121 p_dev->io.BasePort1 = io->win[0].base;
1192 cs_error(link, GetFirstTuple, last_ret); 1122 p_dev->io.NumPorts1 = io->win[0].len;
1193 goto cs_failed; 1123 if (io->nwin > 1) {
1124 p_dev->io.Attributes2 = p_dev->io.Attributes1;
1125 p_dev->io.BasePort2 = io->win[1].base;
1126 p_dev->io.NumPorts2 = io->win[1].len;
1127 }
1194 } 1128 }
1195 1129
1196 while (1) { 1130 /* This reserves IO space but doesn't actually enable it */
1197 cistpl_cftable_entry_t dflt = { 0 }; 1131 return pcmcia_request_io(p_dev, &p_dev->io);
1198 cistpl_cftable_entry_t *cfg = &(parse.cftable_entry); 1132}
1199 if (pcmcia_get_tuple_data(link, &tuple))
1200 goto next_entry;
1201 if (pcmcia_parse_tuple(&tuple, &parse))
1202 goto next_entry;
1203
1204 if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
1205 dflt = *cfg;
1206 if (cfg->index == 0)
1207 goto next_entry;
1208 link->conf.ConfigIndex = cfg->index;
1209
1210 /* Do we need to allocate an interrupt? */
1211 if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
1212 link->conf.Attributes |= CONF_ENABLE_IRQ;
1213
1214 /* IO window settings */
1215 link->io.NumPorts1 = link->io.NumPorts2 = 0;
1216 if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
1217 cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
1218 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
1219 if (!(io->flags & CISTPL_IO_8BIT))
1220 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
1221 if (!(io->flags & CISTPL_IO_16BIT))
1222 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
1223 link->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
1224 link->io.BasePort1 = io->win[0].base;
1225 link->io.NumPorts1 = io->win[0].len;
1226 if (io->nwin > 1) {
1227 link->io.Attributes2 = link->io.Attributes1;
1228 link->io.BasePort2 = io->win[1].base;
1229 link->io.NumPorts2 = io->win[1].len;
1230 }
1231 }
1232 1133
1233 /* This reserves IO space but doesn't actually enable it */ 1134static void daqp_cs_config(struct pcmcia_device *link)
1234 if (pcmcia_request_io(link, &link->io)) 1135{
1235 goto next_entry; 1136 struct local_info_t *dev = link->priv;
1137 int ret;
1236 1138
1237 /* If we got this far, we're cool! */ 1139 dev_dbg(&link->dev, "daqp_cs_config\n");
1238 break;
1239 1140
1240next_entry: 1141 ret = pcmcia_loop_config(link, daqp_pcmcia_config_loop, NULL);
1241 last_ret = pcmcia_get_next_tuple(link, &tuple); 1142 if (ret) {
1242 if (last_ret) { 1143 dev_warn(&link->dev, "no configuration found\n");
1243 cs_error(link, GetNextTuple, last_ret); 1144 goto failed;
1244 goto cs_failed;
1245 }
1246 } 1145 }
1247 1146
1248 /* 1147 /*
@@ -1251,11 +1150,9 @@ next_entry:
1251 irq structure is initialized. 1150 irq structure is initialized.
1252 */ 1151 */
1253 if (link->conf.Attributes & CONF_ENABLE_IRQ) { 1152 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
1254 last_ret = pcmcia_request_irq(link, &link->irq); 1153 ret = pcmcia_request_irq(link, &link->irq);
1255 if (last_ret) { 1154 if (ret)
1256 cs_error(link, RequestIRQ, last_ret); 1155 goto failed;
1257 goto cs_failed;
1258 }
1259 } 1156 }
1260 1157
1261 /* 1158 /*
@@ -1263,11 +1160,9 @@ next_entry:
1263 the I/O windows and the interrupt mapping, and putting the 1160 the I/O windows and the interrupt mapping, and putting the
1264 card and host interface into "Memory and IO" mode. 1161 card and host interface into "Memory and IO" mode.
1265 */ 1162 */
1266 last_ret = pcmcia_request_configuration(link, &link->conf); 1163 ret = pcmcia_request_configuration(link, &link->conf);
1267 if (last_ret) { 1164 if (ret)
1268 cs_error(link, RequestConfiguration, last_ret); 1165 goto failed;
1269 goto cs_failed;
1270 }
1271 1166
1272 /* 1167 /*
1273 At this point, the dev_node_t structure(s) need to be 1168 At this point, the dev_node_t structure(s) need to be
@@ -1296,14 +1191,14 @@ next_entry:
1296 1191
1297 return; 1192 return;
1298 1193
1299cs_failed: 1194failed:
1300 daqp_cs_release(link); 1195 daqp_cs_release(link);
1301 1196
1302} /* daqp_cs_config */ 1197} /* daqp_cs_config */
1303 1198
1304static void daqp_cs_release(struct pcmcia_device *link) 1199static void daqp_cs_release(struct pcmcia_device *link)
1305{ 1200{
1306 DEBUG(0, "daqp_cs_release(0x%p)\n", link); 1201 dev_dbg(&link->dev, "daqp_cs_release\n");
1307 1202
1308 pcmcia_disable_device(link); 1203 pcmcia_disable_device(link);
1309} /* daqp_cs_release */ 1204} /* daqp_cs_release */
@@ -1363,7 +1258,6 @@ struct pcmcia_driver daqp_cs_driver = {
1363 1258
1364int __init init_module(void) 1259int __init init_module(void)
1365{ 1260{
1366 DEBUG(0, "%s\n", version);
1367 pcmcia_register_driver(&daqp_cs_driver); 1261 pcmcia_register_driver(&daqp_cs_driver);
1368 comedi_driver_register(&driver_daqp); 1262 comedi_driver_register(&driver_daqp);
1369 return 0; 1263 return 0;
@@ -1371,7 +1265,6 @@ int __init init_module(void)
1371 1265
1372void __exit cleanup_module(void) 1266void __exit cleanup_module(void)
1373{ 1267{
1374 DEBUG(0, "daqp_cs: unloading\n");
1375 comedi_driver_unregister(&driver_daqp); 1268 comedi_driver_unregister(&driver_daqp);
1376 pcmcia_unregister_driver(&daqp_cs_driver); 1269 pcmcia_unregister_driver(&daqp_cs_driver);
1377} 1270}
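In quatech_daqp_cs.c the by-hand CISTPL_VERS_1 walk (skipping two NUL-terminated strings inside the raw tuple buffer) is replaced by reading the product string the core has already parsed into link->prod_id[2]. A userspace sketch of the string handling that remains; the sample string and the 64-byte buffer are made up for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* link->prod_id[2] as the PCMCIA core would have parsed it */
	const char *prod_id2 = "DAQP-308";
	char board_name[64] = "DAQP";    /* default, as in daqp_attach() */

	if (prod_id2 && strncmp(prod_id2, "DAQP", 4) == 0) {
		strncpy(board_name, prod_id2, sizeof(board_name) - 1);
		board_name[sizeof(board_name) - 1] = '\0';
	}
	printf("board_name = %s\n", board_name);
	return 0;
}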
diff --git a/drivers/staging/go7007/s2250-board.c b/drivers/staging/go7007/s2250-board.c
index 8c85a9c3665a..f4a6541c3e60 100644
--- a/drivers/staging/go7007/s2250-board.c
+++ b/drivers/staging/go7007/s2250-board.c
@@ -261,7 +261,7 @@ static int read_reg_fp(struct i2c_client *client, u16 addr, u16 *val)
261 261
262 memset(buf, 0xcd, 6); 262 memset(buf, 0xcd, 6);
263 usb = go->hpi_context; 263 usb = go->hpi_context;
264 if (down_interruptible(&usb->i2c_lock) != 0) { 264 if (mutex_lock_interruptible(&usb->i2c_lock) != 0) {
265 printk(KERN_INFO "i2c lock failed\n"); 265 printk(KERN_INFO "i2c lock failed\n");
266 kfree(buf); 266 kfree(buf);
267 return -EINTR; 267 return -EINTR;
@@ -270,7 +270,7 @@ static int read_reg_fp(struct i2c_client *client, u16 addr, u16 *val)
270 kfree(buf); 270 kfree(buf);
271 return -EFAULT; 271 return -EFAULT;
272 } 272 }
273 up(&usb->i2c_lock); 273 mutex_unlock(&usb->i2c_lock);
274 274
275 *val = (buf[0] << 8) | buf[1]; 275 *val = (buf[0] << 8) | buf[1];
276 kfree(buf); 276 kfree(buf);
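The s2250-board.c hunk switches usb->i2c_lock from semaphore calls (down_interruptible()/up()) to mutex calls (mutex_lock_interruptible()/mutex_unlock()); the field itself presumably becomes a struct mutex elsewhere in this series. A minimal sketch of the pattern, assuming a statically defined mutex rather than the driver's embedded one:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(i2c_lock);       /* stand-in for usb->i2c_lock */

static int example_read(void)
{
	if (mutex_lock_interruptible(&i2c_lock))
		return -EINTR;        /* interrupted while waiting, as above */

	/* ...issue the I2C transaction... */

	mutex_unlock(&i2c_lock);
	return 0;
}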
diff --git a/drivers/staging/go7007/s2250-loader.h b/drivers/staging/go7007/s2250-loader.h
new file mode 100644
index 000000000000..b7c301af16cc
--- /dev/null
+++ b/drivers/staging/go7007/s2250-loader.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (C) 2005-2006 Micronas USA Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 */
17
18#ifndef _S2250_LOADER_H_
19#define _S2250_LOADER_H_
20
21extern int s2250loader_init(void);
22extern void s2250loader_cleanup(void);
23
24#endif
diff --git a/drivers/staging/hv/BlkVsc.c b/drivers/staging/hv/BlkVsc.c
index 51aa861292fc..a48ee3a12646 100644
--- a/drivers/staging/hv/BlkVsc.c
+++ b/drivers/staging/hv/BlkVsc.c
@@ -16,6 +16,7 @@
16 * Place - Suite 330, Boston, MA 02111-1307 USA. 16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * 17 *
18 * Authors: 18 * Authors:
19 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com> 20 * Hank Janssen <hjanssen@microsoft.com>
20 * 21 *
21 */ 22 */
diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
index d649ee169d95..746370e82115 100644
--- a/drivers/staging/hv/Channel.c
+++ b/drivers/staging/hv/Channel.c
@@ -611,7 +611,7 @@ void VmbusChannelClose(struct vmbus_channel *Channel)
611 611
612 /* Stop callback and cancel the timer asap */ 612 /* Stop callback and cancel the timer asap */
613 Channel->OnChannelCallback = NULL; 613 Channel->OnChannelCallback = NULL;
614 del_timer(&Channel->poll_timer); 614 del_timer_sync(&Channel->poll_timer);
615 615
616 /* Send a closing message */ 616 /* Send a closing message */
617 info = kmalloc(sizeof(*info) + 617 info = kmalloc(sizeof(*info) +
@@ -978,14 +978,10 @@ void VmbusChannelOnChannelEvent(struct vmbus_channel *Channel)
978{ 978{
979 DumpVmbusChannel(Channel); 979 DumpVmbusChannel(Channel);
980 ASSERT(Channel->OnChannelCallback); 980 ASSERT(Channel->OnChannelCallback);
981#ifdef ENABLE_POLLING 981
982 del_timer(&Channel->poll_timer);
983 Channel->OnChannelCallback(Channel->ChannelCallbackContext);
984 channel->poll_timer.expires(jiffies + usecs_to_jiffies(100);
985 add_timer(&channel->poll_timer);
986#else
987 Channel->OnChannelCallback(Channel->ChannelCallbackContext); 982 Channel->OnChannelCallback(Channel->ChannelCallbackContext);
988#endif 983
984 mod_timer(&Channel->poll_timer, jiffies + usecs_to_jiffies(100));
989} 985}
990 986
991/** 987/**
@@ -997,10 +993,6 @@ void VmbusChannelOnTimer(unsigned long data)
997 993
998 if (channel->OnChannelCallback) { 994 if (channel->OnChannelCallback) {
999 channel->OnChannelCallback(channel->ChannelCallbackContext); 995 channel->OnChannelCallback(channel->ChannelCallbackContext);
1000#ifdef ENABLE_POLLING
1001 channel->poll_timer.expires(jiffies + usecs_to_jiffies(100);
1002 add_timer(&channel->poll_timer);
1003#endif
1004 } 996 }
1005} 997}
1006 998
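The Channel.c hunks make two related changes: del_timer() becomes del_timer_sync() on the teardown paths, so a handler still running on another CPU is waited for, and the polling timer is rearmed with mod_timer() instead of the expires/add_timer() pair that used to hide behind ENABLE_POLLING. A sketch of that rearm/teardown pattern, assuming a timer initialized with setup_timer() as was usual in this kernel generation; the 100-microsecond period is taken from the hunk above, and for brevity the sketch rearms from the handler itself rather than from the channel-event path:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_timer;

static void poll_timer_fn(unsigned long data)
{
	/* ...do the polling work... */

	/* rearm: mod_timer() replaces the expires= / add_timer() pair */
	mod_timer(&poll_timer, jiffies + usecs_to_jiffies(100));
}

static void example_setup(void)
{
	setup_timer(&poll_timer, poll_timer_fn, 0);
	mod_timer(&poll_timer, jiffies + usecs_to_jiffies(100));
}

static void example_teardown(void)
{
	/* del_timer_sync() also waits for a handler running on another CPU */
	del_timer_sync(&poll_timer);
}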
diff --git a/drivers/staging/hv/ChannelMgmt.c b/drivers/staging/hv/ChannelMgmt.c
index 3db62caedcff..ef38467ed4e2 100644
--- a/drivers/staging/hv/ChannelMgmt.c
+++ b/drivers/staging/hv/ChannelMgmt.c
@@ -119,7 +119,7 @@ static inline void ReleaseVmbusChannel(void *context)
119 */ 119 */
120void FreeVmbusChannel(struct vmbus_channel *Channel) 120void FreeVmbusChannel(struct vmbus_channel *Channel)
121{ 121{
122 del_timer(&Channel->poll_timer); 122 del_timer_sync(&Channel->poll_timer);
123 123
124 /* 124 /*
125 * We have to release the channel's workqueue/thread in the vmbus's 125 * We have to release the channel's workqueue/thread in the vmbus's
diff --git a/drivers/staging/hv/NetVsc.c b/drivers/staging/hv/NetVsc.c
index d384c0ddf069..1c717f9a554e 100644
--- a/drivers/staging/hv/NetVsc.c
+++ b/drivers/staging/hv/NetVsc.c
@@ -15,6 +15,7 @@
15 * Place - Suite 330, Boston, MA 02111-1307 USA. 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 * 16 *
17 * Authors: 17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
18 * Hank Janssen <hjanssen@microsoft.com> 19 * Hank Janssen <hjanssen@microsoft.com>
19 */ 20 */
20#include <linux/kernel.h> 21#include <linux/kernel.h>
diff --git a/drivers/staging/hv/NetVsc.h b/drivers/staging/hv/NetVsc.h
index 3e7112f7c755..6e0e03494126 100644
--- a/drivers/staging/hv/NetVsc.h
+++ b/drivers/staging/hv/NetVsc.h
@@ -16,6 +16,7 @@
16 * Place - Suite 330, Boston, MA 02111-1307 USA. 16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 * 17 *
18 * Authors: 18 * Authors:
19 * Haiyang Zhang <haiyangz@microsoft.com>
19 * Hank Janssen <hjanssen@microsoft.com> 20 * Hank Janssen <hjanssen@microsoft.com>
20 * 21 *
21 */ 22 */
diff --git a/drivers/staging/hv/StorVsc.c b/drivers/staging/hv/StorVsc.c
index 14015c927940..2f7c425896f7 100644
--- a/drivers/staging/hv/StorVsc.c
+++ b/drivers/staging/hv/StorVsc.c
@@ -196,7 +196,7 @@ static int StorVscChannelInit(struct hv_device *Device)
196 * Now, initiate the vsc/vsp initialization protocol on the open 196 * Now, initiate the vsc/vsp initialization protocol on the open
197 * channel 197 * channel
198 */ 198 */
199 memset(request, sizeof(struct storvsc_request_extension), 0); 199 memset(request, 0, sizeof(struct storvsc_request_extension));
200 request->WaitEvent = osd_WaitEventCreate(); 200 request->WaitEvent = osd_WaitEventCreate();
201 201
202 vstorPacket->Operation = VStorOperationBeginInitialization; 202 vstorPacket->Operation = VStorOperationBeginInitialization;
@@ -233,7 +233,7 @@ static int StorVscChannelInit(struct hv_device *Device)
233 DPRINT_INFO(STORVSC, "QUERY_PROTOCOL_VERSION_OPERATION..."); 233 DPRINT_INFO(STORVSC, "QUERY_PROTOCOL_VERSION_OPERATION...");
234 234
235 /* reuse the packet for version range supported */ 235 /* reuse the packet for version range supported */
236 memset(vstorPacket, sizeof(struct vstor_packet), 0); 236 memset(vstorPacket, 0, sizeof(struct vstor_packet));
237 vstorPacket->Operation = VStorOperationQueryProtocolVersion; 237 vstorPacket->Operation = VStorOperationQueryProtocolVersion;
238 vstorPacket->Flags = REQUEST_COMPLETION_FLAG; 238 vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
239 239
@@ -266,7 +266,7 @@ static int StorVscChannelInit(struct hv_device *Device)
266 /* Query channel properties */ 266 /* Query channel properties */
267 DPRINT_INFO(STORVSC, "QUERY_PROPERTIES_OPERATION..."); 267 DPRINT_INFO(STORVSC, "QUERY_PROPERTIES_OPERATION...");
268 268
269 memset(vstorPacket, sizeof(struct vstor_packet), 0); 269 memset(vstorPacket, 0, sizeof(struct vstor_packet));
270 vstorPacket->Operation = VStorOperationQueryProperties; 270 vstorPacket->Operation = VStorOperationQueryProperties;
271 vstorPacket->Flags = REQUEST_COMPLETION_FLAG; 271 vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
272 vstorPacket->StorageChannelProperties.PortNumber = 272 vstorPacket->StorageChannelProperties.PortNumber =
@@ -305,7 +305,7 @@ static int StorVscChannelInit(struct hv_device *Device)
305 305
306 DPRINT_INFO(STORVSC, "END_INITIALIZATION_OPERATION..."); 306 DPRINT_INFO(STORVSC, "END_INITIALIZATION_OPERATION...");
307 307
308 memset(vstorPacket, sizeof(struct vstor_packet), 0); 308 memset(vstorPacket, 0, sizeof(struct vstor_packet));
309 vstorPacket->Operation = VStorOperationEndInitialization; 309 vstorPacket->Operation = VStorOperationEndInitialization;
310 vstorPacket->Flags = REQUEST_COMPLETION_FLAG; 310 vstorPacket->Flags = REQUEST_COMPLETION_FLAG;
311 311
@@ -508,7 +508,7 @@ static int StorVscConnectToVsp(struct hv_device *Device)
508 int ret; 508 int ret;
509 509
510 storDriver = (struct storvsc_driver_object *)Device->Driver; 510 storDriver = (struct storvsc_driver_object *)Device->Driver;
511 memset(&props, sizeof(struct vmstorage_channel_properties), 0); 511 memset(&props, 0, sizeof(struct vmstorage_channel_properties));
512 512
513 /* Open the channel */ 513 /* Open the channel */
514 ret = Device->Driver->VmbusChannelInterface.Open(Device, 514 ret = Device->Driver->VmbusChannelInterface.Open(Device,
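Every StorVsc.c hunk above fixes the same transposed-argument bug: memset(ptr, size, 0) asks for zero bytes to be filled (the third argument is the length), so those structures were never actually cleared. A tiny runnable demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[4] = { 0xaa, 0xaa, 0xaa, 0xaa };

	memset(buf, sizeof(buf), 0);     /* wrong: length 0, nothing cleared */
	printf("after wrong memset:   %02x %02x %02x %02x\n",
	       buf[0], buf[1], buf[2], buf[3]);

	memset(buf, 0, sizeof(buf));     /* right: clears all four bytes */
	printf("after correct memset: %02x %02x %02x %02x\n",
	       buf[0], buf[1], buf[2], buf[3]);
	return 0;
}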
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index 99c49261a8b4..62b282844a53 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -15,6 +15,7 @@
15 * Place - Suite 330, Boston, MA 02111-1307 USA. 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 * 16 *
17 * Authors: 17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
18 * Hank Janssen <hjanssen@microsoft.com> 19 * Hank Janssen <hjanssen@microsoft.com>
19 */ 20 */
20#include <linux/init.h> 21#include <linux/init.h>
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 3192d50f7251..0d7459e2d036 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -15,6 +15,7 @@
15 * Place - Suite 330, Boston, MA 02111-1307 USA. 15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 * 16 *
17 * Authors: 17 * Authors:
18 * Haiyang Zhang <haiyangz@microsoft.com>
18 * Hank Janssen <hjanssen@microsoft.com> 19 * Hank Janssen <hjanssen@microsoft.com>
19 */ 20 */
20#include <linux/init.h> 21#include <linux/init.h>
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 42230e62a222..31a58e508924 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -170,7 +170,7 @@ static u32 cvm_oct_get_link(struct net_device *dev)
170 return ret; 170 return ret;
171} 171}
172 172
173struct const ethtool_ops cvm_oct_ethtool_ops = { 173const struct ethtool_ops cvm_oct_ethtool_ops = {
174 .get_drvinfo = cvm_oct_get_drvinfo, 174 .get_drvinfo = cvm_oct_get_drvinfo,
175 .get_settings = cvm_oct_get_settings, 175 .get_settings = cvm_oct_get_settings,
176 .set_settings = cvm_oct_set_settings, 176 .set_settings = cvm_oct_set_settings,
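The one-line ethernet-mdio.c change fixes a declaration that does not even parse: the const qualifier cannot sit between the struct keyword and the tag. A standalone illustration with a made-up ops struct:

#include <stdio.h>

struct ops { int (*get)(void); };
static int get42(void) { return 42; }

/* "struct const ops example_ops" would be a syntax error; the qualifier
 * goes before (or after) the whole type name: */
static const struct ops example_ops = { .get = get42 };

int main(void)
{
	printf("%d\n", example_ops.get());
	return 0;
}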
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 66190b0cb68f..00dc0f4bad19 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -317,6 +317,6 @@ void cvm_oct_spi_uninit(struct net_device *dev)
317 cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0); 317 cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
318 cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0); 318 cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
319 } 319 }
320 free_irq(8 + 46, &number_spi_ports); 320 free_irq(OCTEON_IRQ_RML, &number_spi_ports);
321 } 321 }
322} 322}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index b8479517dce2..492c5029992d 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -111,6 +111,16 @@ MODULE_PARM_DESC(disable_core_queueing, "\n"
111 "\tallows packets to be sent without lock contention in the packet\n" 111 "\tallows packets to be sent without lock contention in the packet\n"
112 "\tscheduler resulting in some cases in improved throughput.\n"); 112 "\tscheduler resulting in some cases in improved throughput.\n");
113 113
114
115/*
116 * The offset from mac_addr_base that should be used for the next port
117 * that is configured. By convention, if any mgmt ports exist on the
118 * chip, they get the first mac addresses. The ports controlled by
119 * this driver are numbered sequentially following any mgmt addresses
120 * that may exist.
121 */
122static unsigned int cvm_oct_mac_addr_offset;
123
114/** 124/**
115 * Periodic timer to check auto negotiation 125 * Periodic timer to check auto negotiation
116 */ 126 */
@@ -474,16 +484,30 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
474 */ 484 */
475int cvm_oct_common_init(struct net_device *dev) 485int cvm_oct_common_init(struct net_device *dev)
476{ 486{
477 static int count;
478 char mac[8] = { 0x00, 0x00,
479 octeon_bootinfo->mac_addr_base[0],
480 octeon_bootinfo->mac_addr_base[1],
481 octeon_bootinfo->mac_addr_base[2],
482 octeon_bootinfo->mac_addr_base[3],
483 octeon_bootinfo->mac_addr_base[4],
484 octeon_bootinfo->mac_addr_base[5] + count
485 };
486 struct octeon_ethernet *priv = netdev_priv(dev); 487 struct octeon_ethernet *priv = netdev_priv(dev);
488 struct sockaddr sa;
489 u64 mac = ((u64)(octeon_bootinfo->mac_addr_base[0] & 0xff) << 40) |
490 ((u64)(octeon_bootinfo->mac_addr_base[1] & 0xff) << 32) |
491 ((u64)(octeon_bootinfo->mac_addr_base[2] & 0xff) << 24) |
492 ((u64)(octeon_bootinfo->mac_addr_base[3] & 0xff) << 16) |
493 ((u64)(octeon_bootinfo->mac_addr_base[4] & 0xff) << 8) |
494 (u64)(octeon_bootinfo->mac_addr_base[5] & 0xff);
495
496 mac += cvm_oct_mac_addr_offset;
497 sa.sa_data[0] = (mac >> 40) & 0xff;
498 sa.sa_data[1] = (mac >> 32) & 0xff;
499 sa.sa_data[2] = (mac >> 24) & 0xff;
500 sa.sa_data[3] = (mac >> 16) & 0xff;
501 sa.sa_data[4] = (mac >> 8) & 0xff;
502 sa.sa_data[5] = mac & 0xff;
503
504 if (cvm_oct_mac_addr_offset >= octeon_bootinfo->mac_addr_count)
505 printk(KERN_DEBUG "%s: Using MAC outside of the assigned range:"
506 " %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
507 sa.sa_data[0] & 0xff, sa.sa_data[1] & 0xff,
508 sa.sa_data[2] & 0xff, sa.sa_data[3] & 0xff,
509 sa.sa_data[4] & 0xff, sa.sa_data[5] & 0xff);
510 cvm_oct_mac_addr_offset++;
487 511
488 /* 512 /*
489 * Force the interface to use the POW send if always_use_pow 513 * Force the interface to use the POW send if always_use_pow
@@ -496,14 +520,12 @@ int cvm_oct_common_init(struct net_device *dev)
496 if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM) 520 if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
497 dev->features |= NETIF_F_IP_CSUM; 521 dev->features |= NETIF_F_IP_CSUM;
498 522
499 count++;
500
501 /* We do our own locking, Linux doesn't need to */ 523 /* We do our own locking, Linux doesn't need to */
502 dev->features |= NETIF_F_LLTX; 524 dev->features |= NETIF_F_LLTX;
503 SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops); 525 SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
504 526
505 cvm_oct_mdio_setup_device(dev); 527 cvm_oct_mdio_setup_device(dev);
506 dev->netdev_ops->ndo_set_mac_address(dev, mac); 528 dev->netdev_ops->ndo_set_mac_address(dev, &sa);
507 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu); 529 dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
508 530
509 /* 531 /*
@@ -620,6 +642,13 @@ static int __init cvm_oct_init_module(void)
620 642
621 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION); 643 pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
622 644
645 if (OCTEON_IS_MODEL(OCTEON_CN52XX))
646 cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
647 else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
648 cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
649 else
650 cvm_oct_mac_addr_offset = 0;
651
623 cvm_oct_proc_initialize(); 652 cvm_oct_proc_initialize();
624 cvm_oct_rx_initialize(); 653 cvm_oct_rx_initialize();
625 cvm_oct_configure_common_hw(); 654 cvm_oct_configure_common_hw();
diff --git a/drivers/staging/rtl8187se/TODO b/drivers/staging/rtl8187se/TODO
index c09a9160739d..a762e79873e9 100644
--- a/drivers/staging/rtl8187se/TODO
+++ b/drivers/staging/rtl8187se/TODO
@@ -11,5 +11,4 @@ TODO:
11- sparse fixes 11- sparse fixes
12- integrate with drivers/net/wireless/rtl818x 12- integrate with drivers/net/wireless/rtl818x
13 13
14Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and 14Please send any patches to Greg Kroah-Hartman <greg@kroah.com>.
15Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>.
diff --git a/drivers/staging/rtl8192su/TODO b/drivers/staging/rtl8192su/TODO
index b13be9edb278..f11eec700030 100644
--- a/drivers/staging/rtl8192su/TODO
+++ b/drivers/staging/rtl8192su/TODO
@@ -14,5 +14,4 @@ TODO:
14- sparse fixes 14- sparse fixes
15- integrate with drivers/net/wireless/rtl818x 15- integrate with drivers/net/wireless/rtl818x
16 16
17Please send any patches to Greg Kroah-Hartman <greg@kroah.com> and 17Please send any patches to Greg Kroah-Hartman <greg@kroah.com>.
18Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>.
diff --git a/drivers/staging/vt6655/TODO b/drivers/staging/vt6655/TODO
index 8462cd17eb61..cb04aaafc46f 100644
--- a/drivers/staging/vt6655/TODO
+++ b/drivers/staging/vt6655/TODO
@@ -16,6 +16,5 @@ TODO:
16- sparse fixes 16- sparse fixes
17- integrate with drivers/net/wireless 17- integrate with drivers/net/wireless
18 18
19Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, 19Please send any patches to Greg Kroah-Hartman <greg@kroah.com>
20Forest Bond <forest@alittletooquiet.net> and Bartlomiej Zolnierkiewicz 20and Forest Bond <forest@alittletooquiet.net>.
21<bzolnier@gmail.com>.
diff --git a/drivers/staging/vt6656/TODO b/drivers/staging/vt6656/TODO
index 17cf50c6735e..a318995ba07f 100644
--- a/drivers/staging/vt6656/TODO
+++ b/drivers/staging/vt6656/TODO
@@ -15,6 +15,5 @@ TODO:
15- sparse fixes 15- sparse fixes
16- integrate with drivers/net/wireless 16- integrate with drivers/net/wireless
17 17
18Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, 18Please send any patches to Greg Kroah-Hartman <greg@kroah.com>
19Forest Bond <forest@alittletooquiet.net> and Bartlomiej Zolnierkiewicz 19and Forest Bond <forest@alittletooquiet.net>.
20<bzolnier@gmail.com>.
diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c
index 347c3ed1d9f1..d442fd35620a 100644
--- a/drivers/telephony/ixj_pcmcia.c
+++ b/drivers/telephony/ixj_pcmcia.c
@@ -19,13 +19,6 @@
19 * PCMCIA service support for Quicknet cards 19 * PCMCIA service support for Quicknet cards
20 */ 20 */
21 21
22#ifdef PCMCIA_DEBUG
23static int pc_debug = PCMCIA_DEBUG;
24module_param(pc_debug, int, 0644);
25#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
26#else
27#define DEBUG(n, args...)
28#endif
29 22
30typedef struct ixj_info_t { 23typedef struct ixj_info_t {
31 int ndev; 24 int ndev;
@@ -39,7 +32,7 @@ static void ixj_cs_release(struct pcmcia_device * link);
39 32
40static int ixj_probe(struct pcmcia_device *p_dev) 33static int ixj_probe(struct pcmcia_device *p_dev)
41{ 34{
42 DEBUG(0, "ixj_attach()\n"); 35 dev_dbg(&p_dev->dev, "ixj_attach()\n");
43 /* Create new ixj device */ 36 /* Create new ixj device */
44 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 37 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
45 p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 38 p_dev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
@@ -55,33 +48,30 @@ static int ixj_probe(struct pcmcia_device *p_dev)
55 48
56static void ixj_detach(struct pcmcia_device *link) 49static void ixj_detach(struct pcmcia_device *link)
57{ 50{
58 DEBUG(0, "ixj_detach(0x%p)\n", link); 51 dev_dbg(&link->dev, "ixj_detach\n");
59 52
60 ixj_cs_release(link); 53 ixj_cs_release(link);
61 54
62 kfree(link->priv); 55 kfree(link->priv);
63} 56}
64 57
65#define CS_CHECK(fn, ret) \
66do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
67
68static void ixj_get_serial(struct pcmcia_device * link, IXJ * j) 58static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
69{ 59{
70 char *str; 60 char *str;
71 int i, place; 61 int i, place;
72 DEBUG(0, "ixj_get_serial(0x%p)\n", link); 62 dev_dbg(&link->dev, "ixj_get_serial\n");
73 63
74 str = link->prod_id[0]; 64 str = link->prod_id[0];
75 if (!str) 65 if (!str)
76 goto cs_failed; 66 goto failed;
77 printk("%s", str); 67 printk("%s", str);
78 str = link->prod_id[1]; 68 str = link->prod_id[1];
79 if (!str) 69 if (!str)
80 goto cs_failed; 70 goto failed;
81 printk(" %s", str); 71 printk(" %s", str);
82 str = link->prod_id[2]; 72 str = link->prod_id[2];
83 if (!str) 73 if (!str)
84 goto cs_failed; 74 goto failed;
85 place = 1; 75 place = 1;
86 for (i = strlen(str) - 1; i >= 0; i--) { 76 for (i = strlen(str) - 1; i >= 0; i--) {
87 switch (str[i]) { 77 switch (str[i]) {
@@ -118,9 +108,9 @@ static void ixj_get_serial(struct pcmcia_device * link, IXJ * j)
118 } 108 }
119 str = link->prod_id[3]; 109 str = link->prod_id[3];
120 if (!str) 110 if (!str)
121 goto cs_failed; 111 goto failed;
122 printk(" version %s\n", str); 112 printk(" version %s\n", str);
123 cs_failed: 113failed:
124 return; 114 return;
125} 115}
126 116
@@ -151,13 +141,13 @@ static int ixj_config(struct pcmcia_device * link)
151 cistpl_cftable_entry_t dflt = { 0 }; 141 cistpl_cftable_entry_t dflt = { 0 };
152 142
153 info = link->priv; 143 info = link->priv;
154 DEBUG(0, "ixj_config(0x%p)\n", link); 144 dev_dbg(&link->dev, "ixj_config\n");
155 145
156 if (pcmcia_loop_config(link, ixj_config_check, &dflt)) 146 if (pcmcia_loop_config(link, ixj_config_check, &dflt))
157 goto cs_failed; 147 goto failed;
158 148
159 if (pcmcia_request_configuration(link, &link->conf)) 149 if (pcmcia_request_configuration(link, &link->conf))
160 goto cs_failed; 150 goto failed;
161 151
162 /* 152 /*
163 * Register the card with the core. 153 * Register the card with the core.
@@ -170,7 +160,7 @@ static int ixj_config(struct pcmcia_device * link)
170 ixj_get_serial(link, j); 160 ixj_get_serial(link, j);
171 return 0; 161 return 0;
172 162
173 cs_failed: 163failed:
174 ixj_cs_release(link); 164 ixj_cs_release(link);
175 return -ENODEV; 165 return -ENODEV;
176} 166}
@@ -178,7 +168,7 @@ static int ixj_config(struct pcmcia_device * link)
178static void ixj_cs_release(struct pcmcia_device *link) 168static void ixj_cs_release(struct pcmcia_device *link)
179{ 169{
180 ixj_info_t *info = link->priv; 170 ixj_info_t *info = link->priv;
181 DEBUG(0, "ixj_cs_release(0x%p)\n", link); 171 dev_dbg(&link->dev, "ixj_cs_release\n");
182 info->ndev = 0; 172 info->ndev = 0;
183 pcmcia_disable_device(link); 173 pcmcia_disable_device(link);
184} 174}
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 02347c57357d..aa53db9f2e88 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -178,6 +178,7 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
178 return 0; 178 return 0;
179 bad1: 179 bad1:
180 kfree(priv); 180 kfree(priv);
181 pm_runtime_disable(&pdev->dev);
181 bad0: 182 bad0:
182 return ret; 183 return ret;
183} 184}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e3861b21e776..e4eca7810bcf 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -609,9 +609,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
609 609
610 acm->throttle = 0; 610 acm->throttle = 0;
611 611
612 tasklet_schedule(&acm->urb_task);
613 set_bit(ASYNCB_INITIALIZED, &acm->port.flags); 612 set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
614 rv = tty_port_block_til_ready(&acm->port, tty, filp); 613 rv = tty_port_block_til_ready(&acm->port, tty, filp);
614 tasklet_schedule(&acm->urb_task);
615done: 615done:
616 mutex_unlock(&acm->mutex); 616 mutex_unlock(&acm->mutex);
617err_out: 617err_out:
@@ -686,15 +686,21 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
686 686
687 /* Perform the closing process and see if we need to do the hardware 687 /* Perform the closing process and see if we need to do the hardware
688 shutdown */ 688 shutdown */
689 if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0) 689 if (!acm)
690 return;
691 if (tty_port_close_start(&acm->port, tty, filp) == 0) {
692 mutex_lock(&open_mutex);
693 if (!acm->dev) {
694 tty_port_tty_set(&acm->port, NULL);
695 acm_tty_unregister(acm);
696 tty->driver_data = NULL;
697 }
698 mutex_unlock(&open_mutex);
690 return; 699 return;
700 }
691 acm_port_down(acm, 0); 701 acm_port_down(acm, 0);
692 tty_port_close_end(&acm->port, tty); 702 tty_port_close_end(&acm->port, tty);
693 mutex_lock(&open_mutex);
694 tty_port_tty_set(&acm->port, NULL); 703 tty_port_tty_set(&acm->port, NULL);
695 if (!acm->dev)
696 acm_tty_unregister(acm);
697 mutex_unlock(&open_mutex);
698} 704}
699 705
700static int acm_tty_write(struct tty_struct *tty, 706static int acm_tty_write(struct tty_struct *tty,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 5ce839137ad6..0f857e645058 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -444,7 +444,7 @@ resubmit:
444static inline int 444static inline int
445hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) 445hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
446{ 446{
447 return usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), 447 return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
448 HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, 448 HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
449 tt, NULL, 0, 1000); 449 tt, NULL, 0, 1000);
450} 450}
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index d5b65962dd36..731150d4b1d9 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -1213,7 +1213,12 @@ udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
1213 tmp &= AMD_UNMASK_BIT(ep->num); 1213 tmp &= AMD_UNMASK_BIT(ep->num);
1214 writel(tmp, &dev->regs->ep_irqmsk); 1214 writel(tmp, &dev->regs->ep_irqmsk);
1215 } 1215 }
1216 } 1216 } else if (ep->in) {
1217 /* enable ep irq */
1218 tmp = readl(&dev->regs->ep_irqmsk);
1219 tmp &= AMD_UNMASK_BIT(ep->num);
1220 writel(tmp, &dev->regs->ep_irqmsk);
1221 }
1217 1222
1218 } else if (ep->dma) { 1223 } else if (ep->dma) {
1219 1224
@@ -2005,18 +2010,17 @@ __acquires(dev->lock)
2005{ 2010{
2006 int tmp; 2011 int tmp;
2007 2012
2008 /* empty queues and init hardware */
2009 udc_basic_init(dev);
2010 for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
2011 empty_req_queue(&dev->ep[tmp]);
2012 }
2013
2014 if (dev->gadget.speed != USB_SPEED_UNKNOWN) { 2013 if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
2015 spin_unlock(&dev->lock); 2014 spin_unlock(&dev->lock);
2016 driver->disconnect(&dev->gadget); 2015 driver->disconnect(&dev->gadget);
2017 spin_lock(&dev->lock); 2016 spin_lock(&dev->lock);
2018 } 2017 }
2019 /* init */ 2018
2019 /* empty queues and init hardware */
2020 udc_basic_init(dev);
2021 for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
2022 empty_req_queue(&dev->ep[tmp]);
2023
2020 udc_setup_endpoints(dev); 2024 udc_setup_endpoints(dev);
2021} 2025}
2022 2026
@@ -2472,6 +2476,13 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2472 } 2476 }
2473 } 2477 }
2474 2478
2479 } else if (!use_dma && ep->in) {
2480 /* disable interrupt */
2481 tmp = readl(
2482 &dev->regs->ep_irqmsk);
2483 tmp |= AMD_BIT(ep->num);
2484 writel(tmp,
2485 &dev->regs->ep_irqmsk);
2475 } 2486 }
2476 } 2487 }
2477 /* clear status bits */ 2488 /* clear status bits */
@@ -3279,6 +3290,17 @@ static int udc_pci_probe(
3279 goto finished; 3290 goto finished;
3280 } 3291 }
3281 3292
3293 spin_lock_init(&dev->lock);
3294 /* udc csr registers base */
3295 dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3296 /* dev registers base */
3297 dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3298 /* ep registers base */
3299 dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3300 /* fifo's base */
3301 dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3302 dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3303
3282 if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) { 3304 if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
3283 dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq); 3305 dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
3284 kfree(dev); 3306 kfree(dev);
@@ -3331,7 +3353,6 @@ static int udc_probe(struct udc *dev)
3331 udc_pollstall_timer.data = 0; 3353 udc_pollstall_timer.data = 0;
3332 3354
3333 /* device struct setup */ 3355 /* device struct setup */
3334 spin_lock_init(&dev->lock);
3335 dev->gadget.ops = &udc_ops; 3356 dev->gadget.ops = &udc_ops;
3336 3357
3337 dev_set_name(&dev->gadget.dev, "gadget"); 3358 dev_set_name(&dev->gadget.dev, "gadget");
@@ -3340,16 +3361,6 @@ static int udc_probe(struct udc *dev)
3340 dev->gadget.name = name; 3361 dev->gadget.name = name;
3341 dev->gadget.is_dualspeed = 1; 3362 dev->gadget.is_dualspeed = 1;
3342 3363
3343 /* udc csr registers base */
3344 dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3345 /* dev registers base */
3346 dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3347 /* ep registers base */
3348 dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3349 /* fifo's base */
3350 dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3351 dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3352
3353 /* init registers, interrupts, ... */ 3364 /* init registers, interrupts, ... */
3354 startup_registers(dev); 3365 startup_registers(dev);
3355 3366
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 9835e0713943..f5f5601701c9 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -28,6 +28,7 @@
28#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/timer.h> 30#include <linux/timer.h>
31#include <linux/ktime.h>
31#include <linux/list.h> 32#include <linux/list.h>
32#include <linux/interrupt.h> 33#include <linux/interrupt.h>
33#include <linux/usb.h> 34#include <linux/usb.h>
@@ -676,6 +677,7 @@ static int ehci_run (struct usb_hcd *hcd)
676 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ 677 ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
677 msleep(5); 678 msleep(5);
678 up_write(&ehci_cf_port_reset_rwsem); 679 up_write(&ehci_cf_port_reset_rwsem);
680 ehci->last_periodic_enable = ktime_get_real();
679 681
680 temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase)); 682 temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
681 ehci_info (ehci, 683 ehci_info (ehci,
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 378861b9d79a..ead5f4f2aa5a 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -111,6 +111,10 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
111 switch (pdev->vendor) { 111 switch (pdev->vendor) {
112 case PCI_VENDOR_ID_INTEL: 112 case PCI_VENDOR_ID_INTEL:
113 ehci->need_io_watchdog = 0; 113 ehci->need_io_watchdog = 0;
114 if (pdev->device == 0x27cc) {
115 ehci->broken_periodic = 1;
116 ehci_info(ehci, "using broken periodic workaround\n");
117 }
114 break; 118 break;
115 case PCI_VENDOR_ID_TDI: 119 case PCI_VENDOR_ID_TDI:
116 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { 120 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 00ad9ce392ed..139a2cc3f641 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -487,8 +487,20 @@ halt:
487 * we must clear the TT buffer (11.17.5). 487 * we must clear the TT buffer (11.17.5).
488 */ 488 */
489 if (unlikely(last_status != -EINPROGRESS && 489 if (unlikely(last_status != -EINPROGRESS &&
490 last_status != -EREMOTEIO)) 490 last_status != -EREMOTEIO)) {
491 ehci_clear_tt_buffer(ehci, qh, urb, token); 491 /* The TT's in some hubs malfunction when they
492 * receive this request following a STALL (they
493 * stop sending isochronous packets). Since a
494 * STALL can't leave the TT buffer in a busy
495 * state (if you believe Figures 11-48 - 11-51
496 * in the USB 2.0 spec), we won't clear the TT
497 * buffer in this case. Strictly speaking this
498 * is a violation of the spec.
499 */
500 if (last_status != -EPIPE)
501 ehci_clear_tt_buffer(ehci, qh, urb,
502 token);
503 }
492 } 504 }
493 505
494 /* if we're removing something not at the queue head, 506 /* if we're removing something not at the queue head,
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index b25cdea93a1f..a5535b5e3fe2 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -475,6 +475,8 @@ static int enable_periodic (struct ehci_hcd *ehci)
475 /* make sure ehci_work scans these */ 475 /* make sure ehci_work scans these */
476 ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index) 476 ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index)
477 % (ehci->periodic_size << 3); 477 % (ehci->periodic_size << 3);
478 if (unlikely(ehci->broken_periodic))
479 ehci->last_periodic_enable = ktime_get_real();
478 return 0; 480 return 0;
479} 481}
480 482
@@ -486,6 +488,16 @@ static int disable_periodic (struct ehci_hcd *ehci)
486 if (--ehci->periodic_sched) 488 if (--ehci->periodic_sched)
487 return 0; 489 return 0;
488 490
491 if (unlikely(ehci->broken_periodic)) {
492 /* delay experimentally determined */
493 ktime_t safe = ktime_add_us(ehci->last_periodic_enable, 1000);
494 ktime_t now = ktime_get_real();
495 s64 delay = ktime_us_delta(safe, now);
496
497 if (unlikely(delay > 0))
498 udelay(delay);
499 }
500
489 /* did setting PSE not take effect yet? 501 /* did setting PSE not take effect yet?
490 * takes effect only at frame boundaries... 502 * takes effect only at frame boundaries...
491 */ 503 */
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 064e76821ff5..2d85e21ff282 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -118,6 +118,7 @@ struct ehci_hcd { /* one per controller */
118 unsigned stamp; 118 unsigned stamp;
119 unsigned random_frame; 119 unsigned random_frame;
120 unsigned long next_statechange; 120 unsigned long next_statechange;
121 ktime_t last_periodic_enable;
121 u32 command; 122 u32 command;
122 123
123 /* SILICON QUIRKS */ 124 /* SILICON QUIRKS */
@@ -127,6 +128,7 @@ struct ehci_hcd { /* one per controller */
127 unsigned big_endian_desc:1; 128 unsigned big_endian_desc:1;
128 unsigned has_amcc_usb23:1; 129 unsigned has_amcc_usb23:1;
129 unsigned need_io_watchdog:1; 130 unsigned need_io_watchdog:1;
131 unsigned broken_periodic:1;
130 132
131 /* required for usb32 quirk */ 133 /* required for usb32 quirk */
132 #define OHCI_CTRL_HCFS (3 << 6) 134 #define OHCI_CTRL_HCFS (3 << 6)
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 78bb7710f36d..24eb74781919 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -87,6 +87,7 @@ static int ohci_restart (struct ohci_hcd *ohci);
87#ifdef CONFIG_PCI 87#ifdef CONFIG_PCI
88static void quirk_amd_pll(int state); 88static void quirk_amd_pll(int state);
89static void amd_iso_dev_put(void); 89static void amd_iso_dev_put(void);
90static void sb800_prefetch(struct ohci_hcd *ohci, int on);
90#else 91#else
91static inline void quirk_amd_pll(int state) 92static inline void quirk_amd_pll(int state)
92{ 93{
@@ -96,6 +97,10 @@ static inline void amd_iso_dev_put(void)
96{ 97{
97 return; 98 return;
98} 99}
100static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
101{
102 return;
103}
99#endif 104#endif
100 105
101 106
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index d2ba04dd785e..b8a1148f248e 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -177,6 +177,13 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
177 return 0; 177 return 0;
178 178
179 pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); 179 pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
180
181 /* SB800 needs pre-fetch fix */
182 if ((rev >= 0x40) && (rev <= 0x4f)) {
183 ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
184 ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
185 }
186
180 if ((rev > 0x3b) || (rev < 0x30)) { 187 if ((rev > 0x3b) || (rev < 0x30)) {
181 pci_dev_put(amd_smbus_dev); 188 pci_dev_put(amd_smbus_dev);
182 amd_smbus_dev = NULL; 189 amd_smbus_dev = NULL;
@@ -262,6 +269,19 @@ static void amd_iso_dev_put(void)
262 269
263} 270}
264 271
272static void sb800_prefetch(struct ohci_hcd *ohci, int on)
273{
274 struct pci_dev *pdev;
275 u16 misc;
276
277 pdev = to_pci_dev(ohci_to_hcd(ohci)->self.controller);
278 pci_read_config_word(pdev, 0x50, &misc);
279 if (on == 0)
280 pci_write_config_word(pdev, 0x50, misc & 0xfcff);
281 else
282 pci_write_config_word(pdev, 0x50, misc | 0x0300);
283}
284
265/* List of quirks for OHCI */ 285/* List of quirks for OHCI */
266static const struct pci_device_id ohci_pci_quirks[] = { 286static const struct pci_device_id ohci_pci_quirks[] = {
267 { 287 {
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 16fecb8ecc39..35288bcae0db 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -49,9 +49,12 @@ __acquires(ohci->lock)
49 switch (usb_pipetype (urb->pipe)) { 49 switch (usb_pipetype (urb->pipe)) {
50 case PIPE_ISOCHRONOUS: 50 case PIPE_ISOCHRONOUS:
51 ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--; 51 ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
52 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 52 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
53 && quirk_amdiso(ohci)) 53 if (quirk_amdiso(ohci))
54 quirk_amd_pll(1); 54 quirk_amd_pll(1);
55 if (quirk_amdprefetch(ohci))
56 sb800_prefetch(ohci, 0);
57 }
55 break; 58 break;
56 case PIPE_INTERRUPT: 59 case PIPE_INTERRUPT:
57 ohci_to_hcd(ohci)->self.bandwidth_int_reqs--; 60 ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
@@ -680,9 +683,12 @@ static void td_submit_urb (
680 data + urb->iso_frame_desc [cnt].offset, 683 data + urb->iso_frame_desc [cnt].offset,
681 urb->iso_frame_desc [cnt].length, urb, cnt); 684 urb->iso_frame_desc [cnt].length, urb, cnt);
682 } 685 }
683 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0 686 if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
684 && quirk_amdiso(ohci)) 687 if (quirk_amdiso(ohci))
685 quirk_amd_pll(0); 688 quirk_amd_pll(0);
689 if (quirk_amdprefetch(ohci))
690 sb800_prefetch(ohci, 1);
691 }
686 periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0 692 periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
687 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0; 693 && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
688 break; 694 break;
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 222011f6172c..5bf15fed0d9f 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -402,6 +402,7 @@ struct ohci_hcd {
402#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */ 402#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */
403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ 403#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
404#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/ 404#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers*/
405#define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
405 // there are also chip quirks/bugs in init logic 406 // there are also chip quirks/bugs in init logic
406 407
407 struct work_struct nec_work; /* Worker for NEC quirk */ 408 struct work_struct nec_work; /* Worker for NEC quirk */
@@ -433,6 +434,10 @@ static inline int quirk_amdiso(struct ohci_hcd *ohci)
433{ 434{
434 return ohci->flags & OHCI_QUIRK_AMD_ISO; 435 return ohci->flags & OHCI_QUIRK_AMD_ISO;
435} 436}
437static inline int quirk_amdprefetch(struct ohci_hcd *ohci)
438{
439 return ohci->flags & OHCI_QUIRK_AMD_PREFETCH;
440}
436#else 441#else
437static inline int quirk_nec(struct ohci_hcd *ohci) 442static inline int quirk_nec(struct ohci_hcd *ohci)
438{ 443{
@@ -446,6 +451,10 @@ static inline int quirk_amdiso(struct ohci_hcd *ohci)
446{ 451{
447 return 0; 452 return 0;
448} 453}
454static inline int quirk_amdprefetch(struct ohci_hcd *ohci)
455{
456 return 0;
457}
449#endif 458#endif
450 459
451/* convert between an hcd pointer and the corresponding ohci_hcd */ 460/* convert between an hcd pointer and the corresponding ohci_hcd */
diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c
index 516848dd9b48..39d253e841f6 100644
--- a/drivers/usb/host/sl811_cs.c
+++ b/drivers/usb/host/sl811_cs.c
@@ -37,28 +37,8 @@ MODULE_LICENSE("GPL");
37/* MACROS */ 37/* MACROS */
38/*====================================================================*/ 38/*====================================================================*/
39 39
40#if defined(DEBUG) || defined(PCMCIA_DEBUG)
41
42static int pc_debug = 0;
43module_param(pc_debug, int, 0644);
44
45#define DBG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG "sl811_cs: " args)
46
47#else
48#define DBG(n, args...) do{}while(0)
49#endif /* no debugging */
50
51#define INFO(args...) printk(KERN_INFO "sl811_cs: " args) 40#define INFO(args...) printk(KERN_INFO "sl811_cs: " args)
52 41
53#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
54
55#define CS_CHECK(fn, ret) \
56 do { \
57 last_fn = (fn); \
58 if ((last_ret = (ret)) != 0) \
59 goto cs_failed; \
60 } while (0)
61
62/*====================================================================*/ 42/*====================================================================*/
63/* VARIABLES */ 43/* VARIABLES */
64/*====================================================================*/ 44/*====================================================================*/
@@ -76,7 +56,7 @@ static void sl811_cs_release(struct pcmcia_device * link);
76 56
77static void release_platform_dev(struct device * dev) 57static void release_platform_dev(struct device * dev)
78{ 58{
79 DBG(0, "sl811_cs platform_dev release\n"); 59 dev_dbg(dev, "sl811_cs platform_dev release\n");
80 dev->parent = NULL; 60 dev->parent = NULL;
81} 61}
82 62
@@ -140,7 +120,7 @@ static int sl811_hc_init(struct device *parent, resource_size_t base_addr,
140 120
141static void sl811_cs_detach(struct pcmcia_device *link) 121static void sl811_cs_detach(struct pcmcia_device *link)
142{ 122{
143 DBG(0, "sl811_cs_detach(0x%p)\n", link); 123 dev_dbg(&link->dev, "sl811_cs_detach\n");
144 124
145 sl811_cs_release(link); 125 sl811_cs_release(link);
146 126
@@ -150,7 +130,7 @@ static void sl811_cs_detach(struct pcmcia_device *link)
150 130
151static void sl811_cs_release(struct pcmcia_device * link) 131static void sl811_cs_release(struct pcmcia_device * link)
152{ 132{
153 DBG(0, "sl811_cs_release(0x%p)\n", link); 133 dev_dbg(&link->dev, "sl811_cs_release\n");
154 134
155 pcmcia_disable_device(link); 135 pcmcia_disable_device(link);
156 platform_device_unregister(&platform_dev); 136 platform_device_unregister(&platform_dev);
@@ -205,11 +185,11 @@ static int sl811_cs_config_check(struct pcmcia_device *p_dev,
205 185
206static int sl811_cs_config(struct pcmcia_device *link) 186static int sl811_cs_config(struct pcmcia_device *link)
207{ 187{
208 struct device *parent = &handle_to_dev(link); 188 struct device *parent = &link->dev;
209 local_info_t *dev = link->priv; 189 local_info_t *dev = link->priv;
210 int last_fn, last_ret; 190 int ret;
211 191
212 DBG(0, "sl811_cs_config(0x%p)\n", link); 192 dev_dbg(&link->dev, "sl811_cs_config\n");
213 193
214 if (pcmcia_loop_config(link, sl811_cs_config_check, NULL)) 194 if (pcmcia_loop_config(link, sl811_cs_config_check, NULL))
215 goto failed; 195 goto failed;
@@ -217,14 +197,16 @@ static int sl811_cs_config(struct pcmcia_device *link)
217 /* require an IRQ and two registers */ 197 /* require an IRQ and two registers */
218 if (!link->io.NumPorts1 || link->io.NumPorts1 < 2) 198 if (!link->io.NumPorts1 || link->io.NumPorts1 < 2)
219 goto failed; 199 goto failed;
220 if (link->conf.Attributes & CONF_ENABLE_IRQ) 200 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
221 CS_CHECK(RequestIRQ, 201 ret = pcmcia_request_irq(link, &link->irq);
222 pcmcia_request_irq(link, &link->irq)); 202 if (ret)
223 else 203 goto failed;
204 } else
224 goto failed; 205 goto failed;
225 206
226 CS_CHECK(RequestConfiguration, 207 ret = pcmcia_request_configuration(link, &link->conf);
227 pcmcia_request_configuration(link, &link->conf)); 208 if (ret)
209 goto failed;
228 210
229 sprintf(dev->node.dev_name, driver_name); 211 sprintf(dev->node.dev_name, driver_name);
230 dev->node.major = dev->node.minor = 0; 212 dev->node.major = dev->node.minor = 0;
@@ -241,8 +223,6 @@ static int sl811_cs_config(struct pcmcia_device *link)
241 223
242 if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ) 224 if (sl811_hc_init(parent, link->io.BasePort1, link->irq.AssignedIRQ)
243 < 0) { 225 < 0) {
244cs_failed:
245 cs_error(link, last_fn, last_ret);
246failed: 226failed:
247 printk(KERN_WARNING "sl811_cs_config failed\n"); 227 printk(KERN_WARNING "sl811_cs_config failed\n");
248 sl811_cs_release(link); 228 sl811_cs_release(link);
@@ -263,7 +243,6 @@ static int sl811_cs_probe(struct pcmcia_device *link)
263 243
264 /* Initialize */ 244 /* Initialize */
265 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 245 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
266 link->irq.IRQInfo1 = IRQ_INFO2_VALID|IRQ_LEVEL_ID;
267 link->irq.Handler = NULL; 246 link->irq.Handler = NULL;
268 247
269 link->conf.Attributes = 0; 248 link->conf.Attributes = 0;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1db4fea8c170..b8fd270a8b0d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -802,9 +802,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
802 int i; 802 int i;
803 803
804 /* Free the Event Ring Segment Table and the actual Event Ring */ 804 /* Free the Event Ring Segment Table and the actual Event Ring */
805 xhci_writel(xhci, 0, &xhci->ir_set->erst_size); 805 if (xhci->ir_set) {
806 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); 806 xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
807 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); 807 xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
808 xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
809 }
808 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 810 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
809 if (xhci->erst.entries) 811 if (xhci->erst.entries)
810 pci_free_consistent(pdev, size, 812 pci_free_consistent(pdev, size,
@@ -841,9 +843,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
841 xhci->dcbaa, xhci->dcbaa->dma); 843 xhci->dcbaa, xhci->dcbaa->dma);
842 xhci->dcbaa = NULL; 844 xhci->dcbaa = NULL;
843 845
846 scratchpad_free(xhci);
844 xhci->page_size = 0; 847 xhci->page_size = 0;
845 xhci->page_shift = 0; 848 xhci->page_shift = 0;
846 scratchpad_free(xhci);
847} 849}
848 850
849int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 851int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 173c39c76489..821b7b4709de 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -864,9 +864,11 @@ static struct xhci_segment *trb_in_td(
864 cur_seg = start_seg; 864 cur_seg = start_seg;
865 865
866 do { 866 do {
867 if (start_dma == 0)
868 return 0;
867 /* We may get an event for a Link TRB in the middle of a TD */ 869 /* We may get an event for a Link TRB in the middle of a TD */
868 end_seg_dma = xhci_trb_virt_to_dma(cur_seg, 870 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
869 &start_seg->trbs[TRBS_PER_SEGMENT - 1]); 871 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
870 /* If the end TRB isn't in this segment, this is set to 0 */ 872 /* If the end TRB isn't in this segment, this is set to 0 */
871 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb); 873 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
872 874
@@ -893,8 +895,9 @@ static struct xhci_segment *trb_in_td(
893 } 895 }
894 cur_seg = cur_seg->next; 896 cur_seg = cur_seg->next;
895 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); 897 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
896 } while (1); 898 } while (cur_seg != start_seg);
897 899
900 return 0;
898} 901}
899 902
900/* 903/*
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 9ed3e741bee1..10f3205798e8 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -348,12 +348,12 @@ static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
348 348
349/* 349/*
350 * Return a few (kilo-)bytes to the head of the buffer. 350 * Return a few (kilo-)bytes to the head of the buffer.
351 * This is used if a DMA fetch fails. 351 * This is used if a data fetch fails.
352 */ 352 */
353static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size) 353static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
354{ 354{
355 355
356 size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1); 356 /* size &= ~(PKT_ALIGN-1); -- we're called with aligned size */
357 rp->b_cnt -= size; 357 rp->b_cnt -= size;
358 if (rp->b_in < size) 358 if (rp->b_in < size)
359 rp->b_in += rp->b_size; 359 rp->b_in += rp->b_size;
@@ -433,6 +433,7 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
433 unsigned int urb_length; 433 unsigned int urb_length;
434 unsigned int offset; 434 unsigned int offset;
435 unsigned int length; 435 unsigned int length;
436 unsigned int delta;
436 unsigned int ndesc, lendesc; 437 unsigned int ndesc, lendesc;
437 unsigned char dir; 438 unsigned char dir;
438 struct mon_bin_hdr *ep; 439 struct mon_bin_hdr *ep;
@@ -537,8 +538,10 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
537 if (length != 0) { 538 if (length != 0) {
538 ep->flag_data = mon_bin_get_data(rp, offset, urb, length); 539 ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
539 if (ep->flag_data != 0) { /* Yes, it's 0x00, not '0' */ 540 if (ep->flag_data != 0) { /* Yes, it's 0x00, not '0' */
540 ep->len_cap = 0; 541 delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
541 mon_buff_area_shrink(rp, length); 542 ep->len_cap -= length;
543 delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
544 mon_buff_area_shrink(rp, delta);
542 } 545 }
543 } else { 546 } else {
544 ep->flag_data = data_tag; 547 ep->flag_data = data_tag;
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index c3577bbbae6c..ef2332a9941d 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1442,11 +1442,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
1442 musb_writew(regs, MUSB_TXCSR, value); 1442 musb_writew(regs, MUSB_TXCSR, value);
1443 musb_writew(regs, MUSB_TXCSR, value); 1443 musb_writew(regs, MUSB_TXCSR, value);
1444 1444
1445 /* re-enable interrupt */
1446 if (enabled)
1447 musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
1448 (1 << cppi_ch->index));
1449
1450 /* While we scrub the TX state RAM, ensure that we clean 1445 /* While we scrub the TX state RAM, ensure that we clean
1451 * up any interrupt that's currently asserted: 1446 * up any interrupt that's currently asserted:
1452 * 1. Write to completion Ptr value 0x1(bit 0 set) 1447 * 1. Write to completion Ptr value 0x1(bit 0 set)
@@ -1459,6 +1454,11 @@ static int cppi_channel_abort(struct dma_channel *channel)
1459 cppi_reset_tx(tx_ram, 1); 1454 cppi_reset_tx(tx_ram, 1);
1460 musb_writel(&tx_ram->tx_complete, 0, 0); 1455 musb_writel(&tx_ram->tx_complete, 0, 0);
1461 1456
1457 /* re-enable interrupt */
1458 if (enabled)
1459 musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
1460 (1 << cppi_ch->index));
1461
1462 cppi_dump_tx(5, cppi_ch, " (done teardown)"); 1462 cppi_dump_tx(5, cppi_ch, " (done teardown)");
1463 1463
1464 /* REVISIT tx side _should_ clean up the same way 1464 /* REVISIT tx side _should_ clean up the same way
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 3a61ddb62bd2..547e0e390726 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1450,7 +1450,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1450#endif 1450#endif
1451 1451
1452 if (hw_ep->max_packet_sz_tx) { 1452 if (hw_ep->max_packet_sz_tx) {
1453 printk(KERN_DEBUG 1453 DBG(1,
1454 "%s: hw_ep %d%s, %smax %d\n", 1454 "%s: hw_ep %d%s, %smax %d\n",
1455 musb_driver_name, i, 1455 musb_driver_name, i,
1456 hw_ep->is_shared_fifo ? "shared" : "tx", 1456 hw_ep->is_shared_fifo ? "shared" : "tx",
@@ -1459,7 +1459,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1459 hw_ep->max_packet_sz_tx); 1459 hw_ep->max_packet_sz_tx);
1460 } 1460 }
1461 if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { 1461 if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
1462 printk(KERN_DEBUG 1462 DBG(1,
1463 "%s: hw_ep %d%s, %smax %d\n", 1463 "%s: hw_ep %d%s, %smax %d\n",
1464 musb_driver_name, i, 1464 musb_driver_name, i,
1465 "rx", 1465 "rx",
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 8b3c4e2ed7b8..74073f9a43f0 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -4,6 +4,7 @@
4 * Copyright 2005 Mentor Graphics Corporation 4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments 5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation 6 * Copyright (C) 2006-2007 Nokia Corporation
7 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
7 * 8 *
8 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -436,14 +437,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
436 csr |= MUSB_TXCSR_P_WZC_BITS; 437 csr |= MUSB_TXCSR_P_WZC_BITS;
437 csr &= ~MUSB_TXCSR_P_SENTSTALL; 438 csr &= ~MUSB_TXCSR_P_SENTSTALL;
438 musb_writew(epio, MUSB_TXCSR, csr); 439 musb_writew(epio, MUSB_TXCSR, csr);
439 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
440 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
441 musb->dma_controller->channel_abort(dma);
442 }
443
444 if (request)
445 musb_g_giveback(musb_ep, request, -EPIPE);
446
447 break; 440 break;
448 } 441 }
449 442
@@ -582,15 +575,25 @@ void musb_g_tx(struct musb *musb, u8 epnum)
582 */ 575 */
583static void rxstate(struct musb *musb, struct musb_request *req) 576static void rxstate(struct musb *musb, struct musb_request *req)
584{ 577{
585 u16 csr = 0;
586 const u8 epnum = req->epnum; 578 const u8 epnum = req->epnum;
587 struct usb_request *request = &req->request; 579 struct usb_request *request = &req->request;
588 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; 580 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
589 void __iomem *epio = musb->endpoints[epnum].regs; 581 void __iomem *epio = musb->endpoints[epnum].regs;
590 unsigned fifo_count = 0; 582 unsigned fifo_count = 0;
591 u16 len = musb_ep->packet_sz; 583 u16 len = musb_ep->packet_sz;
584 u16 csr = musb_readw(epio, MUSB_RXCSR);
592 585
593 csr = musb_readw(epio, MUSB_RXCSR); 586 /* We shouldn't get here while DMA is active, but we do... */
587 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
588 DBG(4, "DMA pending...\n");
589 return;
590 }
591
592 if (csr & MUSB_RXCSR_P_SENDSTALL) {
593 DBG(5, "%s stalling, RXCSR %04x\n",
594 musb_ep->end_point.name, csr);
595 return;
596 }
594 597
595 if (is_cppi_enabled() && musb_ep->dma) { 598 if (is_cppi_enabled() && musb_ep->dma) {
596 struct dma_controller *c = musb->dma_controller; 599 struct dma_controller *c = musb->dma_controller;
@@ -761,19 +764,10 @@ void musb_g_rx(struct musb *musb, u8 epnum)
761 csr, dma ? " (dma)" : "", request); 764 csr, dma ? " (dma)" : "", request);
762 765
763 if (csr & MUSB_RXCSR_P_SENTSTALL) { 766 if (csr & MUSB_RXCSR_P_SENTSTALL) {
764 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
765 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
766 (void) musb->dma_controller->channel_abort(dma);
767 request->actual += musb_ep->dma->actual_len;
768 }
769
770 csr |= MUSB_RXCSR_P_WZC_BITS; 767 csr |= MUSB_RXCSR_P_WZC_BITS;
771 csr &= ~MUSB_RXCSR_P_SENTSTALL; 768 csr &= ~MUSB_RXCSR_P_SENTSTALL;
772 musb_writew(epio, MUSB_RXCSR, csr); 769 musb_writew(epio, MUSB_RXCSR, csr);
773 770 return;
774 if (request)
775 musb_g_giveback(musb_ep, request, -EPIPE);
776 goto done;
777 } 771 }
778 772
779 if (csr & MUSB_RXCSR_P_OVERRUN) { 773 if (csr & MUSB_RXCSR_P_OVERRUN) {
@@ -795,7 +789,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
795 DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1, 789 DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
796 "%s busy, csr %04x\n", 790 "%s busy, csr %04x\n",
797 musb_ep->end_point.name, csr); 791 musb_ep->end_point.name, csr);
798 goto done; 792 return;
799 } 793 }
800 794
801 if (dma && (csr & MUSB_RXCSR_DMAENAB)) { 795 if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
@@ -826,22 +820,15 @@ void musb_g_rx(struct musb *musb, u8 epnum)
826 if ((request->actual < request->length) 820 if ((request->actual < request->length)
827 && (musb_ep->dma->actual_len 821 && (musb_ep->dma->actual_len
828 == musb_ep->packet_sz)) 822 == musb_ep->packet_sz))
829 goto done; 823 return;
830#endif 824#endif
831 musb_g_giveback(musb_ep, request, 0); 825 musb_g_giveback(musb_ep, request, 0);
832 826
833 request = next_request(musb_ep); 827 request = next_request(musb_ep);
834 if (!request) 828 if (!request)
835 goto done; 829 return;
836
837 /* don't start more i/o till the stall clears */
838 musb_ep_select(mbase, epnum);
839 csr = musb_readw(epio, MUSB_RXCSR);
840 if (csr & MUSB_RXCSR_P_SENDSTALL)
841 goto done;
842 } 830 }
843 831
844
845 /* analyze request if the ep is hot */ 832 /* analyze request if the ep is hot */
846 if (request) 833 if (request)
847 rxstate(musb, to_musb_request(request)); 834 rxstate(musb, to_musb_request(request));
@@ -849,8 +836,6 @@ void musb_g_rx(struct musb *musb, u8 epnum)
849 DBG(3, "packet waiting for %s%s request\n", 836 DBG(3, "packet waiting for %s%s request\n",
850 musb_ep->desc ? "" : "inactive ", 837 musb_ep->desc ? "" : "inactive ",
851 musb_ep->end_point.name); 838 musb_ep->end_point.name);
852
853done:
854 return; 839 return;
855} 840}
856 841
@@ -1244,7 +1229,7 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value)
1244 void __iomem *mbase; 1229 void __iomem *mbase;
1245 unsigned long flags; 1230 unsigned long flags;
1246 u16 csr; 1231 u16 csr;
1247 struct musb_request *request = NULL; 1232 struct musb_request *request;
1248 int status = 0; 1233 int status = 0;
1249 1234
1250 if (!ep) 1235 if (!ep)
@@ -1260,24 +1245,29 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value)
1260 1245
1261 musb_ep_select(mbase, epnum); 1246 musb_ep_select(mbase, epnum);
1262 1247
1263 /* cannot portably stall with non-empty FIFO */
1264 request = to_musb_request(next_request(musb_ep)); 1248 request = to_musb_request(next_request(musb_ep));
1265 if (value && musb_ep->is_in) { 1249 if (value) {
1266 csr = musb_readw(epio, MUSB_TXCSR); 1250 if (request) {
1267 if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1251 DBG(3, "request in progress, cannot halt %s\n",
1268 DBG(3, "%s fifo busy, cannot halt\n", ep->name); 1252 ep->name);
1269 spin_unlock_irqrestore(&musb->lock, flags); 1253 status = -EAGAIN;
1270 return -EAGAIN; 1254 goto done;
1255 }
1256 /* Cannot portably stall with non-empty FIFO */
1257 if (musb_ep->is_in) {
1258 csr = musb_readw(epio, MUSB_TXCSR);
1259 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1260 DBG(3, "FIFO busy, cannot halt %s\n", ep->name);
1261 status = -EAGAIN;
1262 goto done;
1263 }
1271 } 1264 }
1272
1273 } 1265 }
1274 1266
1275 /* set/clear the stall and toggle bits */ 1267 /* set/clear the stall and toggle bits */
1276 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear"); 1268 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1277 if (musb_ep->is_in) { 1269 if (musb_ep->is_in) {
1278 csr = musb_readw(epio, MUSB_TXCSR); 1270 csr = musb_readw(epio, MUSB_TXCSR);
1279 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
1280 csr |= MUSB_TXCSR_FLUSHFIFO;
1281 csr |= MUSB_TXCSR_P_WZC_BITS 1271 csr |= MUSB_TXCSR_P_WZC_BITS
1282 | MUSB_TXCSR_CLRDATATOG; 1272 | MUSB_TXCSR_CLRDATATOG;
1283 if (value) 1273 if (value)
@@ -1300,14 +1290,13 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value)
1300 musb_writew(epio, MUSB_RXCSR, csr); 1290 musb_writew(epio, MUSB_RXCSR, csr);
1301 } 1291 }
1302 1292
1303done:
1304
1305 /* maybe start the first request in the queue */ 1293 /* maybe start the first request in the queue */
1306 if (!musb_ep->busy && !value && request) { 1294 if (!musb_ep->busy && !value && request) {
1307 DBG(3, "restarting the request\n"); 1295 DBG(3, "restarting the request\n");
1308 musb_ep_restart(musb, request); 1296 musb_ep_restart(musb, request);
1309 } 1297 }
1310 1298
1299done:
1311 spin_unlock_irqrestore(&musb->lock, flags); 1300 spin_unlock_irqrestore(&musb->lock, flags);
1312 return status; 1301 return status;
1313} 1302}
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 7a6778675ad3..522efb31b56b 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -511,7 +511,8 @@ static void ep0_txstate(struct musb *musb)
511 511
512 /* update the flags */ 512 /* update the flags */
513 if (fifo_count < MUSB_MAX_END0_PACKET 513 if (fifo_count < MUSB_MAX_END0_PACKET
514 || request->actual == request->length) { 514 || (request->actual == request->length
515 && !request->zero)) {
515 musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT; 516 musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
516 csr |= MUSB_CSR0_P_DATAEND; 517 csr |= MUSB_CSR0_P_DATAEND;
517 } else 518 } else
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index cf94511485f2..e3ab40a966eb 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1301,8 +1301,11 @@ void musb_host_tx(struct musb *musb, u8 epnum)
1301 return; 1301 return;
1302 } else if (usb_pipeisoc(pipe) && dma) { 1302 } else if (usb_pipeisoc(pipe) && dma) {
1303 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, 1303 if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
1304 offset, length)) 1304 offset, length)) {
1305 if (is_cppi_enabled() || tusb_dma_omap())
1306 musb_h_tx_dma_start(hw_ep);
1305 return; 1307 return;
1308 }
1306 } else if (tx_csr & MUSB_TXCSR_DMAENAB) { 1309 } else if (tx_csr & MUSB_TXCSR_DMAENAB) {
1307 DBG(1, "not complete, but DMA enabled?\n"); 1310 DBG(1, "not complete, but DMA enabled?\n");
1308 return; 1311 return;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 698252a4dc5d..bd254ec97d14 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -50,6 +50,8 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
50static void cp210x_break_ctl(struct tty_struct *, int); 50static void cp210x_break_ctl(struct tty_struct *, int);
51static int cp210x_startup(struct usb_serial *); 51static int cp210x_startup(struct usb_serial *);
52static void cp210x_disconnect(struct usb_serial *); 52static void cp210x_disconnect(struct usb_serial *);
53static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
54static int cp210x_carrier_raised(struct usb_serial_port *p);
53 55
54static int debug; 56static int debug;
55 57
@@ -143,6 +145,8 @@ static struct usb_serial_driver cp210x_device = {
143 .tiocmset = cp210x_tiocmset, 145 .tiocmset = cp210x_tiocmset,
144 .attach = cp210x_startup, 146 .attach = cp210x_startup,
145 .disconnect = cp210x_disconnect, 147 .disconnect = cp210x_disconnect,
148 .dtr_rts = cp210x_dtr_rts,
149 .carrier_raised = cp210x_carrier_raised
146}; 150};
147 151
148/* Config request types */ 152/* Config request types */
@@ -746,6 +750,14 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *file,
746 return cp210x_set_config(port, CP210X_SET_MHS, &control, 2); 750 return cp210x_set_config(port, CP210X_SET_MHS, &control, 2);
747} 751}
748 752
753static void cp210x_dtr_rts(struct usb_serial_port *p, int on)
754{
755 if (on)
756 cp210x_tiocmset_port(p, NULL, TIOCM_DTR|TIOCM_RTS, 0);
757 else
758 cp210x_tiocmset_port(p, NULL, 0, TIOCM_DTR|TIOCM_RTS);
759}
760
749static int cp210x_tiocmget (struct tty_struct *tty, struct file *file) 761static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
750{ 762{
751 struct usb_serial_port *port = tty->driver_data; 763 struct usb_serial_port *port = tty->driver_data;
@@ -768,6 +780,15 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
768 return result; 780 return result;
769} 781}
770 782
783static int cp210x_carrier_raised(struct usb_serial_port *p)
784{
785 unsigned int control;
786 cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
787 if (control & CONTROL_DCD)
788 return 1;
789 return 0;
790}
791
771static void cp210x_break_ctl (struct tty_struct *tty, int break_state) 792static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
772{ 793{
773 struct usb_serial_port *port = tty->driver_data; 794 struct usb_serial_port *port = tty->driver_data;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 9c60d6d4908a..ebcc6d0e2e91 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1937,7 +1937,7 @@ static void ftdi_write_bulk_callback(struct urb *urb)
1937 return; 1937 return;
1938 } 1938 }
1939 /* account for transferred data */ 1939 /* account for transferred data */
1940 countback = urb->actual_length; 1940 countback = urb->transfer_buffer_length;
1941 data_offset = priv->write_offset; 1941 data_offset = priv->write_offset;
1942 if (data_offset > 0) { 1942 if (data_offset > 0) {
1943 /* Subtract the control bytes */ 1943 /* Subtract the control bytes */
@@ -1950,7 +1950,6 @@ static void ftdi_write_bulk_callback(struct urb *urb)
1950 1950
1951 if (status) { 1951 if (status) {
1952 dbg("nonzero write bulk status received: %d", status); 1952 dbg("nonzero write bulk status received: %d", status);
1953 return;
1954 } 1953 }
1955 1954
1956 usb_serial_port_softint(port); 1955 usb_serial_port_softint(port);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index cd44c68954df..0577e4b61114 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -308,6 +308,7 @@ static int option_resume(struct usb_serial *serial);
308 308
309#define DLINK_VENDOR_ID 0x1186 309#define DLINK_VENDOR_ID 0x1186
310#define DLINK_PRODUCT_DWM_652 0x3e04 310#define DLINK_PRODUCT_DWM_652 0x3e04
311#define DLINK_PRODUCT_DWM_652_U5 0xce16
311 312
312#define QISDA_VENDOR_ID 0x1da5 313#define QISDA_VENDOR_ID 0x1da5
313#define QISDA_PRODUCT_H21_4512 0x4512 314#define QISDA_PRODUCT_H21_4512 0x4512
@@ -335,6 +336,10 @@ static int option_resume(struct usb_serial *serial);
335#define AIRPLUS_VENDOR_ID 0x1011 336#define AIRPLUS_VENDOR_ID 0x1011
336#define AIRPLUS_PRODUCT_MCD650 0x3198 337#define AIRPLUS_PRODUCT_MCD650 0x3198
337 338
339/* 4G Systems products */
340#define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e
341#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
342
338static struct usb_device_id option_ids[] = { 343static struct usb_device_id option_ids[] = {
339 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 344 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
340 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 345 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -586,6 +591,7 @@ static struct usb_device_id option_ids[] = {
586 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 591 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
587 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 592 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
588 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 593 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
594 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
589 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) }, 595 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
590 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) }, 596 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
591 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) }, 597 { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
@@ -597,6 +603,7 @@ static struct usb_device_id option_ids[] = {
597 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, 603 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
598 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, 604 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
599 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, 605 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
606 { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
600 { } /* Terminating entry */ 607 { } /* Terminating entry */
601}; 608};
602MODULE_DEVICE_TABLE(usb, option_ids); 609MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 2211a852af9c..96774949cd30 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -433,8 +433,9 @@ static int corgi_bl_update_status(struct backlight_device *bd)
433 433
434 if (corgibl_flags & CORGIBL_SUSPENDED) 434 if (corgibl_flags & CORGIBL_SUSPENDED)
435 intensity = 0; 435 intensity = 0;
436 if (corgibl_flags & CORGIBL_BATTLOW) 436
437 intensity &= lcd->limit_mask; 437 if ((corgibl_flags & CORGIBL_BATTLOW) && intensity > lcd->limit_mask)
438 intensity = lcd->limit_mask;
438 439
439 return corgi_bl_set_intensity(lcd, intensity); 440 return corgi_bl_set_intensity(lcd, intensity);
440} 441}
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index b6449470106c..a482dd7b0311 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -56,7 +56,7 @@ static int fb_notifier_callback(struct notifier_block *self,
56 56
57static int lcd_register_fb(struct lcd_device *ld) 57static int lcd_register_fb(struct lcd_device *ld)
58{ 58{
59 memset(&ld->fb_notif, 0, sizeof(&ld->fb_notif)); 59 memset(&ld->fb_notif, 0, sizeof(ld->fb_notif));
60 ld->fb_notif.notifier_call = fb_notifier_callback; 60 ld->fb_notif.notifier_call = fb_notifier_callback;
61 return fb_register_client(&ld->fb_notif); 61 return fb_register_client(&ld->fb_notif);
62} 62}
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index d065894ce38f..ea1fd3f47511 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -554,11 +554,11 @@ static int fb_check_var(struct fb_var_screeninfo *var,
554 var->transp.length = 0; 554 var->transp.length = 0;
555 break; 555 break;
556 case 16: /* RGB 565 */ 556 case 16: /* RGB 565 */
557 var->red.offset = 0; 557 var->red.offset = 11;
558 var->red.length = 5; 558 var->red.length = 5;
559 var->green.offset = 5; 559 var->green.offset = 5;
560 var->green.length = 6; 560 var->green.length = 6;
561 var->blue.offset = 11; 561 var->blue.offset = 0;
562 var->blue.length = 5; 562 var->blue.length = 5;
563 var->transp.offset = 0; 563 var->transp.offset = 0;
564 var->transp.length = 0; 564 var->transp.length = 0;
@@ -591,7 +591,7 @@ static int __devexit fb_remove(struct platform_device *dev)
591 unregister_framebuffer(info); 591 unregister_framebuffer(info);
592 fb_dealloc_cmap(&info->cmap); 592 fb_dealloc_cmap(&info->cmap);
593 dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, 593 dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE,
594 info->screen_base, 594 info->screen_base - PAGE_SIZE,
595 info->fix.smem_start); 595 info->fix.smem_start);
596 free_irq(par->irq, par); 596 free_irq(par->irq, par);
597 clk_disable(par->lcdc_clk); 597 clk_disable(par->lcdc_clk);
@@ -704,7 +704,7 @@ static int __init fb_probe(struct platform_device *device)
704 704
705 if (i == ARRAY_SIZE(known_lcd_panels)) { 705 if (i == ARRAY_SIZE(known_lcd_panels)) {
706 dev_err(&device->dev, "GLCD: No valid panel found\n"); 706 dev_err(&device->dev, "GLCD: No valid panel found\n");
707 ret = ENODEV; 707 ret = -ENODEV;
708 goto err_clk_disable; 708 goto err_clk_disable;
709 } else 709 } else
710 dev_info(&device->dev, "GLCD: Found %s panel\n", 710 dev_info(&device->dev, "GLCD: Found %s panel\n",
@@ -749,6 +749,7 @@ static int __init fb_probe(struct platform_device *device)
749 (PAGE_SIZE - par->palette_sz); 749 (PAGE_SIZE - par->palette_sz);
750 750
751 /* the rest of the frame buffer is pixel data */ 751 /* the rest of the frame buffer is pixel data */
752 da8xx_fb_info->screen_base = par->v_palette_base + par->palette_sz;
752 da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz; 753 da8xx_fb_fix.smem_start = par->p_palette_base + par->palette_sz;
753 da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz; 754 da8xx_fb_fix.smem_len = par->databuf_sz - par->palette_sz;
754 da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8; 755 da8xx_fb_fix.line_length = (lcdc_info->width * lcd_cfg->bpp) / 8;
@@ -787,6 +788,8 @@ static int __init fb_probe(struct platform_device *device)
787 da8xx_fb_info->var = da8xx_fb_var; 788 da8xx_fb_info->var = da8xx_fb_var;
788 da8xx_fb_info->fbops = &da8xx_fb_ops; 789 da8xx_fb_info->fbops = &da8xx_fb_ops;
789 da8xx_fb_info->pseudo_palette = par->pseudo_palette; 790 da8xx_fb_info->pseudo_palette = par->pseudo_palette;
791 da8xx_fb_info->fix.visual = (da8xx_fb_info->var.bits_per_pixel <= 8) ?
792 FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
790 793
791 ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); 794 ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0);
792 if (ret) 795 if (ret)
@@ -825,7 +828,7 @@ err_free_irq:
825 828
826err_release_fb_mem: 829err_release_fb_mem:
827 dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE, 830 dma_free_coherent(NULL, par->databuf_sz + PAGE_SIZE,
828 da8xx_fb_info->screen_base, 831 da8xx_fb_info->screen_base - PAGE_SIZE,
829 da8xx_fb_info->fix.smem_start); 832 da8xx_fb_info->fix.smem_start);
830 833
831err_release_fb: 834err_release_fb:
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index 1a83709f9611..f67db4268374 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -1147,7 +1147,7 @@ static int __init gbefb_probe(struct platform_device *p_dev)
1147 gbefb_setup(options); 1147 gbefb_setup(options);
1148#endif 1148#endif
1149 1149
1150 if (!request_region(GBE_BASE, sizeof(struct sgi_gbe), "GBE")) { 1150 if (!request_mem_region(GBE_BASE, sizeof(struct sgi_gbe), "GBE")) {
1151 printk(KERN_ERR "gbefb: couldn't reserve mmio region\n"); 1151 printk(KERN_ERR "gbefb: couldn't reserve mmio region\n");
1152 ret = -EBUSY; 1152 ret = -EBUSY;
1153 goto out_release_framebuffer; 1153 goto out_release_framebuffer;
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index f24d04132eda..4d227b152001 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -317,7 +317,7 @@ static int __devexit pnx4008_wdt_remove(struct platform_device *pdev)
317 317
318static struct platform_driver platform_wdt_driver = { 318static struct platform_driver platform_wdt_driver = {
319 .driver = { 319 .driver = {
320 .name = "watchdog", 320 .name = "pnx4008-watchdog",
321 .owner = THIS_MODULE, 321 .owner = THIS_MODULE,
322 }, 322 },
323 .probe = pnx4008_wdt_probe, 323 .probe = pnx4008_wdt_probe,
@@ -352,4 +352,4 @@ MODULE_PARM_DESC(nowayout,
352 352
353MODULE_LICENSE("GPL"); 353MODULE_LICENSE("GPL");
354MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); 354MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
355MODULE_ALIAS("platform:watchdog"); 355MODULE_ALIAS("platform:pnx4008-watchdog");
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index f6cccc9df022..bf12d06b5877 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -62,7 +62,7 @@ extern unsigned int idt_cpu_freq;
62static int timeout = WATCHDOG_TIMEOUT; 62static int timeout = WATCHDOG_TIMEOUT;
63module_param(timeout, int, 0); 63module_param(timeout, int, 0);
64MODULE_PARM_DESC(timeout, "Watchdog timeout value, in seconds (default=" 64MODULE_PARM_DESC(timeout, "Watchdog timeout value, in seconds (default="
65 WATCHDOG_TIMEOUT ")"); 65 __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
66 66
67static int nowayout = WATCHDOG_NOWAYOUT; 67static int nowayout = WATCHDOG_NOWAYOUT;
68module_param(nowayout, int, 0); 68module_param(nowayout, int, 0);
@@ -276,7 +276,7 @@ static int __devinit rc32434_wdt_probe(struct platform_device *pdev)
276 return -ENODEV; 276 return -ENODEV;
277 } 277 }
278 278
279 wdt_reg = ioremap_nocache(r->start, r->end - r->start); 279 wdt_reg = ioremap_nocache(r->start, resource_size(r));
280 if (!wdt_reg) { 280 if (!wdt_reg) {
281 printk(KERN_ERR PFX "failed to remap I/O resources\n"); 281 printk(KERN_ERR PFX "failed to remap I/O resources\n");
282 return -ENXIO; 282 return -ENXIO;
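
The MODULE_PARM_DESC hunk above works because WATCHDOG_TIMEOUT is an integer macro: adjacent string-literal concatenation cannot paste a bare number, so the value has to be stringified first. A hedged sketch of the pattern (the default value of 20 is assumed here purely for illustration):

#include <linux/moduleparam.h>	/* __MODULE_STRING() wraps __stringify() */

#define WATCHDOG_TIMEOUT 20	/* integer, not a string literal */

/* "...(default=" WATCHDOG_TIMEOUT ")" would not compile;
 * stringifying the macro yields "...(default=20)" at preprocessing time. */
static const char timeout_desc[] =
	"Watchdog timeout value, in seconds (default="
	__MODULE_STRING(WATCHDOG_TIMEOUT) ")";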
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index 51c94e26a346..e777961939f3 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -343,18 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
343 343
344 BUG_ON(!vcookie->fscache); 344 BUG_ON(!vcookie->fscache);
345 345
346 if (PageFsCache(page)) { 346 return fscache_maybe_release_page(vcookie->fscache, page, gfp);
347 if (fscache_check_page_write(vcookie->fscache, page)) {
348 if (!(gfp & __GFP_WAIT))
349 return 0;
350 fscache_wait_on_page_write(vcookie->fscache, page);
351 }
352
353 fscache_uncache_page(vcookie->fscache, page);
354 ClearPageFsCache(page);
355 }
356
357 return 1;
358} 347}
359 348
360void __v9fs_fscache_invalidate_page(struct page *page) 349void __v9fs_fscache_invalidate_page(struct page *page)
@@ -368,7 +357,6 @@ void __v9fs_fscache_invalidate_page(struct page *page)
368 fscache_wait_on_page_write(vcookie->fscache, page); 357 fscache_wait_on_page_write(vcookie->fscache, page);
369 BUG_ON(!PageLocked(page)); 358 BUG_ON(!PageLocked(page));
370 fscache_uncache_page(vcookie->fscache, page); 359 fscache_uncache_page(vcookie->fscache, page);
371 ClearPageFsCache(page);
372 } 360 }
373} 361}
374 362
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 681c2a7b013f..39b301662f22 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -315,7 +315,6 @@ static void afs_invalidatepage(struct page *page, unsigned long offset)
315 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); 315 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
316 fscache_wait_on_page_write(vnode->cache, page); 316 fscache_wait_on_page_write(vnode->cache, page);
317 fscache_uncache_page(vnode->cache, page); 317 fscache_uncache_page(vnode->cache, page);
318 ClearPageFsCache(page);
319 } 318 }
320#endif 319#endif
321 320
@@ -349,17 +348,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
349 /* deny if page is being written to the cache and the caller hasn't 348 /* deny if page is being written to the cache and the caller hasn't
350 * elected to wait */ 349 * elected to wait */
351#ifdef CONFIG_AFS_FSCACHE 350#ifdef CONFIG_AFS_FSCACHE
352 if (PageFsCache(page)) { 351 if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
353 if (fscache_check_page_write(vnode->cache, page)) { 352 _leave(" = F [cache busy]");
354 if (!(gfp_flags & __GFP_WAIT)) { 353 return 0;
355 _leave(" = F [cache busy]");
356 return 0;
357 }
358 fscache_wait_on_page_write(vnode->cache, page);
359 }
360
361 fscache_uncache_page(vnode->cache, page);
362 ClearPageFsCache(page);
363 } 354 }
364#endif 355#endif
 365 356
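
Both the 9p and AFS hunks above swap the same open-coded sequence for a call to fscache_maybe_release_page(). Reconstructed purely from the deleted lines, the helper presumably behaves along these lines (a sketch, not the authoritative FS-Cache implementation, which may do additional bookkeeping):

static inline int sketch_maybe_release_page(struct fscache_cookie *cookie,
					    struct page *page, gfp_t gfp)
{
	if (PageFsCache(page)) {
		if (fscache_check_page_write(cookie, page)) {
			if (!(gfp & __GFP_WAIT))
				return 0;	/* caller cannot wait: refuse release */
			fscache_wait_on_page_write(cookie, page);
		}
		fscache_uncache_page(cookie, page);
		ClearPageFsCache(page);
	}
	return 1;			/* page may be released */
}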
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 431accd475a7..27089311fbea 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -114,8 +114,9 @@ nomem_lookup_data:
114 114
115/* 115/*
116 * attempt to look up the nominated node in this cache 116 * attempt to look up the nominated node in this cache
117 * - return -ETIMEDOUT to be scheduled again
117 */ 118 */
118static void cachefiles_lookup_object(struct fscache_object *_object) 119static int cachefiles_lookup_object(struct fscache_object *_object)
119{ 120{
120 struct cachefiles_lookup_data *lookup_data; 121 struct cachefiles_lookup_data *lookup_data;
121 struct cachefiles_object *parent, *object; 122 struct cachefiles_object *parent, *object;
@@ -145,13 +146,15 @@ static void cachefiles_lookup_object(struct fscache_object *_object)
145 object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) 146 object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
146 cachefiles_attr_changed(&object->fscache); 147 cachefiles_attr_changed(&object->fscache);
147 148
148 if (ret < 0) { 149 if (ret < 0 && ret != -ETIMEDOUT) {
149 printk(KERN_WARNING "CacheFiles: Lookup failed error %d\n", 150 if (ret != -ENOBUFS)
150 ret); 151 printk(KERN_WARNING
152 "CacheFiles: Lookup failed error %d\n", ret);
151 fscache_object_lookup_error(&object->fscache); 153 fscache_object_lookup_error(&object->fscache);
152 } 154 }
153 155
154 _leave(" [%d]", ret); 156 _leave(" [%d]", ret);
157 return ret;
155} 158}
156 159
157/* 160/*
@@ -331,6 +334,7 @@ static void cachefiles_put_object(struct fscache_object *_object)
331 } 334 }
332 335
333 cache = object->fscache.cache; 336 cache = object->fscache.cache;
337 fscache_object_destroy(&object->fscache);
334 kmem_cache_free(cachefiles_object_jar, object); 338 kmem_cache_free(cachefiles_object_jar, object);
335 fscache_object_destroyed(cache); 339 fscache_object_destroyed(cache);
336 } 340 }
@@ -403,12 +407,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
403 if (oi_size == ni_size) 407 if (oi_size == ni_size)
404 return 0; 408 return 0;
405 409
406 newattrs.ia_size = ni_size;
407 newattrs.ia_valid = ATTR_SIZE;
408
409 cachefiles_begin_secure(cache, &saved_cred); 410 cachefiles_begin_secure(cache, &saved_cred);
410 mutex_lock(&object->backer->d_inode->i_mutex); 411 mutex_lock(&object->backer->d_inode->i_mutex);
412
413 /* if there's an extension to a partial page at the end of the backing
414 * file, we need to discard the partial page so that we pick up new
415 * data after it */
416 if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
417 _debug("discard tail %llx", oi_size);
418 newattrs.ia_valid = ATTR_SIZE;
419 newattrs.ia_size = oi_size & PAGE_MASK;
420 ret = notify_change(object->backer, &newattrs);
421 if (ret < 0)
422 goto truncate_failed;
423 }
424
425 newattrs.ia_valid = ATTR_SIZE;
426 newattrs.ia_size = ni_size;
411 ret = notify_change(object->backer, &newattrs); 427 ret = notify_change(object->backer, &newattrs);
428
429truncate_failed:
412 mutex_unlock(&object->backer->d_inode->i_mutex); 430 mutex_unlock(&object->backer->d_inode->i_mutex);
413 cachefiles_end_secure(cache, saved_cred); 431 cachefiles_end_secure(cache, saved_cred);
414 432
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 4ce818ae39ea..14ac4806e291 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -21,17 +21,81 @@
21#include <linux/security.h> 21#include <linux/security.h>
22#include "internal.h" 22#include "internal.h"
23 23
24static int cachefiles_wait_bit(void *flags) 24#define CACHEFILES_KEYBUF_SIZE 512
25
26/*
27 * dump debugging info about an object
28 */
29static noinline
30void __cachefiles_printk_object(struct cachefiles_object *object,
31 const char *prefix,
32 u8 *keybuf)
25{ 33{
26 schedule(); 34 struct fscache_cookie *cookie;
27 return 0; 35 unsigned keylen, loop;
36
37 printk(KERN_ERR "%sobject: OBJ%x\n",
38 prefix, object->fscache.debug_id);
39 printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n",
40 prefix, fscache_object_states[object->fscache.state],
41 object->fscache.flags, object->fscache.work.flags,
42 object->fscache.events,
43 object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
44 printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
45 prefix, object->fscache.n_ops, object->fscache.n_in_progress,
46 object->fscache.n_exclusive);
47 printk(KERN_ERR "%sparent=%p\n",
48 prefix, object->fscache.parent);
49
50 spin_lock(&object->fscache.lock);
51 cookie = object->fscache.cookie;
52 if (cookie) {
53 printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
54 prefix,
55 object->fscache.cookie,
56 object->fscache.cookie->parent,
57 object->fscache.cookie->netfs_data,
58 object->fscache.cookie->flags);
59 if (keybuf)
60 keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
61 CACHEFILES_KEYBUF_SIZE);
62 else
63 keylen = 0;
64 } else {
65 printk(KERN_ERR "%scookie=NULL\n", prefix);
66 keylen = 0;
67 }
68 spin_unlock(&object->fscache.lock);
69
70 if (keylen) {
71 printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
72 for (loop = 0; loop < keylen; loop++)
73 printk("%02x", keybuf[loop]);
74 printk("'\n");
75 }
76}
77
78/*
79 * dump debugging info about a pair of objects
80 */
81static noinline void cachefiles_printk_object(struct cachefiles_object *object,
82 struct cachefiles_object *xobject)
83{
84 u8 *keybuf;
85
86 keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
87 if (object)
88 __cachefiles_printk_object(object, "", keybuf);
89 if (xobject)
90 __cachefiles_printk_object(xobject, "x", keybuf);
91 kfree(keybuf);
28} 92}
29 93
30/* 94/*
31 * record the fact that an object is now active 95 * record the fact that an object is now active
32 */ 96 */
33static void cachefiles_mark_object_active(struct cachefiles_cache *cache, 97static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
34 struct cachefiles_object *object) 98 struct cachefiles_object *object)
35{ 99{
36 struct cachefiles_object *xobject; 100 struct cachefiles_object *xobject;
37 struct rb_node **_p, *_parent = NULL; 101 struct rb_node **_p, *_parent = NULL;
@@ -42,8 +106,11 @@ static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
42try_again: 106try_again:
43 write_lock(&cache->active_lock); 107 write_lock(&cache->active_lock);
44 108
45 if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) 109 if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
110 printk(KERN_ERR "CacheFiles: Error: Object already active\n");
111 cachefiles_printk_object(object, NULL);
46 BUG(); 112 BUG();
113 }
47 114
48 dentry = object->dentry; 115 dentry = object->dentry;
49 _p = &cache->active_nodes.rb_node; 116 _p = &cache->active_nodes.rb_node;
@@ -66,8 +133,8 @@ try_again:
66 rb_insert_color(&object->active_node, &cache->active_nodes); 133 rb_insert_color(&object->active_node, &cache->active_nodes);
67 134
68 write_unlock(&cache->active_lock); 135 write_unlock(&cache->active_lock);
69 _leave(""); 136 _leave(" = 0");
70 return; 137 return 0;
71 138
72 /* an old object from a previous incarnation is hogging the slot - we 139 /* an old object from a previous incarnation is hogging the slot - we
73 * need to wait for it to be destroyed */ 140 * need to wait for it to be destroyed */
@@ -76,44 +143,70 @@ wait_for_old_object:
76 printk(KERN_ERR "\n"); 143 printk(KERN_ERR "\n");
77 printk(KERN_ERR "CacheFiles: Error:" 144 printk(KERN_ERR "CacheFiles: Error:"
78 " Unexpected object collision\n"); 145 " Unexpected object collision\n");
79 printk(KERN_ERR "xobject: OBJ%x\n", 146 cachefiles_printk_object(object, xobject);
80 xobject->fscache.debug_id);
81 printk(KERN_ERR "xobjstate=%s\n",
82 fscache_object_states[xobject->fscache.state]);
83 printk(KERN_ERR "xobjflags=%lx\n", xobject->fscache.flags);
84 printk(KERN_ERR "xobjevent=%lx [%lx]\n",
85 xobject->fscache.events, xobject->fscache.event_mask);
86 printk(KERN_ERR "xops=%u inp=%u exc=%u\n",
87 xobject->fscache.n_ops, xobject->fscache.n_in_progress,
88 xobject->fscache.n_exclusive);
89 printk(KERN_ERR "xcookie=%p [pr=%p nd=%p fl=%lx]\n",
90 xobject->fscache.cookie,
91 xobject->fscache.cookie->parent,
92 xobject->fscache.cookie->netfs_data,
93 xobject->fscache.cookie->flags);
94 printk(KERN_ERR "xparent=%p\n",
95 xobject->fscache.parent);
96 printk(KERN_ERR "object: OBJ%x\n",
97 object->fscache.debug_id);
98 printk(KERN_ERR "cookie=%p [pr=%p nd=%p fl=%lx]\n",
99 object->fscache.cookie,
100 object->fscache.cookie->parent,
101 object->fscache.cookie->netfs_data,
102 object->fscache.cookie->flags);
103 printk(KERN_ERR "parent=%p\n",
104 object->fscache.parent);
105 BUG(); 147 BUG();
106 } 148 }
107 atomic_inc(&xobject->usage); 149 atomic_inc(&xobject->usage);
108 write_unlock(&cache->active_lock); 150 write_unlock(&cache->active_lock);
109 151
110 _debug(">>> wait"); 152 if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
111 wait_on_bit(&xobject->flags, CACHEFILES_OBJECT_ACTIVE, 153 wait_queue_head_t *wq;
112 cachefiles_wait_bit, TASK_UNINTERRUPTIBLE); 154
113 _debug("<<< waited"); 155 signed long timeout = 60 * HZ;
156 wait_queue_t wait;
157 bool requeue;
158
159 /* if the object we're waiting for is queued for processing,
160 * then just put ourselves on the queue behind it */
161 if (slow_work_is_queued(&xobject->fscache.work)) {
162 _debug("queue OBJ%x behind OBJ%x immediately",
163 object->fscache.debug_id,
164 xobject->fscache.debug_id);
165 goto requeue;
166 }
167
168 /* otherwise we sleep until either the object we're waiting for
169 * is done, or the slow-work facility wants the thread back to
170 * do other work */
171 wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
172 init_wait(&wait);
173 requeue = false;
174 do {
175 prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
176 if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
177 break;
178 requeue = slow_work_sleep_till_thread_needed(
179 &object->fscache.work, &timeout);
180 } while (timeout > 0 && !requeue);
181 finish_wait(wq, &wait);
182
183 if (requeue &&
184 test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
185 _debug("queue OBJ%x behind OBJ%x after wait",
186 object->fscache.debug_id,
187 xobject->fscache.debug_id);
188 goto requeue;
189 }
190
191 if (timeout <= 0) {
192 printk(KERN_ERR "\n");
193 printk(KERN_ERR "CacheFiles: Error: Overlong"
194 " wait for old active object to go away\n");
195 cachefiles_printk_object(object, xobject);
196 goto requeue;
197 }
198 }
199
200 ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
114 201
115 cache->cache.ops->put_object(&xobject->fscache); 202 cache->cache.ops->put_object(&xobject->fscache);
116 goto try_again; 203 goto try_again;
204
205requeue:
206 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
207 cache->cache.ops->put_object(&xobject->fscache);
208 _leave(" = -ETIMEDOUT");
209 return -ETIMEDOUT;
117} 210}
118 211
119/* 212/*
@@ -254,7 +347,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
254 347
255 dir = dget_parent(object->dentry); 348 dir = dget_parent(object->dentry);
256 349
257 mutex_lock(&dir->d_inode->i_mutex); 350 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
258 ret = cachefiles_bury_object(cache, dir, object->dentry); 351 ret = cachefiles_bury_object(cache, dir, object->dentry);
259 352
260 dput(dir); 353 dput(dir);
@@ -307,7 +400,7 @@ lookup_again:
307 /* search the current directory for the element name */ 400 /* search the current directory for the element name */
308 _debug("lookup '%s'", name); 401 _debug("lookup '%s'", name);
309 402
310 mutex_lock(&dir->d_inode->i_mutex); 403 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
311 404
312 start = jiffies; 405 start = jiffies;
313 next = lookup_one_len(name, dir, nlen); 406 next = lookup_one_len(name, dir, nlen);
@@ -418,12 +511,15 @@ lookup_again:
418 } 511 }
419 512
420 /* note that we're now using this object */ 513 /* note that we're now using this object */
421 cachefiles_mark_object_active(cache, object); 514 ret = cachefiles_mark_object_active(cache, object);
422 515
423 mutex_unlock(&dir->d_inode->i_mutex); 516 mutex_unlock(&dir->d_inode->i_mutex);
424 dput(dir); 517 dput(dir);
425 dir = NULL; 518 dir = NULL;
426 519
520 if (ret == -ETIMEDOUT)
521 goto mark_active_timed_out;
522
427 _debug("=== OBTAINED_OBJECT ==="); 523 _debug("=== OBTAINED_OBJECT ===");
428 524
429 if (object->new) { 525 if (object->new) {
@@ -467,6 +563,10 @@ create_error:
467 cachefiles_io_error(cache, "Create/mkdir failed"); 563 cachefiles_io_error(cache, "Create/mkdir failed");
468 goto error; 564 goto error;
469 565
566mark_active_timed_out:
567 _debug("mark active timed out");
568 goto release_dentry;
569
470check_error: 570check_error:
471 _debug("check error %d", ret); 571 _debug("check error %d", ret);
472 write_lock(&cache->active_lock); 572 write_lock(&cache->active_lock);
@@ -474,7 +574,7 @@ check_error:
474 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags); 574 clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
475 wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE); 575 wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
476 write_unlock(&cache->active_lock); 576 write_unlock(&cache->active_lock);
477 577release_dentry:
478 dput(object->dentry); 578 dput(object->dentry);
479 object->dentry = NULL; 579 object->dentry = NULL;
480 goto error_out; 580 goto error_out;
@@ -495,9 +595,6 @@ error:
495error_out2: 595error_out2:
496 dput(dir); 596 dput(dir);
497error_out: 597error_out:
498 if (ret == -ENOSPC)
499 ret = -ENOBUFS;
500
501 _leave(" = error %d", -ret); 598 _leave(" = error %d", -ret);
502 return ret; 599 return ret;
503} 600}
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index a69787e7dd96..a6c8c6fe8df9 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/mount.h> 12#include <linux/mount.h>
13#include <linux/file.h> 13#include <linux/file.h>
14#include <linux/ima.h>
14#include "internal.h" 15#include "internal.h"
15 16
16/* 17/*
@@ -40,8 +41,10 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
40 41
41 _debug("--- monitor %p %lx ---", page, page->flags); 42 _debug("--- monitor %p %lx ---", page, page->flags);
42 43
43 if (!PageUptodate(page) && !PageError(page)) 44 if (!PageUptodate(page) && !PageError(page)) {
 44 dump_stack(); 45 /* unlocked, not uptodate and not erroneous? */
46 _debug("page probably truncated");
47 }
45 48
46 /* remove from the waitqueue */ 49 /* remove from the waitqueue */
47 list_del(&wait->task_list); 50 list_del(&wait->task_list);
@@ -61,6 +64,84 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
61} 64}
62 65
63/* 66/*
67 * handle a probably truncated page
68 * - check to see if the page is still relevant and reissue the read if
69 * possible
70 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
71 * must wait again and 0 if successful
72 */
73static int cachefiles_read_reissue(struct cachefiles_object *object,
74 struct cachefiles_one_read *monitor)
75{
76 struct address_space *bmapping = object->backer->d_inode->i_mapping;
77 struct page *backpage = monitor->back_page, *backpage2;
78 int ret;
79
80 kenter("{ino=%lx},{%lx,%lx}",
81 object->backer->d_inode->i_ino,
82 backpage->index, backpage->flags);
83
84 /* skip if the page was truncated away completely */
85 if (backpage->mapping != bmapping) {
86 kleave(" = -ENODATA [mapping]");
87 return -ENODATA;
88 }
89
90 backpage2 = find_get_page(bmapping, backpage->index);
91 if (!backpage2) {
92 kleave(" = -ENODATA [gone]");
93 return -ENODATA;
94 }
95
96 if (backpage != backpage2) {
97 put_page(backpage2);
98 kleave(" = -ENODATA [different]");
99 return -ENODATA;
100 }
101
102 /* the page is still there and we already have a ref on it, so we don't
103 * need a second */
104 put_page(backpage2);
105
106 INIT_LIST_HEAD(&monitor->op_link);
107 add_page_wait_queue(backpage, &monitor->monitor);
108
109 if (trylock_page(backpage)) {
110 ret = -EIO;
111 if (PageError(backpage))
112 goto unlock_discard;
113 ret = 0;
114 if (PageUptodate(backpage))
115 goto unlock_discard;
116
117 kdebug("reissue read");
118 ret = bmapping->a_ops->readpage(NULL, backpage);
119 if (ret < 0)
120 goto unlock_discard;
121 }
122
123 /* but the page may have been read before the monitor was installed, so
124 * the monitor may miss the event - so we have to ensure that we do get
125 * one in such a case */
126 if (trylock_page(backpage)) {
127 _debug("jumpstart %p {%lx}", backpage, backpage->flags);
128 unlock_page(backpage);
129 }
130
131 /* it'll reappear on the todo list */
132 kleave(" = -EINPROGRESS");
133 return -EINPROGRESS;
134
135unlock_discard:
136 unlock_page(backpage);
137 spin_lock_irq(&object->work_lock);
138 list_del(&monitor->op_link);
139 spin_unlock_irq(&object->work_lock);
140 kleave(" = %d", ret);
141 return ret;
142}
143
144/*
64 * copy data from backing pages to netfs pages to complete a read operation 145 * copy data from backing pages to netfs pages to complete a read operation
65 * - driven by FS-Cache's thread pool 146 * - driven by FS-Cache's thread pool
66 */ 147 */
@@ -92,20 +173,26 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
92 173
93 _debug("- copy {%lu}", monitor->back_page->index); 174 _debug("- copy {%lu}", monitor->back_page->index);
94 175
95 error = -EIO; 176 recheck:
96 if (PageUptodate(monitor->back_page)) { 177 if (PageUptodate(monitor->back_page)) {
97 copy_highpage(monitor->netfs_page, monitor->back_page); 178 copy_highpage(monitor->netfs_page, monitor->back_page);
98 179
99 pagevec_add(&pagevec, monitor->netfs_page); 180 pagevec_add(&pagevec, monitor->netfs_page);
100 fscache_mark_pages_cached(monitor->op, &pagevec); 181 fscache_mark_pages_cached(monitor->op, &pagevec);
101 error = 0; 182 error = 0;
102 } 183 } else if (!PageError(monitor->back_page)) {
103 184 /* the page has probably been truncated */
104 if (error) 185 error = cachefiles_read_reissue(object, monitor);
186 if (error == -EINPROGRESS)
187 goto next;
188 goto recheck;
189 } else {
105 cachefiles_io_error_obj( 190 cachefiles_io_error_obj(
106 object, 191 object,
107 "Readpage failed on backing file %lx", 192 "Readpage failed on backing file %lx",
108 (unsigned long) monitor->back_page->flags); 193 (unsigned long) monitor->back_page->flags);
194 error = -EIO;
195 }
109 196
110 page_cache_release(monitor->back_page); 197 page_cache_release(monitor->back_page);
111 198
@@ -114,6 +201,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
114 fscache_put_retrieval(op); 201 fscache_put_retrieval(op);
115 kfree(monitor); 202 kfree(monitor);
116 203
204 next:
117 /* let the thread pool have some air occasionally */ 205 /* let the thread pool have some air occasionally */
118 max--; 206 max--;
119 if (max < 0 || need_resched()) { 207 if (max < 0 || need_resched()) {
@@ -333,7 +421,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
333 421
334 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits; 422 shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
335 423
336 op->op.flags = FSCACHE_OP_FAST; 424 op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
425 op->op.flags |= FSCACHE_OP_FAST;
337 op->op.processor = cachefiles_read_copier; 426 op->op.processor = cachefiles_read_copier;
338 427
339 pagevec_init(&pagevec, 0); 428 pagevec_init(&pagevec, 0);
@@ -639,7 +728,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
639 728
640 pagevec_init(&pagevec, 0); 729 pagevec_init(&pagevec, 0);
641 730
642 op->op.flags = FSCACHE_OP_FAST; 731 op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
732 op->op.flags |= FSCACHE_OP_FAST;
643 op->op.processor = cachefiles_read_copier; 733 op->op.processor = cachefiles_read_copier;
644 734
645 INIT_LIST_HEAD(&backpages); 735 INIT_LIST_HEAD(&backpages);
@@ -801,7 +891,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
801 struct cachefiles_cache *cache; 891 struct cachefiles_cache *cache;
802 mm_segment_t old_fs; 892 mm_segment_t old_fs;
803 struct file *file; 893 struct file *file;
804 loff_t pos; 894 loff_t pos, eof;
895 size_t len;
805 void *data; 896 void *data;
806 int ret; 897 int ret;
807 898
@@ -832,18 +923,33 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
832 if (IS_ERR(file)) { 923 if (IS_ERR(file)) {
833 ret = PTR_ERR(file); 924 ret = PTR_ERR(file);
834 } else { 925 } else {
926 ima_counts_get(file);
835 ret = -EIO; 927 ret = -EIO;
836 if (file->f_op->write) { 928 if (file->f_op->write) {
837 pos = (loff_t) page->index << PAGE_SHIFT; 929 pos = (loff_t) page->index << PAGE_SHIFT;
930
931 /* we mustn't write more data than we have, so we have
932 * to beware of a partial page at EOF */
933 eof = object->fscache.store_limit_l;
934 len = PAGE_SIZE;
935 if (eof & ~PAGE_MASK) {
936 ASSERTCMP(pos, <, eof);
937 if (eof - pos < PAGE_SIZE) {
938 _debug("cut short %llx to %llx",
939 pos, eof);
940 len = eof - pos;
941 ASSERTCMP(pos + len, ==, eof);
942 }
943 }
944
838 data = kmap(page); 945 data = kmap(page);
839 old_fs = get_fs(); 946 old_fs = get_fs();
840 set_fs(KERNEL_DS); 947 set_fs(KERNEL_DS);
841 ret = file->f_op->write( 948 ret = file->f_op->write(
842 file, (const void __user *) data, PAGE_SIZE, 949 file, (const void __user *) data, len, &pos);
843 &pos);
844 set_fs(old_fs); 950 set_fs(old_fs);
845 kunmap(page); 951 kunmap(page);
846 if (ret != PAGE_SIZE) 952 if (ret != len)
847 ret = -EIO; 953 ret = -EIO;
848 } 954 }
849 fput(file); 955 fput(file);
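
The clamp added above keeps the last, partial page of a cache object from being padded out to PAGE_SIZE on disk. With illustrative values and 4 KiB pages: if store_limit_l (eof) is 0x12345 and the page being written starts at pos 0x12000, then eof & ~PAGE_MASK is 0x345 (a partial tail page exists) and eof - pos = 0x345 < PAGE_SIZE, so len is cut from 0x1000 to 0x345 and the write still counts as complete when it returns that shorter length.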
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 145540a316ab..094ea65afc85 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,12 @@
1Version 1.61
2------------
3Fix append problem to Samba servers (files opened with O_APPEND could
4have duplicated data). Fix oops in cifs_lookup. Workaround problem
5mounting to OS/400 Netserve. Fix oops in cifs_get_tcp_session.
6Disable use of server inode numbers when server only
7partially supports them (e.g. for one server querying inode numbers on
 8FindFirst fails but QPathInfo queries work).
9
1Version 1.60 10Version 1.60
2------------- 11-------------
3Fix memory leak in reconnect. Fix oops in DFS mount error path. 12Fix memory leak in reconnect. Fix oops in DFS mount error path.
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 9a5e4f5f3122..29f1da761bbf 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1037,7 +1037,7 @@ init_cifs(void)
1037 if (rc) 1037 if (rc)
1038 goto out_unregister_key_type; 1038 goto out_unregister_key_type;
1039#endif 1039#endif
1040 rc = slow_work_register_user(); 1040 rc = slow_work_register_user(THIS_MODULE);
1041 if (rc) 1041 if (rc)
1042 goto out_unregister_resolver_key; 1042 goto out_unregister_resolver_key;
1043 1043
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 627a60a6c1b1..1f42f772865a 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -214,8 +214,6 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
214 posix_flags |= SMB_O_EXCL; 214 posix_flags |= SMB_O_EXCL;
215 if (oflags & O_TRUNC) 215 if (oflags & O_TRUNC)
216 posix_flags |= SMB_O_TRUNC; 216 posix_flags |= SMB_O_TRUNC;
217 if (oflags & O_APPEND)
218 posix_flags |= SMB_O_APPEND;
219 if (oflags & O_SYNC) 217 if (oflags & O_SYNC)
220 posix_flags |= SMB_O_SYNC; 218 posix_flags |= SMB_O_SYNC;
221 if (oflags & O_DIRECTORY) 219 if (oflags & O_DIRECTORY)
@@ -643,9 +641,9 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
643 * O_EXCL: optimize away the lookup, but don't hash the dentry. Let 641 * O_EXCL: optimize away the lookup, but don't hash the dentry. Let
644 * the VFS handle the create. 642 * the VFS handle the create.
645 */ 643 */
646 if (nd->flags & LOOKUP_EXCL) { 644 if (nd && (nd->flags & LOOKUP_EXCL)) {
647 d_instantiate(direntry, NULL); 645 d_instantiate(direntry, NULL);
648 return 0; 646 return NULL;
649 } 647 }
650 648
651 /* can not grab the rename sem here since it would 649 /* can not grab the rename sem here since it would
@@ -675,7 +673,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
675 * reduction in network traffic in the other paths. 673 * reduction in network traffic in the other paths.
676 */ 674 */
677 if (pTcon->unix_ext) { 675 if (pTcon->unix_ext) {
678 if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && 676 if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
679 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && 677 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
680 (nd->intent.open.flags & O_CREAT)) { 678 (nd->intent.open.flags & O_CREAT)) {
681 rc = cifs_posix_open(full_path, &newInode, nd->path.mnt, 679 rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 1e25efcb55c8..d27d4ec6579b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -720,7 +720,7 @@ void
720cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb) 720cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
721{ 721{
722 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { 722 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
723 cifs_sb->mnt_cifs_flags &= CIFS_MOUNT_SERVER_INUM; 723 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
724 cERROR(1, ("Autodisabling the use of server inode numbers on " 724 cERROR(1, ("Autodisabling the use of server inode numbers on "
725 "%s. This server doesn't seem to support them " 725 "%s. This server doesn't seem to support them "
726 "properly. Hardlinks will not be recognized on this " 726 "properly. Hardlinks will not be recognized on this "
diff --git a/fs/exec.c b/fs/exec.c
index ba112bd4a339..c0c636e34f60 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -46,7 +46,6 @@
46#include <linux/proc_fs.h> 46#include <linux/proc_fs.h>
47#include <linux/mount.h> 47#include <linux/mount.h>
48#include <linux/security.h> 48#include <linux/security.h>
49#include <linux/ima.h>
50#include <linux/syscalls.h> 49#include <linux/syscalls.h>
51#include <linux/tsacct_kern.h> 50#include <linux/tsacct_kern.h>
52#include <linux/cn_proc.h> 51#include <linux/cn_proc.h>
@@ -1209,9 +1208,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1209 retval = security_bprm_check(bprm); 1208 retval = security_bprm_check(bprm);
1210 if (retval) 1209 if (retval)
1211 return retval; 1210 return retval;
1212 retval = ima_bprm_check(bprm);
1213 if (retval)
1214 return retval;
1215 1211
1216 /* kernel module loader fixup */ 1212 /* kernel module loader fixup */
1217 /* so we don't try to load run modprobe in kernel space. */ 1213 /* so we don't try to load run modprobe in kernel space. */
diff --git a/fs/fcntl.c b/fs/fcntl.c
index fc089f2f7f56..2cf93ec40a67 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -284,7 +284,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
284 type = PIDTYPE_PID; 284 type = PIDTYPE_PID;
285 break; 285 break;
286 286
287 case F_OWNER_GID: 287 case F_OWNER_PGRP:
288 type = PIDTYPE_PGID; 288 type = PIDTYPE_PGID;
289 break; 289 break;
290 290
@@ -321,7 +321,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
321 break; 321 break;
322 322
323 case PIDTYPE_PGID: 323 case PIDTYPE_PGID:
324 owner.type = F_OWNER_GID; 324 owner.type = F_OWNER_PGRP;
325 break; 325 break;
326 326
327 default: 327 default:
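
For reference, F_OWNER_PGRP is the constant userspace passes in struct f_owner_ex; a minimal usage sketch (glibc exposes the *_EX fcntl interfaces under _GNU_SOURCE; error handling omitted and fd assumed to be an already-open descriptor):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Direct SIGIO/SIGURG for fd at the caller's own process group. */
static int own_by_pgrp(int fd)
{
	struct f_owner_ex owner = {
		.type = F_OWNER_PGRP,	/* the spelling this fix settles on */
		.pid  = getpgrp(),
	};

	return fcntl(fd, F_SETOWN_EX, &owner);
}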
diff --git a/fs/file_table.c b/fs/file_table.c
index 8eb44042e009..4bef4c01ec6f 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -13,7 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/fs.h> 14#include <linux/fs.h>
15#include <linux/security.h> 15#include <linux/security.h>
16#include <linux/ima.h>
17#include <linux/eventpoll.h> 16#include <linux/eventpoll.h>
18#include <linux/rcupdate.h> 17#include <linux/rcupdate.h>
19#include <linux/mount.h> 18#include <linux/mount.h>
@@ -280,7 +279,6 @@ void __fput(struct file *file)
280 if (file->f_op && file->f_op->release) 279 if (file->f_op && file->f_op->release)
281 file->f_op->release(inode, file); 280 file->f_op->release(inode, file);
282 security_file_free(file); 281 security_file_free(file);
283 ima_file_free(file);
284 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL)) 282 if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
285 cdev_put(inode->i_cdev); 283 cdev_put(inode->i_cdev);
286 fops_put(file->f_op); 284 fops_put(file->f_op);
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
index 9bbb8ce7bea0..864dac20a242 100644
--- a/fs/fscache/Kconfig
+++ b/fs/fscache/Kconfig
@@ -54,3 +54,10 @@ config FSCACHE_DEBUG
54 enabled by setting bits in /sys/modules/fscache/parameter/debug. 54 enabled by setting bits in /sys/modules/fscache/parameter/debug.
55 55
56 See Documentation/filesystems/caching/fscache.txt for more information. 56 See Documentation/filesystems/caching/fscache.txt for more information.
57
58config FSCACHE_OBJECT_LIST
59 bool "Maintain global object list for debugging purposes"
60 depends on FSCACHE && PROC_FS
61 help
62 Maintain a global list of active fscache objects that can be
63 retrieved through /proc/fs/fscache/objects for debugging purposes
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
index 91571b95aacc..6d561531cb36 100644
--- a/fs/fscache/Makefile
+++ b/fs/fscache/Makefile
@@ -15,5 +15,6 @@ fscache-y := \
15fscache-$(CONFIG_PROC_FS) += proc.o 15fscache-$(CONFIG_PROC_FS) += proc.o
16fscache-$(CONFIG_FSCACHE_STATS) += stats.o 16fscache-$(CONFIG_FSCACHE_STATS) += stats.o
17fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o 17fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o
18fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o
18 19
19obj-$(CONFIG_FSCACHE) := fscache.o 20obj-$(CONFIG_FSCACHE) := fscache.o
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index e21985bbb1fb..6a3c48abd677 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -263,6 +263,7 @@ int fscache_add_cache(struct fscache_cache *cache,
263 spin_lock(&cache->object_list_lock); 263 spin_lock(&cache->object_list_lock);
264 list_add_tail(&ifsdef->cache_link, &cache->object_list); 264 list_add_tail(&ifsdef->cache_link, &cache->object_list);
265 spin_unlock(&cache->object_list_lock); 265 spin_unlock(&cache->object_list_lock);
266 fscache_objlist_add(ifsdef);
266 267
267 /* add the cache's netfs definition index object to the top level index 268 /* add the cache's netfs definition index object to the top level index
268 * cookie as a known backing object */ 269 * cookie as a known backing object */
@@ -380,11 +381,15 @@ void fscache_withdraw_cache(struct fscache_cache *cache)
380 381
381 /* make sure all pages pinned by operations on behalf of the netfs are 382 /* make sure all pages pinned by operations on behalf of the netfs are
382 * written to disk */ 383 * written to disk */
384 fscache_stat(&fscache_n_cop_sync_cache);
383 cache->ops->sync_cache(cache); 385 cache->ops->sync_cache(cache);
386 fscache_stat_d(&fscache_n_cop_sync_cache);
384 387
385 /* dissociate all the netfs pages backed by this cache from the block 388 /* dissociate all the netfs pages backed by this cache from the block
386 * mappings in the cache */ 389 * mappings in the cache */
390 fscache_stat(&fscache_n_cop_dissociate_pages);
387 cache->ops->dissociate_pages(cache); 391 cache->ops->dissociate_pages(cache);
392 fscache_stat_d(&fscache_n_cop_dissociate_pages);
388 393
389 /* we now have to destroy all the active objects pertaining to this 394 /* we now have to destroy all the active objects pertaining to this
390 * cache - which we do by passing them off to thread pool to be 395 * cache - which we do by passing them off to thread pool to be
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 72fd18f6c71f..990535071a8a 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -36,6 +36,7 @@ void fscache_cookie_init_once(void *_cookie)
36 36
37 memset(cookie, 0, sizeof(*cookie)); 37 memset(cookie, 0, sizeof(*cookie));
38 spin_lock_init(&cookie->lock); 38 spin_lock_init(&cookie->lock);
39 spin_lock_init(&cookie->stores_lock);
39 INIT_HLIST_HEAD(&cookie->backing_objects); 40 INIT_HLIST_HEAD(&cookie->backing_objects);
40} 41}
41 42
@@ -102,7 +103,9 @@ struct fscache_cookie *__fscache_acquire_cookie(
102 cookie->netfs_data = netfs_data; 103 cookie->netfs_data = netfs_data;
103 cookie->flags = 0; 104 cookie->flags = 0;
104 105
105 INIT_RADIX_TREE(&cookie->stores, GFP_NOFS); 106 /* radix tree insertion won't use the preallocation pool unless it's
107 * told it may not wait */
108 INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);
106 109
107 switch (cookie->def->type) { 110 switch (cookie->def->type) {
108 case FSCACHE_COOKIE_TYPE_INDEX: 111 case FSCACHE_COOKIE_TYPE_INDEX:
@@ -249,7 +252,9 @@ static int fscache_alloc_object(struct fscache_cache *cache,
249 252
250 /* ask the cache to allocate an object (we may end up with duplicate 253 /* ask the cache to allocate an object (we may end up with duplicate
251 * objects at this stage, but we sort that out later) */ 254 * objects at this stage, but we sort that out later) */
255 fscache_stat(&fscache_n_cop_alloc_object);
252 object = cache->ops->alloc_object(cache, cookie); 256 object = cache->ops->alloc_object(cache, cookie);
257 fscache_stat_d(&fscache_n_cop_alloc_object);
253 if (IS_ERR(object)) { 258 if (IS_ERR(object)) {
254 fscache_stat(&fscache_n_object_no_alloc); 259 fscache_stat(&fscache_n_object_no_alloc);
255 ret = PTR_ERR(object); 260 ret = PTR_ERR(object);
@@ -270,8 +275,11 @@ static int fscache_alloc_object(struct fscache_cache *cache,
270 /* only attach if we managed to allocate all we needed, otherwise 275 /* only attach if we managed to allocate all we needed, otherwise
271 * discard the object we just allocated and instead use the one 276 * discard the object we just allocated and instead use the one
272 * attached to the cookie */ 277 * attached to the cookie */
273 if (fscache_attach_object(cookie, object) < 0) 278 if (fscache_attach_object(cookie, object) < 0) {
279 fscache_stat(&fscache_n_cop_put_object);
274 cache->ops->put_object(object); 280 cache->ops->put_object(object);
281 fscache_stat_d(&fscache_n_cop_put_object);
282 }
275 283
276 _leave(" = 0"); 284 _leave(" = 0");
277 return 0; 285 return 0;
@@ -287,7 +295,9 @@ object_already_extant:
287 return 0; 295 return 0;
288 296
289error_put: 297error_put:
298 fscache_stat(&fscache_n_cop_put_object);
290 cache->ops->put_object(object); 299 cache->ops->put_object(object);
300 fscache_stat_d(&fscache_n_cop_put_object);
291error: 301error:
292 _leave(" = %d", ret); 302 _leave(" = %d", ret);
293 return ret; 303 return ret;
@@ -349,6 +359,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
349 object->cookie = cookie; 359 object->cookie = cookie;
350 atomic_inc(&cookie->usage); 360 atomic_inc(&cookie->usage);
351 hlist_add_head(&object->cookie_link, &cookie->backing_objects); 361 hlist_add_head(&object->cookie_link, &cookie->backing_objects);
362
363 fscache_objlist_add(object);
352 ret = 0; 364 ret = 0;
353 365
354cant_attach_object: 366cant_attach_object:
@@ -403,6 +415,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
403 unsigned long event; 415 unsigned long event;
404 416
405 fscache_stat(&fscache_n_relinquishes); 417 fscache_stat(&fscache_n_relinquishes);
418 if (retire)
419 fscache_stat(&fscache_n_relinquishes_retire);
406 420
407 if (!cookie) { 421 if (!cookie) {
408 fscache_stat(&fscache_n_relinquishes_null); 422 fscache_stat(&fscache_n_relinquishes_null);
@@ -428,12 +442,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
428 442
429 event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE; 443 event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
430 444
431 /* detach pointers back to the netfs */
432 spin_lock(&cookie->lock); 445 spin_lock(&cookie->lock);
433 446
434 cookie->netfs_data = NULL;
435 cookie->def = NULL;
436
437 /* break links with all the active objects */ 447 /* break links with all the active objects */
438 while (!hlist_empty(&cookie->backing_objects)) { 448 while (!hlist_empty(&cookie->backing_objects)) {
439 object = hlist_entry(cookie->backing_objects.first, 449 object = hlist_entry(cookie->backing_objects.first,
@@ -456,6 +466,10 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
456 BUG(); 466 BUG();
457 } 467 }
458 468
469 /* detach pointers back to the netfs */
470 cookie->netfs_data = NULL;
471 cookie->def = NULL;
472
459 spin_unlock(&cookie->lock); 473 spin_unlock(&cookie->lock);
460 474
461 if (cookie->parent) { 475 if (cookie->parent) {
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 1c341304621f..edd7434ab6e5 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -17,6 +17,7 @@
17 * - cache->object_list_lock 17 * - cache->object_list_lock
18 * - object->lock 18 * - object->lock
19 * - object->parent->lock 19 * - object->parent->lock
20 * - cookie->stores_lock
20 * - fscache_thread_lock 21 * - fscache_thread_lock
21 * 22 *
22 */ 23 */
@@ -88,17 +89,31 @@ extern int fscache_wait_bit_interruptible(void *);
88/* 89/*
89 * object.c 90 * object.c
90 */ 91 */
92extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5];
93
91extern void fscache_withdrawing_object(struct fscache_cache *, 94extern void fscache_withdrawing_object(struct fscache_cache *,
92 struct fscache_object *); 95 struct fscache_object *);
93extern void fscache_enqueue_object(struct fscache_object *); 96extern void fscache_enqueue_object(struct fscache_object *);
94 97
95/* 98/*
99 * object-list.c
100 */
101#ifdef CONFIG_FSCACHE_OBJECT_LIST
102extern const struct file_operations fscache_objlist_fops;
103
104extern void fscache_objlist_add(struct fscache_object *);
105#else
106#define fscache_objlist_add(object) do {} while(0)
107#endif
108
109/*
96 * operation.c 110 * operation.c
97 */ 111 */
98extern int fscache_submit_exclusive_op(struct fscache_object *, 112extern int fscache_submit_exclusive_op(struct fscache_object *,
99 struct fscache_operation *); 113 struct fscache_operation *);
100extern int fscache_submit_op(struct fscache_object *, 114extern int fscache_submit_op(struct fscache_object *,
101 struct fscache_operation *); 115 struct fscache_operation *);
116extern int fscache_cancel_op(struct fscache_operation *);
102extern void fscache_abort_object(struct fscache_object *); 117extern void fscache_abort_object(struct fscache_object *);
103extern void fscache_start_operations(struct fscache_object *); 118extern void fscache_start_operations(struct fscache_object *);
104extern void fscache_operation_gc(struct work_struct *); 119extern void fscache_operation_gc(struct work_struct *);
@@ -127,6 +142,8 @@ extern atomic_t fscache_n_op_enqueue;
127extern atomic_t fscache_n_op_deferred_release; 142extern atomic_t fscache_n_op_deferred_release;
128extern atomic_t fscache_n_op_release; 143extern atomic_t fscache_n_op_release;
129extern atomic_t fscache_n_op_gc; 144extern atomic_t fscache_n_op_gc;
145extern atomic_t fscache_n_op_cancelled;
146extern atomic_t fscache_n_op_rejected;
130 147
131extern atomic_t fscache_n_attr_changed; 148extern atomic_t fscache_n_attr_changed;
132extern atomic_t fscache_n_attr_changed_ok; 149extern atomic_t fscache_n_attr_changed_ok;
@@ -138,6 +155,8 @@ extern atomic_t fscache_n_allocs;
138extern atomic_t fscache_n_allocs_ok; 155extern atomic_t fscache_n_allocs_ok;
139extern atomic_t fscache_n_allocs_wait; 156extern atomic_t fscache_n_allocs_wait;
140extern atomic_t fscache_n_allocs_nobufs; 157extern atomic_t fscache_n_allocs_nobufs;
158extern atomic_t fscache_n_allocs_intr;
159extern atomic_t fscache_n_allocs_object_dead;
141extern atomic_t fscache_n_alloc_ops; 160extern atomic_t fscache_n_alloc_ops;
142extern atomic_t fscache_n_alloc_op_waits; 161extern atomic_t fscache_n_alloc_op_waits;
143 162
@@ -148,6 +167,7 @@ extern atomic_t fscache_n_retrievals_nodata;
148extern atomic_t fscache_n_retrievals_nobufs; 167extern atomic_t fscache_n_retrievals_nobufs;
149extern atomic_t fscache_n_retrievals_intr; 168extern atomic_t fscache_n_retrievals_intr;
150extern atomic_t fscache_n_retrievals_nomem; 169extern atomic_t fscache_n_retrievals_nomem;
170extern atomic_t fscache_n_retrievals_object_dead;
151extern atomic_t fscache_n_retrieval_ops; 171extern atomic_t fscache_n_retrieval_ops;
152extern atomic_t fscache_n_retrieval_op_waits; 172extern atomic_t fscache_n_retrieval_op_waits;
153 173
@@ -158,6 +178,14 @@ extern atomic_t fscache_n_stores_nobufs;
158extern atomic_t fscache_n_stores_oom; 178extern atomic_t fscache_n_stores_oom;
159extern atomic_t fscache_n_store_ops; 179extern atomic_t fscache_n_store_ops;
160extern atomic_t fscache_n_store_calls; 180extern atomic_t fscache_n_store_calls;
181extern atomic_t fscache_n_store_pages;
182extern atomic_t fscache_n_store_radix_deletes;
183extern atomic_t fscache_n_store_pages_over_limit;
184
185extern atomic_t fscache_n_store_vmscan_not_storing;
186extern atomic_t fscache_n_store_vmscan_gone;
187extern atomic_t fscache_n_store_vmscan_busy;
188extern atomic_t fscache_n_store_vmscan_cancelled;
161 189
162extern atomic_t fscache_n_marks; 190extern atomic_t fscache_n_marks;
163extern atomic_t fscache_n_uncaches; 191extern atomic_t fscache_n_uncaches;
@@ -176,6 +204,7 @@ extern atomic_t fscache_n_updates_run;
176extern atomic_t fscache_n_relinquishes; 204extern atomic_t fscache_n_relinquishes;
177extern atomic_t fscache_n_relinquishes_null; 205extern atomic_t fscache_n_relinquishes_null;
178extern atomic_t fscache_n_relinquishes_waitcrt; 206extern atomic_t fscache_n_relinquishes_waitcrt;
207extern atomic_t fscache_n_relinquishes_retire;
179 208
180extern atomic_t fscache_n_cookie_index; 209extern atomic_t fscache_n_cookie_index;
181extern atomic_t fscache_n_cookie_data; 210extern atomic_t fscache_n_cookie_data;
@@ -186,6 +215,7 @@ extern atomic_t fscache_n_object_no_alloc;
186extern atomic_t fscache_n_object_lookups; 215extern atomic_t fscache_n_object_lookups;
187extern atomic_t fscache_n_object_lookups_negative; 216extern atomic_t fscache_n_object_lookups_negative;
188extern atomic_t fscache_n_object_lookups_positive; 217extern atomic_t fscache_n_object_lookups_positive;
218extern atomic_t fscache_n_object_lookups_timed_out;
189extern atomic_t fscache_n_object_created; 219extern atomic_t fscache_n_object_created;
190extern atomic_t fscache_n_object_avail; 220extern atomic_t fscache_n_object_avail;
191extern atomic_t fscache_n_object_dead; 221extern atomic_t fscache_n_object_dead;
@@ -195,15 +225,41 @@ extern atomic_t fscache_n_checkaux_okay;
195extern atomic_t fscache_n_checkaux_update; 225extern atomic_t fscache_n_checkaux_update;
196extern atomic_t fscache_n_checkaux_obsolete; 226extern atomic_t fscache_n_checkaux_obsolete;
197 227
228extern atomic_t fscache_n_cop_alloc_object;
229extern atomic_t fscache_n_cop_lookup_object;
230extern atomic_t fscache_n_cop_lookup_complete;
231extern atomic_t fscache_n_cop_grab_object;
232extern atomic_t fscache_n_cop_update_object;
233extern atomic_t fscache_n_cop_drop_object;
234extern atomic_t fscache_n_cop_put_object;
235extern atomic_t fscache_n_cop_sync_cache;
236extern atomic_t fscache_n_cop_attr_changed;
237extern atomic_t fscache_n_cop_read_or_alloc_page;
238extern atomic_t fscache_n_cop_read_or_alloc_pages;
239extern atomic_t fscache_n_cop_allocate_page;
240extern atomic_t fscache_n_cop_allocate_pages;
241extern atomic_t fscache_n_cop_write_page;
242extern atomic_t fscache_n_cop_uncache_page;
243extern atomic_t fscache_n_cop_dissociate_pages;
244
198static inline void fscache_stat(atomic_t *stat) 245static inline void fscache_stat(atomic_t *stat)
199{ 246{
200 atomic_inc(stat); 247 atomic_inc(stat);
201} 248}
202 249
250static inline void fscache_stat_d(atomic_t *stat)
251{
252 atomic_dec(stat);
253}
254
255#define __fscache_stat(stat) (stat)
256
203extern const struct file_operations fscache_stats_fops; 257extern const struct file_operations fscache_stats_fops;
204#else 258#else
205 259
260#define __fscache_stat(stat) (NULL)
206#define fscache_stat(stat) do {} while (0) 261#define fscache_stat(stat) do {} while (0)
262#define fscache_stat_d(stat) do {} while (0)
207#endif 263#endif
208 264
209/* 265/*
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 4de41b597499..add6bdb53f04 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -48,7 +48,7 @@ static int __init fscache_init(void)
48{ 48{
49 int ret; 49 int ret;
50 50
51 ret = slow_work_register_user(); 51 ret = slow_work_register_user(THIS_MODULE);
52 if (ret < 0) 52 if (ret < 0)
53 goto error_slow_work; 53 goto error_slow_work;
54 54
@@ -80,7 +80,7 @@ error_kobj:
80error_cookie_jar: 80error_cookie_jar:
81 fscache_proc_cleanup(); 81 fscache_proc_cleanup();
82error_proc: 82error_proc:
83 slow_work_unregister_user(); 83 slow_work_unregister_user(THIS_MODULE);
84error_slow_work: 84error_slow_work:
85 return ret; 85 return ret;
86} 86}
@@ -97,7 +97,7 @@ static void __exit fscache_exit(void)
97 kobject_put(fscache_root); 97 kobject_put(fscache_root);
98 kmem_cache_destroy(fscache_cookie_jar); 98 kmem_cache_destroy(fscache_cookie_jar);
99 fscache_proc_cleanup(); 99 fscache_proc_cleanup();
100 slow_work_unregister_user(); 100 slow_work_unregister_user(THIS_MODULE);
101 printk(KERN_NOTICE "FS-Cache: Unloaded\n"); 101 printk(KERN_NOTICE "FS-Cache: Unloaded\n");
102} 102}
103 103
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
new file mode 100644
index 000000000000..e590242fa41a
--- /dev/null
+++ b/fs/fscache/object-list.c
@@ -0,0 +1,432 @@
1/* Global fscache object list maintainer and viewer
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define FSCACHE_DEBUG_LEVEL COOKIE
13#include <linux/module.h>
14#include <linux/seq_file.h>
15#include <linux/key.h>
16#include <keys/user-type.h>
17#include "internal.h"
18
19static struct rb_root fscache_object_list;
20static DEFINE_RWLOCK(fscache_object_list_lock);
21
22struct fscache_objlist_data {
23 unsigned long config; /* display configuration */
24#define FSCACHE_OBJLIST_CONFIG_KEY 0x00000001 /* show object keys */
25#define FSCACHE_OBJLIST_CONFIG_AUX 0x00000002 /* show object auxdata */
26#define FSCACHE_OBJLIST_CONFIG_COOKIE 0x00000004 /* show objects with cookies */
27#define FSCACHE_OBJLIST_CONFIG_NOCOOKIE 0x00000008 /* show objects without cookies */
28#define FSCACHE_OBJLIST_CONFIG_BUSY 0x00000010 /* show busy objects */
29#define FSCACHE_OBJLIST_CONFIG_IDLE 0x00000020 /* show idle objects */
30#define FSCACHE_OBJLIST_CONFIG_PENDWR 0x00000040 /* show objects with pending writes */
31#define FSCACHE_OBJLIST_CONFIG_NOPENDWR 0x00000080 /* show objects without pending writes */
32#define FSCACHE_OBJLIST_CONFIG_READS 0x00000100 /* show objects with active reads */
33#define FSCACHE_OBJLIST_CONFIG_NOREADS 0x00000200 /* show objects without active reads */
34#define FSCACHE_OBJLIST_CONFIG_EVENTS 0x00000400 /* show objects with events */
 35#define FSCACHE_OBJLIST_CONFIG_NOEVENTS 0x00000800 /* show objects without events */
36#define FSCACHE_OBJLIST_CONFIG_WORK 0x00001000 /* show objects with slow work */
37#define FSCACHE_OBJLIST_CONFIG_NOWORK 0x00002000 /* show objects without slow work */
38
39 u8 buf[512]; /* key and aux data buffer */
40};
41
42/*
43 * Add an object to the object list
44 * - we use the address of the fscache_object structure as the key into the
45 * tree
46 */
47void fscache_objlist_add(struct fscache_object *obj)
48{
49 struct fscache_object *xobj;
50 struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
51
52 write_lock(&fscache_object_list_lock);
53
54 while (*p) {
55 parent = *p;
56 xobj = rb_entry(parent, struct fscache_object, objlist_link);
57
58 if (obj < xobj)
59 p = &(*p)->rb_left;
60 else if (obj > xobj)
61 p = &(*p)->rb_right;
62 else
63 BUG();
64 }
65
66 rb_link_node(&obj->objlist_link, parent, p);
67 rb_insert_color(&obj->objlist_link, &fscache_object_list);
68
69 write_unlock(&fscache_object_list_lock);
70}
71
72/**
73 * fscache_object_destroy - Note that a cache object is about to be destroyed
74 * @object: The object to be destroyed
75 *
76 * Note the imminent destruction and deallocation of a cache object record.
77 */
78void fscache_object_destroy(struct fscache_object *obj)
79{
80 write_lock(&fscache_object_list_lock);
81
82 BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
83 rb_erase(&obj->objlist_link, &fscache_object_list);
84
85 write_unlock(&fscache_object_list_lock);
86}
87EXPORT_SYMBOL(fscache_object_destroy);
88
89/*
90 * find the object in the tree on or after the specified index
91 */
92static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
93{
94 struct fscache_object *pobj, *obj, *minobj = NULL;
95 struct rb_node *p;
96 unsigned long pos;
97
98 if (*_pos >= (unsigned long) ERR_PTR(-ENOENT))
99 return NULL;
100 pos = *_pos;
101
102 /* banners (can't represent line 0 by pos 0 as that would involve
103 * returning a NULL pointer) */
104 if (pos == 0)
105 return (struct fscache_object *) ++(*_pos);
106 if (pos < 3)
107 return (struct fscache_object *)pos;
108
109 pobj = (struct fscache_object *)pos;
110 p = fscache_object_list.rb_node;
111 while (p) {
112 obj = rb_entry(p, struct fscache_object, objlist_link);
113 if (pobj < obj) {
114 if (!minobj || minobj > obj)
115 minobj = obj;
116 p = p->rb_left;
117 } else if (pobj > obj) {
118 p = p->rb_right;
119 } else {
120 minobj = obj;
121 break;
122 }
123 obj = NULL;
124 }
125
126 if (!minobj)
127 *_pos = (unsigned long) ERR_PTR(-ENOENT);
128 else if (minobj != obj)
129 *_pos = (unsigned long) minobj;
130 return minobj;
131}
132
133/*
134 * set up the iterator to start reading from the first line
135 */
136static void *fscache_objlist_start(struct seq_file *m, loff_t *_pos)
137 __acquires(&fscache_object_list_lock)
138{
139 read_lock(&fscache_object_list_lock);
140 return fscache_objlist_lookup(_pos);
141}
142
143/*
144 * move to the next line
145 */
146static void *fscache_objlist_next(struct seq_file *m, void *v, loff_t *_pos)
147{
148 (*_pos)++;
149 return fscache_objlist_lookup(_pos);
150}
151
152/*
153 * clean up after reading
154 */
155static void fscache_objlist_stop(struct seq_file *m, void *v)
156 __releases(&fscache_object_list_lock)
157{
158 read_unlock(&fscache_object_list_lock);
159}
160
161/*
162 * display an object
163 */
164static int fscache_objlist_show(struct seq_file *m, void *v)
165{
166 struct fscache_objlist_data *data = m->private;
167 struct fscache_object *obj = v;
168 unsigned long config = data->config;
169 uint16_t keylen, auxlen;
170 char _type[3], *type;
171 bool no_cookie;
172 u8 *buf = data->buf, *p;
173
174 if ((unsigned long) v == 1) {
175 seq_puts(m, "OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS"
176 " EM EV F S"
177 " | NETFS_COOKIE_DEF TY FL NETFS_DATA");
178 if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
179 FSCACHE_OBJLIST_CONFIG_AUX))
180 seq_puts(m, " ");
181 if (config & FSCACHE_OBJLIST_CONFIG_KEY)
182 seq_puts(m, "OBJECT_KEY");
183 if ((config & (FSCACHE_OBJLIST_CONFIG_KEY |
184 FSCACHE_OBJLIST_CONFIG_AUX)) ==
185 (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX))
186 seq_puts(m, ", ");
187 if (config & FSCACHE_OBJLIST_CONFIG_AUX)
188 seq_puts(m, "AUX_DATA");
189 seq_puts(m, "\n");
190 return 0;
191 }
192
193 if ((unsigned long) v == 2) {
194 seq_puts(m, "======== ======== ==== ===== === === === == ====="
195 " == == = ="
196 " | ================ == == ================");
197 if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
198 FSCACHE_OBJLIST_CONFIG_AUX))
199 seq_puts(m, " ================");
200 seq_puts(m, "\n");
201 return 0;
202 }
203
204 /* filter out any unwanted objects */
205#define FILTER(criterion, _yes, _no) \
206 do { \
207 unsigned long yes = FSCACHE_OBJLIST_CONFIG_##_yes; \
208 unsigned long no = FSCACHE_OBJLIST_CONFIG_##_no; \
209 if (criterion) { \
210 if (!(config & yes)) \
211 return 0; \
212 } else { \
213 if (!(config & no)) \
214 return 0; \
215 } \
216 } while(0)
217
218 if (~config) {
219 FILTER(obj->cookie,
220 COOKIE, NOCOOKIE);
221 FILTER(obj->state != FSCACHE_OBJECT_ACTIVE ||
222 obj->n_ops != 0 ||
223 obj->n_obj_ops != 0 ||
224 obj->flags ||
225 !list_empty(&obj->dependents),
226 BUSY, IDLE);
227 FILTER(test_bit(FSCACHE_OBJECT_PENDING_WRITE, &obj->flags),
228 PENDWR, NOPENDWR);
229 FILTER(atomic_read(&obj->n_reads),
230 READS, NOREADS);
231 FILTER(obj->events & obj->event_mask,
232 EVENTS, NOEVENTS);
233 FILTER(obj->work.flags & ~(1UL << SLOW_WORK_VERY_SLOW),
234 WORK, NOWORK);
235 }
236
237 seq_printf(m,
238 "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1lx | ",
239 obj->debug_id,
240 obj->parent ? obj->parent->debug_id : -1,
241 fscache_object_states_short[obj->state],
242 obj->n_children,
243 obj->n_ops,
244 obj->n_obj_ops,
245 obj->n_in_progress,
246 obj->n_exclusive,
247 atomic_read(&obj->n_reads),
248 obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK,
249 obj->events,
250 obj->flags,
251 obj->work.flags);
252
253 no_cookie = true;
254 keylen = auxlen = 0;
255 if (obj->cookie) {
256 spin_lock(&obj->lock);
257 if (obj->cookie) {
258 switch (obj->cookie->def->type) {
259 case 0:
260 type = "IX";
261 break;
262 case 1:
263 type = "DT";
264 break;
265 default:
266 sprintf(_type, "%02u",
267 obj->cookie->def->type);
268 type = _type;
269 break;
270 }
271
272 seq_printf(m, "%-16s %s %2lx %16p",
273 obj->cookie->def->name,
274 type,
275 obj->cookie->flags,
276 obj->cookie->netfs_data);
277
278 if (obj->cookie->def->get_key &&
279 config & FSCACHE_OBJLIST_CONFIG_KEY)
280 keylen = obj->cookie->def->get_key(
281 obj->cookie->netfs_data,
282 buf, 400);
283
284 if (obj->cookie->def->get_aux &&
285 config & FSCACHE_OBJLIST_CONFIG_AUX)
286 auxlen = obj->cookie->def->get_aux(
287 obj->cookie->netfs_data,
288 buf + keylen, 512 - keylen);
289
290 no_cookie = false;
291 }
292 spin_unlock(&obj->lock);
293
294 if (!no_cookie && (keylen > 0 || auxlen > 0)) {
295 seq_printf(m, " ");
296 for (p = buf; keylen > 0; keylen--)
297 seq_printf(m, "%02x", *p++);
298 if (auxlen > 0) {
299 if (config & FSCACHE_OBJLIST_CONFIG_KEY)
300 seq_printf(m, ", ");
301 for (; auxlen > 0; auxlen--)
302 seq_printf(m, "%02x", *p++);
303 }
304 }
305 }
306
307 if (no_cookie)
308 seq_printf(m, "<no_cookie>\n");
309 else
310 seq_printf(m, "\n");
311 return 0;
312}
313
314static const struct seq_operations fscache_objlist_ops = {
315 .start = fscache_objlist_start,
316 .stop = fscache_objlist_stop,
317 .next = fscache_objlist_next,
318 .show = fscache_objlist_show,
319};
320
321/*
322 * get the configuration for filtering the list
323 */
324static void fscache_objlist_config(struct fscache_objlist_data *data)
325{
326#ifdef CONFIG_KEYS
327 struct user_key_payload *confkey;
328 unsigned long config;
329 struct key *key;
330 const char *buf;
331 int len;
332
333 key = request_key(&key_type_user, "fscache:objlist", NULL);
334 if (IS_ERR(key))
335 goto no_config;
336
337 config = 0;
338 rcu_read_lock();
339
340 confkey = key->payload.data;
341 buf = confkey->data;
342
343 for (len = confkey->datalen - 1; len >= 0; len--) {
344 switch (buf[len]) {
345 case 'K': config |= FSCACHE_OBJLIST_CONFIG_KEY; break;
346 case 'A': config |= FSCACHE_OBJLIST_CONFIG_AUX; break;
347 case 'C': config |= FSCACHE_OBJLIST_CONFIG_COOKIE; break;
348 case 'c': config |= FSCACHE_OBJLIST_CONFIG_NOCOOKIE; break;
349 case 'B': config |= FSCACHE_OBJLIST_CONFIG_BUSY; break;
350 case 'b': config |= FSCACHE_OBJLIST_CONFIG_IDLE; break;
351 case 'W': config |= FSCACHE_OBJLIST_CONFIG_PENDWR; break;
352 case 'w': config |= FSCACHE_OBJLIST_CONFIG_NOPENDWR; break;
353 case 'R': config |= FSCACHE_OBJLIST_CONFIG_READS; break;
354 case 'r': config |= FSCACHE_OBJLIST_CONFIG_NOREADS; break;
355 case 'S': config |= FSCACHE_OBJLIST_CONFIG_WORK; break;
356 case 's': config |= FSCACHE_OBJLIST_CONFIG_NOWORK; break;
357 }
358 }
359
360 rcu_read_unlock();
361 key_put(key);
362
363 if (!(config & (FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE)))
364 config |= FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE;
365 if (!(config & (FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE)))
366 config |= FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE;
367 if (!(config & (FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR)))
368 config |= FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR;
369 if (!(config & (FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS)))
370 config |= FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS;
371 if (!(config & (FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS)))
372 config |= FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS;
373 if (!(config & (FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK)))
374 config |= FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK;
375
376 data->config = config;
377 return;
378
379no_config:
380#endif
381 data->config = ULONG_MAX;
382}
383
384/*
385 * open "/proc/fs/fscache/objects" to provide a list of active objects
386 * - can be configured by a user-defined key added to the caller's keyrings
387 */
388static int fscache_objlist_open(struct inode *inode, struct file *file)
389{
390 struct fscache_objlist_data *data;
391 struct seq_file *m;
392 int ret;
393
394 ret = seq_open(file, &fscache_objlist_ops);
395 if (ret < 0)
396 return ret;
397
398 m = file->private_data;
399
400 /* buffer for key extraction */
401 data = kmalloc(sizeof(struct fscache_objlist_data), GFP_KERNEL);
402 if (!data) {
403 seq_release(inode, file);
404 return -ENOMEM;
405 }
406
407 /* get the configuration key */
408 fscache_objlist_config(data);
409
410 m->private = data;
411 return 0;
412}
413
414/*
415 * clean up on close
416 */
417static int fscache_objlist_release(struct inode *inode, struct file *file)
418{
419 struct seq_file *m = file->private_data;
420
421 kfree(m->private);
422 m->private = NULL;
423 return seq_release(inode, file);
424}
425
426const struct file_operations fscache_objlist_fops = {
427 .owner = THIS_MODULE,
428 .open = fscache_objlist_open,
429 .read = seq_read,
430 .llseek = seq_lseek,
431 .release = fscache_objlist_release,
432};
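
Usage sketch: fscache_objlist_config() above looks up a user-type key named
"fscache:objlist" in the caller's keyrings and treats each character of the
payload as a filter toggle (K/A for the key and aux-data columns, and the
C/c, B/b, W/w, R/r, S/s with/without pairs).  One hedged way to drive this
from userspace, assuming the keyutils add_key(2) wrapper is available and the
kernel was built with CONFIG_FSCACHE_OBJECT_LIST so /proc/fs/fscache/objects
exists, might look like the following (the program name is hypothetical):

	/* Add a config key to the session keyring, then dump the object list.
	 * Build with something like: cc -o objlist objlist.c -lkeyutils */
	#include <keyutils.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		const char payload[] = "KB";	/* show object keys + busy objects */
		char line[512];
		key_serial_t key;
		FILE *f;

		key = add_key("user", "fscache:objlist", payload,
			      sizeof(payload) - 1, KEY_SPEC_SESSION_KEYRING);
		if (key == -1) {
			perror("add_key");
			return EXIT_FAILURE;
		}

		f = fopen("/proc/fs/fscache/objects", "r");
		if (!f) {
			perror("fopen");
			return EXIT_FAILURE;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
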
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 392a41b1b79d..e513ac599c8e 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -14,9 +14,10 @@
14 14
15#define FSCACHE_DEBUG_LEVEL COOKIE 15#define FSCACHE_DEBUG_LEVEL COOKIE
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/seq_file.h>
17#include "internal.h" 18#include "internal.h"
18 19
19const char *fscache_object_states[] = { 20const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
20 [FSCACHE_OBJECT_INIT] = "OBJECT_INIT", 21 [FSCACHE_OBJECT_INIT] = "OBJECT_INIT",
21 [FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP", 22 [FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP",
22 [FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING", 23 [FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING",
@@ -33,9 +34,28 @@ const char *fscache_object_states[] = {
33}; 34};
34EXPORT_SYMBOL(fscache_object_states); 35EXPORT_SYMBOL(fscache_object_states);
35 36
37const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
38 [FSCACHE_OBJECT_INIT] = "INIT",
39 [FSCACHE_OBJECT_LOOKING_UP] = "LOOK",
40 [FSCACHE_OBJECT_CREATING] = "CRTN",
41 [FSCACHE_OBJECT_AVAILABLE] = "AVBL",
42 [FSCACHE_OBJECT_ACTIVE] = "ACTV",
43 [FSCACHE_OBJECT_UPDATING] = "UPDT",
44 [FSCACHE_OBJECT_DYING] = "DYNG",
45 [FSCACHE_OBJECT_LC_DYING] = "LCDY",
46 [FSCACHE_OBJECT_ABORT_INIT] = "ABTI",
47 [FSCACHE_OBJECT_RELEASING] = "RELS",
48 [FSCACHE_OBJECT_RECYCLING] = "RCYC",
49 [FSCACHE_OBJECT_WITHDRAWING] = "WTHD",
50 [FSCACHE_OBJECT_DEAD] = "DEAD",
51};
52
36static void fscache_object_slow_work_put_ref(struct slow_work *); 53static void fscache_object_slow_work_put_ref(struct slow_work *);
37static int fscache_object_slow_work_get_ref(struct slow_work *); 54static int fscache_object_slow_work_get_ref(struct slow_work *);
38static void fscache_object_slow_work_execute(struct slow_work *); 55static void fscache_object_slow_work_execute(struct slow_work *);
56#ifdef CONFIG_SLOW_WORK_PROC
57static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *);
58#endif
39static void fscache_initialise_object(struct fscache_object *); 59static void fscache_initialise_object(struct fscache_object *);
40static void fscache_lookup_object(struct fscache_object *); 60static void fscache_lookup_object(struct fscache_object *);
41static void fscache_object_available(struct fscache_object *); 61static void fscache_object_available(struct fscache_object *);
@@ -45,9 +65,13 @@ static void fscache_enqueue_dependents(struct fscache_object *);
45static void fscache_dequeue_object(struct fscache_object *); 65static void fscache_dequeue_object(struct fscache_object *);
46 66
47const struct slow_work_ops fscache_object_slow_work_ops = { 67const struct slow_work_ops fscache_object_slow_work_ops = {
68 .owner = THIS_MODULE,
48 .get_ref = fscache_object_slow_work_get_ref, 69 .get_ref = fscache_object_slow_work_get_ref,
49 .put_ref = fscache_object_slow_work_put_ref, 70 .put_ref = fscache_object_slow_work_put_ref,
50 .execute = fscache_object_slow_work_execute, 71 .execute = fscache_object_slow_work_execute,
72#ifdef CONFIG_SLOW_WORK_PROC
73 .desc = fscache_object_slow_work_desc,
74#endif
51}; 75};
52EXPORT_SYMBOL(fscache_object_slow_work_ops); 76EXPORT_SYMBOL(fscache_object_slow_work_ops);
53 77
@@ -81,6 +105,7 @@ static inline void fscache_done_parent_op(struct fscache_object *object)
81static void fscache_object_state_machine(struct fscache_object *object) 105static void fscache_object_state_machine(struct fscache_object *object)
82{ 106{
83 enum fscache_object_state new_state; 107 enum fscache_object_state new_state;
108 struct fscache_cookie *cookie;
84 109
85 ASSERT(object != NULL); 110 ASSERT(object != NULL);
86 111
@@ -120,20 +145,31 @@ static void fscache_object_state_machine(struct fscache_object *object)
120 case FSCACHE_OBJECT_UPDATING: 145 case FSCACHE_OBJECT_UPDATING:
121 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events); 146 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
122 fscache_stat(&fscache_n_updates_run); 147 fscache_stat(&fscache_n_updates_run);
148 fscache_stat(&fscache_n_cop_update_object);
123 object->cache->ops->update_object(object); 149 object->cache->ops->update_object(object);
150 fscache_stat_d(&fscache_n_cop_update_object);
124 goto active_transit; 151 goto active_transit;
125 152
126 /* handle an object dying during lookup or creation */ 153 /* handle an object dying during lookup or creation */
127 case FSCACHE_OBJECT_LC_DYING: 154 case FSCACHE_OBJECT_LC_DYING:
128 object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE); 155 object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
156 fscache_stat(&fscache_n_cop_lookup_complete);
129 object->cache->ops->lookup_complete(object); 157 object->cache->ops->lookup_complete(object);
158 fscache_stat_d(&fscache_n_cop_lookup_complete);
130 159
131 spin_lock(&object->lock); 160 spin_lock(&object->lock);
132 object->state = FSCACHE_OBJECT_DYING; 161 object->state = FSCACHE_OBJECT_DYING;
133 if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, 162 cookie = object->cookie;
134 &object->cookie->flags)) 163 if (cookie) {
135 wake_up_bit(&object->cookie->flags, 164 if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP,
136 FSCACHE_COOKIE_CREATING); 165 &cookie->flags))
166 wake_up_bit(&cookie->flags,
167 FSCACHE_COOKIE_LOOKING_UP);
168 if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
169 &cookie->flags))
170 wake_up_bit(&cookie->flags,
171 FSCACHE_COOKIE_CREATING);
172 }
137 spin_unlock(&object->lock); 173 spin_unlock(&object->lock);
138 174
139 fscache_done_parent_op(object); 175 fscache_done_parent_op(object);
@@ -165,6 +201,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
165 } 201 }
166 spin_unlock(&object->lock); 202 spin_unlock(&object->lock);
167 fscache_enqueue_dependents(object); 203 fscache_enqueue_dependents(object);
204 fscache_start_operations(object);
168 goto terminal_transit; 205 goto terminal_transit;
169 206
170 /* handle an abort during initialisation */ 207 /* handle an abort during initialisation */
@@ -316,14 +353,29 @@ static void fscache_object_slow_work_execute(struct slow_work *work)
316 353
317 _enter("{OBJ%x}", object->debug_id); 354 _enter("{OBJ%x}", object->debug_id);
318 355
319 clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
320
321 start = jiffies; 356 start = jiffies;
322 fscache_object_state_machine(object); 357 fscache_object_state_machine(object);
323 fscache_hist(fscache_objs_histogram, start); 358 fscache_hist(fscache_objs_histogram, start);
324 if (object->events & object->event_mask) 359 if (object->events & object->event_mask)
325 fscache_enqueue_object(object); 360 fscache_enqueue_object(object);
361 clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
362}
363
364/*
365 * describe an object for slow-work debugging
366 */
367#ifdef CONFIG_SLOW_WORK_PROC
368static void fscache_object_slow_work_desc(struct slow_work *work,
369 struct seq_file *m)
370{
371 struct fscache_object *object =
372 container_of(work, struct fscache_object, work);
373
374 seq_printf(m, "FSC: OBJ%x: %s",
375 object->debug_id,
376 fscache_object_states_short[object->state]);
326} 377}
378#endif
327 379
328/* 380/*
329 * initialise an object 381 * initialise an object
@@ -376,7 +428,9 @@ static void fscache_initialise_object(struct fscache_object *object)
376 * binding on to us, so we need to make sure we don't 428 * binding on to us, so we need to make sure we don't
377 * add ourself to the list multiple times */ 429 * add ourself to the list multiple times */
378 if (list_empty(&object->dep_link)) { 430 if (list_empty(&object->dep_link)) {
431 fscache_stat(&fscache_n_cop_grab_object);
379 object->cache->ops->grab_object(object); 432 object->cache->ops->grab_object(object);
433 fscache_stat_d(&fscache_n_cop_grab_object);
380 list_add(&object->dep_link, 434 list_add(&object->dep_link,
381 &parent->dependents); 435 &parent->dependents);
382 436
@@ -414,6 +468,7 @@ static void fscache_lookup_object(struct fscache_object *object)
414{ 468{
415 struct fscache_cookie *cookie = object->cookie; 469 struct fscache_cookie *cookie = object->cookie;
416 struct fscache_object *parent; 470 struct fscache_object *parent;
471 int ret;
417 472
418 _enter(""); 473 _enter("");
419 474
@@ -438,11 +493,20 @@ static void fscache_lookup_object(struct fscache_object *object)
438 object->cache->tag->name); 493 object->cache->tag->name);
439 494
440 fscache_stat(&fscache_n_object_lookups); 495 fscache_stat(&fscache_n_object_lookups);
441 object->cache->ops->lookup_object(object); 496 fscache_stat(&fscache_n_cop_lookup_object);
497 ret = object->cache->ops->lookup_object(object);
498 fscache_stat_d(&fscache_n_cop_lookup_object);
442 499
443 if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events)) 500 if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events))
444 set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); 501 set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
445 502
503 if (ret == -ETIMEDOUT) {
504 /* probably stuck behind another object, so move this one to
505 * the back of the queue */
506 fscache_stat(&fscache_n_object_lookups_timed_out);
507 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
508 }
509
446 _leave(""); 510 _leave("");
447} 511}
448 512
@@ -546,7 +610,8 @@ static void fscache_object_available(struct fscache_object *object)
546 610
547 spin_lock(&object->lock); 611 spin_lock(&object->lock);
548 612
549 if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags)) 613 if (object->cookie &&
614 test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
550 wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING); 615 wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING);
551 616
552 fscache_done_parent_op(object); 617 fscache_done_parent_op(object);
@@ -562,7 +627,9 @@ static void fscache_object_available(struct fscache_object *object)
562 } 627 }
563 spin_unlock(&object->lock); 628 spin_unlock(&object->lock);
564 629
630 fscache_stat(&fscache_n_cop_lookup_complete);
565 object->cache->ops->lookup_complete(object); 631 object->cache->ops->lookup_complete(object);
632 fscache_stat_d(&fscache_n_cop_lookup_complete);
566 fscache_enqueue_dependents(object); 633 fscache_enqueue_dependents(object);
567 634
568 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif); 635 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
@@ -581,11 +648,16 @@ static void fscache_drop_object(struct fscache_object *object)
581 648
582 _enter("{OBJ%x,%d}", object->debug_id, object->n_children); 649 _enter("{OBJ%x,%d}", object->debug_id, object->n_children);
583 650
651 ASSERTCMP(object->cookie, ==, NULL);
652 ASSERT(hlist_unhashed(&object->cookie_link));
653
584 spin_lock(&cache->object_list_lock); 654 spin_lock(&cache->object_list_lock);
585 list_del_init(&object->cache_link); 655 list_del_init(&object->cache_link);
586 spin_unlock(&cache->object_list_lock); 656 spin_unlock(&cache->object_list_lock);
587 657
658 fscache_stat(&fscache_n_cop_drop_object);
588 cache->ops->drop_object(object); 659 cache->ops->drop_object(object);
660 fscache_stat_d(&fscache_n_cop_drop_object);
589 661
590 if (parent) { 662 if (parent) {
591 _debug("release parent OBJ%x {%d}", 663 _debug("release parent OBJ%x {%d}",
@@ -600,7 +672,9 @@ static void fscache_drop_object(struct fscache_object *object)
600 } 672 }
601 673
602 /* this just shifts the object release to the slow work processor */ 674 /* this just shifts the object release to the slow work processor */
675 fscache_stat(&fscache_n_cop_put_object);
603 object->cache->ops->put_object(object); 676 object->cache->ops->put_object(object);
677 fscache_stat_d(&fscache_n_cop_put_object);
604 678
605 _leave(""); 679 _leave("");
606} 680}
@@ -690,8 +764,12 @@ static int fscache_object_slow_work_get_ref(struct slow_work *work)
690{ 764{
691 struct fscache_object *object = 765 struct fscache_object *object =
692 container_of(work, struct fscache_object, work); 766 container_of(work, struct fscache_object, work);
767 int ret;
693 768
694 return object->cache->ops->grab_object(object) ? 0 : -EAGAIN; 769 fscache_stat(&fscache_n_cop_grab_object);
770 ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
771 fscache_stat_d(&fscache_n_cop_grab_object);
772 return ret;
695} 773}
696 774
697/* 775/*
@@ -702,7 +780,9 @@ static void fscache_object_slow_work_put_ref(struct slow_work *work)
702 struct fscache_object *object = 780 struct fscache_object *object =
703 container_of(work, struct fscache_object, work); 781 container_of(work, struct fscache_object, work);
704 782
705 return object->cache->ops->put_object(object); 783 fscache_stat(&fscache_n_cop_put_object);
784 object->cache->ops->put_object(object);
785 fscache_stat_d(&fscache_n_cop_put_object);
706} 786}
707 787
708/* 788/*
@@ -739,7 +819,9 @@ static void fscache_enqueue_dependents(struct fscache_object *object)
739 819
740 /* sort onto appropriate lists */ 820 /* sort onto appropriate lists */
741 fscache_enqueue_object(dep); 821 fscache_enqueue_object(dep);
822 fscache_stat(&fscache_n_cop_put_object);
742 dep->cache->ops->put_object(dep); 823 dep->cache->ops->put_object(dep);
824 fscache_stat_d(&fscache_n_cop_put_object);
743 825
744 if (!list_empty(&object->dependents)) 826 if (!list_empty(&object->dependents))
745 cond_resched_lock(&object->lock); 827 cond_resched_lock(&object->lock);
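
The repeated fscache_stat()/fscache_stat_d() pairs added in the hunks above
bracket every call out to the cache backend, so each fscache_n_cop_* atomic
reflects how many such calls are currently in flight rather than a running
total.  A minimal sketch of the pattern, assuming fscache_stat_d() is the
atomic_dec() counterpart defined alongside these counters elsewhere in this
series:

	/* Count entry to and exit from a (possibly sleeping) cache-ops call so
	 * the in-flight count can be reported via /proc/fs/fscache/stats. */
	static void example_bracketed_cache_op(struct fscache_object *object)
	{
		fscache_stat(&fscache_n_cop_lookup_object);	/* entering backend */
		object->cache->ops->lookup_object(object);	/* may sleep */
		fscache_stat_d(&fscache_n_cop_lookup_object);	/* back from backend */
	}
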
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index e7f8d53b8b6b..313e79a14266 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -13,6 +13,7 @@
13 13
14#define FSCACHE_DEBUG_LEVEL OPERATION 14#define FSCACHE_DEBUG_LEVEL OPERATION
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/seq_file.h>
16#include "internal.h" 17#include "internal.h"
17 18
18atomic_t fscache_op_debug_id; 19atomic_t fscache_op_debug_id;
@@ -31,32 +32,33 @@ void fscache_enqueue_operation(struct fscache_operation *op)
31 _enter("{OBJ%x OP%x,%u}", 32 _enter("{OBJ%x OP%x,%u}",
32 op->object->debug_id, op->debug_id, atomic_read(&op->usage)); 33 op->object->debug_id, op->debug_id, atomic_read(&op->usage));
33 34
35 fscache_set_op_state(op, "EnQ");
36
37 ASSERT(list_empty(&op->pend_link));
34 ASSERT(op->processor != NULL); 38 ASSERT(op->processor != NULL);
35 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE); 39 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
36 ASSERTCMP(atomic_read(&op->usage), >, 0); 40 ASSERTCMP(atomic_read(&op->usage), >, 0);
37 41
38 if (list_empty(&op->pend_link)) { 42 fscache_stat(&fscache_n_op_enqueue);
39 switch (op->flags & FSCACHE_OP_TYPE) { 43 switch (op->flags & FSCACHE_OP_TYPE) {
40 case FSCACHE_OP_FAST: 44 case FSCACHE_OP_FAST:
41 _debug("queue fast"); 45 _debug("queue fast");
42 atomic_inc(&op->usage); 46 atomic_inc(&op->usage);
43 if (!schedule_work(&op->fast_work)) 47 if (!schedule_work(&op->fast_work))
44 fscache_put_operation(op); 48 fscache_put_operation(op);
45 break; 49 break;
46 case FSCACHE_OP_SLOW: 50 case FSCACHE_OP_SLOW:
47 _debug("queue slow"); 51 _debug("queue slow");
48 slow_work_enqueue(&op->slow_work); 52 slow_work_enqueue(&op->slow_work);
49 break; 53 break;
50 case FSCACHE_OP_MYTHREAD: 54 case FSCACHE_OP_MYTHREAD:
51 _debug("queue for caller's attention"); 55 _debug("queue for caller's attention");
52 break; 56 break;
53 default: 57 default:
54 printk(KERN_ERR "FS-Cache: Unexpected op type %lx", 58 printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
55 op->flags); 59 op->flags);
56 BUG(); 60 BUG();
57 break; 61 break;
58 }
59 fscache_stat(&fscache_n_op_enqueue);
60 } 62 }
61} 63}
62EXPORT_SYMBOL(fscache_enqueue_operation); 64EXPORT_SYMBOL(fscache_enqueue_operation);
@@ -67,6 +69,8 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
67static void fscache_run_op(struct fscache_object *object, 69static void fscache_run_op(struct fscache_object *object,
68 struct fscache_operation *op) 70 struct fscache_operation *op)
69{ 71{
72 fscache_set_op_state(op, "Run");
73
70 object->n_in_progress++; 74 object->n_in_progress++;
71 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) 75 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
72 wake_up_bit(&op->flags, FSCACHE_OP_WAITING); 76 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -87,9 +91,12 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
87 91
88 _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); 92 _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
89 93
94 fscache_set_op_state(op, "SubmitX");
95
90 spin_lock(&object->lock); 96 spin_lock(&object->lock);
91 ASSERTCMP(object->n_ops, >=, object->n_in_progress); 97 ASSERTCMP(object->n_ops, >=, object->n_in_progress);
92 ASSERTCMP(object->n_ops, >=, object->n_exclusive); 98 ASSERTCMP(object->n_ops, >=, object->n_exclusive);
99 ASSERT(list_empty(&op->pend_link));
93 100
94 ret = -ENOBUFS; 101 ret = -ENOBUFS;
95 if (fscache_object_is_active(object)) { 102 if (fscache_object_is_active(object)) {
@@ -190,9 +197,12 @@ int fscache_submit_op(struct fscache_object *object,
190 197
191 ASSERTCMP(atomic_read(&op->usage), >, 0); 198 ASSERTCMP(atomic_read(&op->usage), >, 0);
192 199
200 fscache_set_op_state(op, "Submit");
201
193 spin_lock(&object->lock); 202 spin_lock(&object->lock);
194 ASSERTCMP(object->n_ops, >=, object->n_in_progress); 203 ASSERTCMP(object->n_ops, >=, object->n_in_progress);
195 ASSERTCMP(object->n_ops, >=, object->n_exclusive); 204 ASSERTCMP(object->n_ops, >=, object->n_exclusive);
205 ASSERT(list_empty(&op->pend_link));
196 206
197 ostate = object->state; 207 ostate = object->state;
198 smp_rmb(); 208 smp_rmb();
@@ -222,6 +232,11 @@ int fscache_submit_op(struct fscache_object *object,
222 list_add_tail(&op->pend_link, &object->pending_ops); 232 list_add_tail(&op->pend_link, &object->pending_ops);
223 fscache_stat(&fscache_n_op_pend); 233 fscache_stat(&fscache_n_op_pend);
224 ret = 0; 234 ret = 0;
235 } else if (object->state == FSCACHE_OBJECT_DYING ||
236 object->state == FSCACHE_OBJECT_LC_DYING ||
237 object->state == FSCACHE_OBJECT_WITHDRAWING) {
238 fscache_stat(&fscache_n_op_rejected);
239 ret = -ENOBUFS;
225 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { 240 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
226 fscache_report_unexpected_submission(object, op, ostate); 241 fscache_report_unexpected_submission(object, op, ostate);
227 ASSERT(!fscache_object_is_active(object)); 242 ASSERT(!fscache_object_is_active(object));
@@ -264,12 +279,7 @@ void fscache_start_operations(struct fscache_object *object)
264 stop = true; 279 stop = true;
265 } 280 }
266 list_del_init(&op->pend_link); 281 list_del_init(&op->pend_link);
267 object->n_in_progress++; 282 fscache_run_op(object, op);
268
269 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
270 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
271 if (op->processor)
272 fscache_enqueue_operation(op);
273 283
274 /* the pending queue was holding a ref on the object */ 284 /* the pending queue was holding a ref on the object */
275 fscache_put_operation(op); 285 fscache_put_operation(op);
@@ -282,6 +292,36 @@ void fscache_start_operations(struct fscache_object *object)
282} 292}
283 293
284/* 294/*
295 * cancel an operation that's pending on an object
296 */
297int fscache_cancel_op(struct fscache_operation *op)
298{
299 struct fscache_object *object = op->object;
300 int ret;
301
302	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);
303
304 spin_lock(&object->lock);
305
306 ret = -EBUSY;
307 if (!list_empty(&op->pend_link)) {
308 fscache_stat(&fscache_n_op_cancelled);
309 list_del_init(&op->pend_link);
310 object->n_ops--;
311 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
312 object->n_exclusive--;
313 if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
314 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
315 fscache_put_operation(op);
316 ret = 0;
317 }
318
319 spin_unlock(&object->lock);
320 _leave(" = %d", ret);
321 return ret;
322}
323
324/*
285 * release an operation 325 * release an operation
286 * - queues pending ops if this is the last in-progress op 326 * - queues pending ops if this is the last in-progress op
287 */ 327 */
@@ -298,6 +338,8 @@ void fscache_put_operation(struct fscache_operation *op)
298 if (!atomic_dec_and_test(&op->usage)) 338 if (!atomic_dec_and_test(&op->usage))
299 return; 339 return;
300 340
341 fscache_set_op_state(op, "Put");
342
301 _debug("PUT OP"); 343 _debug("PUT OP");
302 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags)) 344 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
303 BUG(); 345 BUG();
@@ -311,6 +353,9 @@ void fscache_put_operation(struct fscache_operation *op)
311 353
312 object = op->object; 354 object = op->object;
313 355
356 if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
357 atomic_dec(&object->n_reads);
358
314 /* now... we may get called with the object spinlock held, so we 359 /* now... we may get called with the object spinlock held, so we
315 * complete the cleanup here only if we can immediately acquire the 360 * complete the cleanup here only if we can immediately acquire the
316 * lock, and defer it otherwise */ 361 * lock, and defer it otherwise */
@@ -452,8 +497,27 @@ static void fscache_op_execute(struct slow_work *work)
452 _leave(""); 497 _leave("");
453} 498}
454 499
500/*
501 * describe an operation for slow-work debugging
502 */
503#ifdef CONFIG_SLOW_WORK_PROC
504static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
505{
506 struct fscache_operation *op =
507 container_of(work, struct fscache_operation, slow_work);
508
509 seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
510 op->object->debug_id, op->debug_id,
511 op->name, op->state, op->flags);
512}
513#endif
514
455const struct slow_work_ops fscache_op_slow_work_ops = { 515const struct slow_work_ops fscache_op_slow_work_ops = {
516 .owner = THIS_MODULE,
456 .get_ref = fscache_op_get_ref, 517 .get_ref = fscache_op_get_ref,
457 .put_ref = fscache_op_put_ref, 518 .put_ref = fscache_op_put_ref,
458 .execute = fscache_op_execute, 519 .execute = fscache_op_execute,
520#ifdef CONFIG_SLOW_WORK_PROC
521 .desc = fscache_op_desc,
522#endif
459}; 523};
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 2568e0eb644f..c598ea4c4e7d 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -43,18 +43,102 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa
43EXPORT_SYMBOL(__fscache_wait_on_page_write); 43EXPORT_SYMBOL(__fscache_wait_on_page_write);
44 44
45/* 45/*
46 * note that a page has finished being written to the cache 46 * decide whether a page can be released, possibly by cancelling a store to it
47 * - we're allowed to sleep if __GFP_WAIT is flagged
47 */ 48 */
48static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page) 49bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50 struct page *page,
51 gfp_t gfp)
49{ 52{
50 struct page *xpage; 53 struct page *xpage;
54 void *val;
55
56 _enter("%p,%p,%x", cookie, page, gfp);
57
58 rcu_read_lock();
59 val = radix_tree_lookup(&cookie->stores, page->index);
60 if (!val) {
61 rcu_read_unlock();
62 fscache_stat(&fscache_n_store_vmscan_not_storing);
63 __fscache_uncache_page(cookie, page);
64 return true;
65 }
66
67 /* see if the page is actually undergoing storage - if so we can't get
68 * rid of it till the cache has finished with it */
69 if (radix_tree_tag_get(&cookie->stores, page->index,
70 FSCACHE_COOKIE_STORING_TAG)) {
71 rcu_read_unlock();
72 goto page_busy;
73 }
74
75 /* the page is pending storage, so we attempt to cancel the store and
76 * discard the store request so that the page can be reclaimed */
77 spin_lock(&cookie->stores_lock);
78 rcu_read_unlock();
79
80 if (radix_tree_tag_get(&cookie->stores, page->index,
81 FSCACHE_COOKIE_STORING_TAG)) {
82 /* the page started to undergo storage whilst we were looking,
83 * so now we can only wait or return */
84 spin_unlock(&cookie->stores_lock);
85 goto page_busy;
86 }
51 87
52 spin_lock(&cookie->lock);
53 xpage = radix_tree_delete(&cookie->stores, page->index); 88 xpage = radix_tree_delete(&cookie->stores, page->index);
54 spin_unlock(&cookie->lock); 89 spin_unlock(&cookie->stores_lock);
55 ASSERT(xpage != NULL); 90
91 if (xpage) {
92 fscache_stat(&fscache_n_store_vmscan_cancelled);
93 fscache_stat(&fscache_n_store_radix_deletes);
94 ASSERTCMP(xpage, ==, page);
95 } else {
96 fscache_stat(&fscache_n_store_vmscan_gone);
97 }
56 98
57 wake_up_bit(&cookie->flags, 0); 99 wake_up_bit(&cookie->flags, 0);
100 if (xpage)
101 page_cache_release(xpage);
102 __fscache_uncache_page(cookie, page);
103 return true;
104
105page_busy:
106 /* we might want to wait here, but that could deadlock the allocator as
107 * the slow-work threads writing to the cache may all end up sleeping
108 * on memory allocation */
109 fscache_stat(&fscache_n_store_vmscan_busy);
110 return false;
111}
112EXPORT_SYMBOL(__fscache_maybe_release_page);
113
114/*
115 * note that a page has finished being written to the cache
116 */
117static void fscache_end_page_write(struct fscache_object *object,
118 struct page *page)
119{
120 struct fscache_cookie *cookie;
121 struct page *xpage = NULL;
122
123 spin_lock(&object->lock);
124 cookie = object->cookie;
125 if (cookie) {
126 /* delete the page from the tree if it is now no longer
127 * pending */
128 spin_lock(&cookie->stores_lock);
129 radix_tree_tag_clear(&cookie->stores, page->index,
130 FSCACHE_COOKIE_STORING_TAG);
131 if (!radix_tree_tag_get(&cookie->stores, page->index,
132 FSCACHE_COOKIE_PENDING_TAG)) {
133 fscache_stat(&fscache_n_store_radix_deletes);
134 xpage = radix_tree_delete(&cookie->stores, page->index);
135 }
136 spin_unlock(&cookie->stores_lock);
137 wake_up_bit(&cookie->flags, 0);
138 }
139 spin_unlock(&object->lock);
140 if (xpage)
141 page_cache_release(xpage);
58} 142}
59 143
60/* 144/*
@@ -63,14 +147,21 @@ static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *p
63static void fscache_attr_changed_op(struct fscache_operation *op) 147static void fscache_attr_changed_op(struct fscache_operation *op)
64{ 148{
65 struct fscache_object *object = op->object; 149 struct fscache_object *object = op->object;
150 int ret;
66 151
67 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id); 152 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
68 153
69 fscache_stat(&fscache_n_attr_changed_calls); 154 fscache_stat(&fscache_n_attr_changed_calls);
70 155
71 if (fscache_object_is_active(object) && 156 if (fscache_object_is_active(object)) {
72 object->cache->ops->attr_changed(object) < 0) 157 fscache_set_op_state(op, "CallFS");
73 fscache_abort_object(object); 158 fscache_stat(&fscache_n_cop_attr_changed);
159 ret = object->cache->ops->attr_changed(object);
160 fscache_stat_d(&fscache_n_cop_attr_changed);
161 fscache_set_op_state(op, "Done");
162 if (ret < 0)
163 fscache_abort_object(object);
164 }
74 165
75 _leave(""); 166 _leave("");
76} 167}
@@ -99,6 +190,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
99 fscache_operation_init(op, NULL); 190 fscache_operation_init(op, NULL);
100 fscache_operation_init_slow(op, fscache_attr_changed_op); 191 fscache_operation_init_slow(op, fscache_attr_changed_op);
101 op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE); 192 op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
193 fscache_set_op_name(op, "Attr");
102 194
103 spin_lock(&cookie->lock); 195 spin_lock(&cookie->lock);
104 196
@@ -184,6 +276,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
184 op->start_time = jiffies; 276 op->start_time = jiffies;
185 INIT_WORK(&op->op.fast_work, fscache_retrieval_work); 277 INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
186 INIT_LIST_HEAD(&op->to_do); 278 INIT_LIST_HEAD(&op->to_do);
279 fscache_set_op_name(&op->op, "Retr");
187 return op; 280 return op;
188} 281}
189 282
@@ -221,6 +314,43 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
221} 314}
222 315
223/* 316/*
317 * wait for an object to become active (or dead)
318 */
319static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
320 struct fscache_retrieval *op,
321 atomic_t *stat_op_waits,
322 atomic_t *stat_object_dead)
323{
324 int ret;
325
326 if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
327 goto check_if_dead;
328
329 _debug(">>> WT");
330 fscache_stat(stat_op_waits);
331 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
332 fscache_wait_bit_interruptible,
333 TASK_INTERRUPTIBLE) < 0) {
334 ret = fscache_cancel_op(&op->op);
335 if (ret == 0)
336 return -ERESTARTSYS;
337
338 /* it's been removed from the pending queue by another party,
339 * so we should get to run shortly */
340 wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
341 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
342 }
343 _debug("<<< GO");
344
345check_if_dead:
346 if (unlikely(fscache_object_is_dead(object))) {
347 fscache_stat(stat_object_dead);
348 return -ENOBUFS;
349 }
350 return 0;
351}
352
353/*
224 * read a page from the cache or allocate a block in which to store it 354 * read a page from the cache or allocate a block in which to store it
225 * - we return: 355 * - we return:
226 * -ENOMEM - out of memory, nothing done 356 * -ENOMEM - out of memory, nothing done
@@ -257,6 +387,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
257 _leave(" = -ENOMEM"); 387 _leave(" = -ENOMEM");
258 return -ENOMEM; 388 return -ENOMEM;
259 } 389 }
390 fscache_set_op_name(&op->op, "RetrRA1");
260 391
261 spin_lock(&cookie->lock); 392 spin_lock(&cookie->lock);
262 393
@@ -267,6 +398,9 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
267 398
268 ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP); 399 ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
269 400
401 atomic_inc(&object->n_reads);
402 set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
403
270 if (fscache_submit_op(object, &op->op) < 0) 404 if (fscache_submit_op(object, &op->op) < 0)
271 goto nobufs_unlock; 405 goto nobufs_unlock;
272 spin_unlock(&cookie->lock); 406 spin_unlock(&cookie->lock);
@@ -279,23 +413,27 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
279 413
280 /* we wait for the operation to become active, and then process it 414 /* we wait for the operation to become active, and then process it
281 * *here*, in this thread, and not in the thread pool */ 415 * *here*, in this thread, and not in the thread pool */
282 if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { 416 ret = fscache_wait_for_retrieval_activation(
283 _debug(">>> WT"); 417 object, op,
284 fscache_stat(&fscache_n_retrieval_op_waits); 418 __fscache_stat(&fscache_n_retrieval_op_waits),
285 wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, 419 __fscache_stat(&fscache_n_retrievals_object_dead));
286 fscache_wait_bit, TASK_UNINTERRUPTIBLE); 420 if (ret < 0)
287 _debug("<<< GO"); 421 goto error;
288 }
289 422
290 /* ask the cache to honour the operation */ 423 /* ask the cache to honour the operation */
291 if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) { 424 if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
425 fscache_stat(&fscache_n_cop_allocate_page);
292 ret = object->cache->ops->allocate_page(op, page, gfp); 426 ret = object->cache->ops->allocate_page(op, page, gfp);
427 fscache_stat_d(&fscache_n_cop_allocate_page);
293 if (ret == 0) 428 if (ret == 0)
294 ret = -ENODATA; 429 ret = -ENODATA;
295 } else { 430 } else {
431 fscache_stat(&fscache_n_cop_read_or_alloc_page);
296 ret = object->cache->ops->read_or_alloc_page(op, page, gfp); 432 ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
433 fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
297 } 434 }
298 435
436error:
299 if (ret == -ENOMEM) 437 if (ret == -ENOMEM)
300 fscache_stat(&fscache_n_retrievals_nomem); 438 fscache_stat(&fscache_n_retrievals_nomem);
301 else if (ret == -ERESTARTSYS) 439 else if (ret == -ERESTARTSYS)
@@ -347,7 +485,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
347 void *context, 485 void *context,
348 gfp_t gfp) 486 gfp_t gfp)
349{ 487{
350 fscache_pages_retrieval_func_t func;
351 struct fscache_retrieval *op; 488 struct fscache_retrieval *op;
352 struct fscache_object *object; 489 struct fscache_object *object;
353 int ret; 490 int ret;
@@ -369,6 +506,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
369 op = fscache_alloc_retrieval(mapping, end_io_func, context); 506 op = fscache_alloc_retrieval(mapping, end_io_func, context);
370 if (!op) 507 if (!op)
371 return -ENOMEM; 508 return -ENOMEM;
509 fscache_set_op_name(&op->op, "RetrRAN");
372 510
373 spin_lock(&cookie->lock); 511 spin_lock(&cookie->lock);
374 512
@@ -377,6 +515,9 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
377 object = hlist_entry(cookie->backing_objects.first, 515 object = hlist_entry(cookie->backing_objects.first,
378 struct fscache_object, cookie_link); 516 struct fscache_object, cookie_link);
379 517
518 atomic_inc(&object->n_reads);
519 set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
520
380 if (fscache_submit_op(object, &op->op) < 0) 521 if (fscache_submit_op(object, &op->op) < 0)
381 goto nobufs_unlock; 522 goto nobufs_unlock;
382 spin_unlock(&cookie->lock); 523 spin_unlock(&cookie->lock);
@@ -389,21 +530,27 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
389 530
390 /* we wait for the operation to become active, and then process it 531 /* we wait for the operation to become active, and then process it
391 * *here*, in this thread, and not in the thread pool */ 532 * *here*, in this thread, and not in the thread pool */
392 if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { 533 ret = fscache_wait_for_retrieval_activation(
393 _debug(">>> WT"); 534 object, op,
394 fscache_stat(&fscache_n_retrieval_op_waits); 535 __fscache_stat(&fscache_n_retrieval_op_waits),
395 wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, 536 __fscache_stat(&fscache_n_retrievals_object_dead));
396 fscache_wait_bit, TASK_UNINTERRUPTIBLE); 537 if (ret < 0)
397 _debug("<<< GO"); 538 goto error;
398 }
399 539
400 /* ask the cache to honour the operation */ 540 /* ask the cache to honour the operation */
401 if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) 541 if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
402 func = object->cache->ops->allocate_pages; 542 fscache_stat(&fscache_n_cop_allocate_pages);
403 else 543 ret = object->cache->ops->allocate_pages(
404 func = object->cache->ops->read_or_alloc_pages; 544 op, pages, nr_pages, gfp);
405 ret = func(op, pages, nr_pages, gfp); 545 fscache_stat_d(&fscache_n_cop_allocate_pages);
546 } else {
547 fscache_stat(&fscache_n_cop_read_or_alloc_pages);
548 ret = object->cache->ops->read_or_alloc_pages(
549 op, pages, nr_pages, gfp);
550 fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
551 }
406 552
553error:
407 if (ret == -ENOMEM) 554 if (ret == -ENOMEM)
408 fscache_stat(&fscache_n_retrievals_nomem); 555 fscache_stat(&fscache_n_retrievals_nomem);
409 else if (ret == -ERESTARTSYS) 556 else if (ret == -ERESTARTSYS)
@@ -461,6 +608,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
461 op = fscache_alloc_retrieval(page->mapping, NULL, NULL); 608 op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
462 if (!op) 609 if (!op)
463 return -ENOMEM; 610 return -ENOMEM;
611 fscache_set_op_name(&op->op, "RetrAL1");
464 612
465 spin_lock(&cookie->lock); 613 spin_lock(&cookie->lock);
466 614
@@ -475,18 +623,22 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
475 623
476 fscache_stat(&fscache_n_alloc_ops); 624 fscache_stat(&fscache_n_alloc_ops);
477 625
478 if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) { 626 ret = fscache_wait_for_retrieval_activation(
479 _debug(">>> WT"); 627 object, op,
480 fscache_stat(&fscache_n_alloc_op_waits); 628 __fscache_stat(&fscache_n_alloc_op_waits),
481 wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING, 629 __fscache_stat(&fscache_n_allocs_object_dead));
482 fscache_wait_bit, TASK_UNINTERRUPTIBLE); 630 if (ret < 0)
483 _debug("<<< GO"); 631 goto error;
484 }
485 632
486 /* ask the cache to honour the operation */ 633 /* ask the cache to honour the operation */
634 fscache_stat(&fscache_n_cop_allocate_page);
487 ret = object->cache->ops->allocate_page(op, page, gfp); 635 ret = object->cache->ops->allocate_page(op, page, gfp);
636 fscache_stat_d(&fscache_n_cop_allocate_page);
488 637
489 if (ret < 0) 638error:
639 if (ret == -ERESTARTSYS)
640 fscache_stat(&fscache_n_allocs_intr);
641 else if (ret < 0)
490 fscache_stat(&fscache_n_allocs_nobufs); 642 fscache_stat(&fscache_n_allocs_nobufs);
491 else 643 else
492 fscache_stat(&fscache_n_allocs_ok); 644 fscache_stat(&fscache_n_allocs_ok);
@@ -521,7 +673,7 @@ static void fscache_write_op(struct fscache_operation *_op)
521 struct fscache_storage *op = 673 struct fscache_storage *op =
522 container_of(_op, struct fscache_storage, op); 674 container_of(_op, struct fscache_storage, op);
523 struct fscache_object *object = op->op.object; 675 struct fscache_object *object = op->op.object;
524 struct fscache_cookie *cookie = object->cookie; 676 struct fscache_cookie *cookie;
525 struct page *page; 677 struct page *page;
526 unsigned n; 678 unsigned n;
527 void *results[1]; 679 void *results[1];
@@ -529,16 +681,19 @@ static void fscache_write_op(struct fscache_operation *_op)
529 681
530 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage)); 682 _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
531 683
532 spin_lock(&cookie->lock); 684 fscache_set_op_state(&op->op, "GetPage");
685
533 spin_lock(&object->lock); 686 spin_lock(&object->lock);
687 cookie = object->cookie;
534 688
535 if (!fscache_object_is_active(object)) { 689 if (!fscache_object_is_active(object) || !cookie) {
536 spin_unlock(&object->lock); 690 spin_unlock(&object->lock);
537 spin_unlock(&cookie->lock);
538 _leave(""); 691 _leave("");
539 return; 692 return;
540 } 693 }
541 694
695 spin_lock(&cookie->stores_lock);
696
542 fscache_stat(&fscache_n_store_calls); 697 fscache_stat(&fscache_n_store_calls);
543 698
544 /* find a page to store */ 699 /* find a page to store */
@@ -549,23 +704,35 @@ static void fscache_write_op(struct fscache_operation *_op)
549 goto superseded; 704 goto superseded;
550 page = results[0]; 705 page = results[0];
551 _debug("gang %d [%lx]", n, page->index); 706 _debug("gang %d [%lx]", n, page->index);
552 if (page->index > op->store_limit) 707 if (page->index > op->store_limit) {
708 fscache_stat(&fscache_n_store_pages_over_limit);
553 goto superseded; 709 goto superseded;
710 }
554 711
555 radix_tree_tag_clear(&cookie->stores, page->index, 712 if (page) {
556 FSCACHE_COOKIE_PENDING_TAG); 713 radix_tree_tag_set(&cookie->stores, page->index,
714 FSCACHE_COOKIE_STORING_TAG);
715 radix_tree_tag_clear(&cookie->stores, page->index,
716 FSCACHE_COOKIE_PENDING_TAG);
717 }
557 718
719 spin_unlock(&cookie->stores_lock);
558 spin_unlock(&object->lock); 720 spin_unlock(&object->lock);
559 spin_unlock(&cookie->lock);
560 721
561 if (page) { 722 if (page) {
723 fscache_set_op_state(&op->op, "Store");
724 fscache_stat(&fscache_n_store_pages);
725 fscache_stat(&fscache_n_cop_write_page);
562 ret = object->cache->ops->write_page(op, page); 726 ret = object->cache->ops->write_page(op, page);
563 fscache_end_page_write(cookie, page); 727 fscache_stat_d(&fscache_n_cop_write_page);
564 page_cache_release(page); 728 fscache_set_op_state(&op->op, "EndWrite");
565 if (ret < 0) 729 fscache_end_page_write(object, page);
730 if (ret < 0) {
731 fscache_set_op_state(&op->op, "Abort");
566 fscache_abort_object(object); 732 fscache_abort_object(object);
567 else 733 } else {
568 fscache_enqueue_operation(&op->op); 734 fscache_enqueue_operation(&op->op);
735 }
569 } 736 }
570 737
571 _leave(""); 738 _leave("");
@@ -575,9 +742,9 @@ superseded:
575 /* this writer is going away and there aren't any more things to 742 /* this writer is going away and there aren't any more things to
576 * write */ 743 * write */
577 _debug("cease"); 744 _debug("cease");
745 spin_unlock(&cookie->stores_lock);
578 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); 746 clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
579 spin_unlock(&object->lock); 747 spin_unlock(&object->lock);
580 spin_unlock(&cookie->lock);
581 _leave(""); 748 _leave("");
582} 749}
583 750
@@ -634,6 +801,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
634 fscache_operation_init(&op->op, fscache_release_write_op); 801 fscache_operation_init(&op->op, fscache_release_write_op);
635 fscache_operation_init_slow(&op->op, fscache_write_op); 802 fscache_operation_init_slow(&op->op, fscache_write_op);
636 op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING); 803 op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
804 fscache_set_op_name(&op->op, "Write1");
637 805
638 ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM); 806 ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
639 if (ret < 0) 807 if (ret < 0)
@@ -652,6 +820,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
652 /* add the page to the pending-storage radix tree on the backing 820 /* add the page to the pending-storage radix tree on the backing
653 * object */ 821 * object */
654 spin_lock(&object->lock); 822 spin_lock(&object->lock);
823 spin_lock(&cookie->stores_lock);
655 824
656 _debug("store limit %llx", (unsigned long long) object->store_limit); 825 _debug("store limit %llx", (unsigned long long) object->store_limit);
657 826
@@ -672,6 +841,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
672 if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags)) 841 if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
673 goto already_pending; 842 goto already_pending;
674 843
844 spin_unlock(&cookie->stores_lock);
675 spin_unlock(&object->lock); 845 spin_unlock(&object->lock);
676 846
677 op->op.debug_id = atomic_inc_return(&fscache_op_debug_id); 847 op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
@@ -693,6 +863,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
693already_queued: 863already_queued:
694 fscache_stat(&fscache_n_stores_again); 864 fscache_stat(&fscache_n_stores_again);
695already_pending: 865already_pending:
866 spin_unlock(&cookie->stores_lock);
696 spin_unlock(&object->lock); 867 spin_unlock(&object->lock);
697 spin_unlock(&cookie->lock); 868 spin_unlock(&cookie->lock);
698 radix_tree_preload_end(); 869 radix_tree_preload_end();
@@ -702,7 +873,9 @@ already_pending:
702 return 0; 873 return 0;
703 874
704submit_failed: 875submit_failed:
876 spin_lock(&cookie->stores_lock);
705 radix_tree_delete(&cookie->stores, page->index); 877 radix_tree_delete(&cookie->stores, page->index);
878 spin_unlock(&cookie->stores_lock);
706 page_cache_release(page); 879 page_cache_release(page);
707 ret = -ENOBUFS; 880 ret = -ENOBUFS;
708 goto nobufs; 881 goto nobufs;
@@ -763,7 +936,9 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
763 if (TestClearPageFsCache(page) && 936 if (TestClearPageFsCache(page) &&
764 object->cache->ops->uncache_page) { 937 object->cache->ops->uncache_page) {
765 /* the cache backend releases the cookie lock */ 938 /* the cache backend releases the cookie lock */
939 fscache_stat(&fscache_n_cop_uncache_page);
766 object->cache->ops->uncache_page(object, page); 940 object->cache->ops->uncache_page(object, page);
941 fscache_stat_d(&fscache_n_cop_uncache_page);
767 goto done; 942 goto done;
768 } 943 }
769 944
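
The new __fscache_maybe_release_page() above lets the netfs ask, from its
releasepage address-space op, whether a page still pinned by a pending or
in-progress store may be freed.  A hedged sketch of how a network filesystem
might wire that up follows; the example_fs_* names are hypothetical, and
fscache_maybe_release_page() is assumed to be the linux/fscache.h wrapper
that only drops into the function above when the cookie is valid and the
page carries PG_fscache:

	#include <linux/kernel.h>
	#include <linux/fs.h>
	#include <linux/fscache.h>
	#include <linux/pagemap.h>

	struct example_fs_inode {
		struct inode		vfs_inode;
		struct fscache_cookie	*fscache;	/* hypothetical per-inode cookie */
	};

	static inline struct example_fs_inode *example_fs_i(struct inode *inode)
	{
		return container_of(inode, struct example_fs_inode, vfs_inode);
	}

	/* ->releasepage(): return 0 to refuse release while the cache still
	 * holds the page for storage, 1 if the VM may take it */
	static int example_fs_releasepage(struct page *page, gfp_t gfp)
	{
		struct fscache_cookie *cookie =
			example_fs_i(page->mapping->host)->fscache;

		if (PageFsCache(page) &&
		    !fscache_maybe_release_page(cookie, page, gfp))
			return 0;
		return 1;
	}
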
diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c
index beeab44bc31a..1d9e4951a597 100644
--- a/fs/fscache/proc.c
+++ b/fs/fscache/proc.c
@@ -37,10 +37,20 @@ int __init fscache_proc_init(void)
37 goto error_histogram; 37 goto error_histogram;
38#endif 38#endif
39 39
40#ifdef CONFIG_FSCACHE_OBJECT_LIST
41 if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
42 &fscache_objlist_fops))
43 goto error_objects;
44#endif
45
40 _leave(" = 0"); 46 _leave(" = 0");
41 return 0; 47 return 0;
42 48
49#ifdef CONFIG_FSCACHE_OBJECT_LIST
50error_objects:
51#endif
43#ifdef CONFIG_FSCACHE_HISTOGRAM 52#ifdef CONFIG_FSCACHE_HISTOGRAM
53 remove_proc_entry("fs/fscache/histogram", NULL);
44error_histogram: 54error_histogram:
45#endif 55#endif
46#ifdef CONFIG_FSCACHE_STATS 56#ifdef CONFIG_FSCACHE_STATS
@@ -58,6 +68,9 @@ error_dir:
58 */ 68 */
59void fscache_proc_cleanup(void) 69void fscache_proc_cleanup(void)
60{ 70{
71#ifdef CONFIG_FSCACHE_OBJECT_LIST
72 remove_proc_entry("fs/fscache/objects", NULL);
73#endif
61#ifdef CONFIG_FSCACHE_HISTOGRAM 74#ifdef CONFIG_FSCACHE_HISTOGRAM
62 remove_proc_entry("fs/fscache/histogram", NULL); 75 remove_proc_entry("fs/fscache/histogram", NULL);
63#endif 76#endif
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 65deb99e756b..46435f3aae68 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -25,6 +25,8 @@ atomic_t fscache_n_op_requeue;
25atomic_t fscache_n_op_deferred_release; 25atomic_t fscache_n_op_deferred_release;
26atomic_t fscache_n_op_release; 26atomic_t fscache_n_op_release;
27atomic_t fscache_n_op_gc; 27atomic_t fscache_n_op_gc;
28atomic_t fscache_n_op_cancelled;
29atomic_t fscache_n_op_rejected;
28 30
29atomic_t fscache_n_attr_changed; 31atomic_t fscache_n_attr_changed;
30atomic_t fscache_n_attr_changed_ok; 32atomic_t fscache_n_attr_changed_ok;
@@ -36,6 +38,8 @@ atomic_t fscache_n_allocs;
36atomic_t fscache_n_allocs_ok; 38atomic_t fscache_n_allocs_ok;
37atomic_t fscache_n_allocs_wait; 39atomic_t fscache_n_allocs_wait;
38atomic_t fscache_n_allocs_nobufs; 40atomic_t fscache_n_allocs_nobufs;
41atomic_t fscache_n_allocs_intr;
42atomic_t fscache_n_allocs_object_dead;
39atomic_t fscache_n_alloc_ops; 43atomic_t fscache_n_alloc_ops;
40atomic_t fscache_n_alloc_op_waits; 44atomic_t fscache_n_alloc_op_waits;
41 45
@@ -46,6 +50,7 @@ atomic_t fscache_n_retrievals_nodata;
46atomic_t fscache_n_retrievals_nobufs; 50atomic_t fscache_n_retrievals_nobufs;
47atomic_t fscache_n_retrievals_intr; 51atomic_t fscache_n_retrievals_intr;
48atomic_t fscache_n_retrievals_nomem; 52atomic_t fscache_n_retrievals_nomem;
53atomic_t fscache_n_retrievals_object_dead;
49atomic_t fscache_n_retrieval_ops; 54atomic_t fscache_n_retrieval_ops;
50atomic_t fscache_n_retrieval_op_waits; 55atomic_t fscache_n_retrieval_op_waits;
51 56
@@ -56,6 +61,14 @@ atomic_t fscache_n_stores_nobufs;
56atomic_t fscache_n_stores_oom; 61atomic_t fscache_n_stores_oom;
57atomic_t fscache_n_store_ops; 62atomic_t fscache_n_store_ops;
58atomic_t fscache_n_store_calls; 63atomic_t fscache_n_store_calls;
64atomic_t fscache_n_store_pages;
65atomic_t fscache_n_store_radix_deletes;
66atomic_t fscache_n_store_pages_over_limit;
67
68atomic_t fscache_n_store_vmscan_not_storing;
69atomic_t fscache_n_store_vmscan_gone;
70atomic_t fscache_n_store_vmscan_busy;
71atomic_t fscache_n_store_vmscan_cancelled;
59 72
60atomic_t fscache_n_marks; 73atomic_t fscache_n_marks;
61atomic_t fscache_n_uncaches; 74atomic_t fscache_n_uncaches;
@@ -74,6 +87,7 @@ atomic_t fscache_n_updates_run;
74atomic_t fscache_n_relinquishes; 87atomic_t fscache_n_relinquishes;
75atomic_t fscache_n_relinquishes_null; 88atomic_t fscache_n_relinquishes_null;
76atomic_t fscache_n_relinquishes_waitcrt; 89atomic_t fscache_n_relinquishes_waitcrt;
90atomic_t fscache_n_relinquishes_retire;
77 91
78atomic_t fscache_n_cookie_index; 92atomic_t fscache_n_cookie_index;
79atomic_t fscache_n_cookie_data; 93atomic_t fscache_n_cookie_data;
@@ -84,6 +98,7 @@ atomic_t fscache_n_object_no_alloc;
84atomic_t fscache_n_object_lookups; 98atomic_t fscache_n_object_lookups;
85atomic_t fscache_n_object_lookups_negative; 99atomic_t fscache_n_object_lookups_negative;
86atomic_t fscache_n_object_lookups_positive; 100atomic_t fscache_n_object_lookups_positive;
101atomic_t fscache_n_object_lookups_timed_out;
87atomic_t fscache_n_object_created; 102atomic_t fscache_n_object_created;
88atomic_t fscache_n_object_avail; 103atomic_t fscache_n_object_avail;
89atomic_t fscache_n_object_dead; 104atomic_t fscache_n_object_dead;
@@ -93,6 +108,23 @@ atomic_t fscache_n_checkaux_okay;
93atomic_t fscache_n_checkaux_update; 108atomic_t fscache_n_checkaux_update;
94atomic_t fscache_n_checkaux_obsolete; 109atomic_t fscache_n_checkaux_obsolete;
95 110
111atomic_t fscache_n_cop_alloc_object;
112atomic_t fscache_n_cop_lookup_object;
113atomic_t fscache_n_cop_lookup_complete;
114atomic_t fscache_n_cop_grab_object;
115atomic_t fscache_n_cop_update_object;
116atomic_t fscache_n_cop_drop_object;
117atomic_t fscache_n_cop_put_object;
118atomic_t fscache_n_cop_sync_cache;
119atomic_t fscache_n_cop_attr_changed;
120atomic_t fscache_n_cop_read_or_alloc_page;
121atomic_t fscache_n_cop_read_or_alloc_pages;
122atomic_t fscache_n_cop_allocate_page;
123atomic_t fscache_n_cop_allocate_pages;
124atomic_t fscache_n_cop_write_page;
125atomic_t fscache_n_cop_uncache_page;
126atomic_t fscache_n_cop_dissociate_pages;
127
96/* 128/*
97 * display the general statistics 129 * display the general statistics
98 */ 130 */
@@ -129,10 +161,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
129 atomic_read(&fscache_n_acquires_nobufs), 161 atomic_read(&fscache_n_acquires_nobufs),
130 atomic_read(&fscache_n_acquires_oom)); 162 atomic_read(&fscache_n_acquires_oom));
131 163
132 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u\n", 164 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
133 atomic_read(&fscache_n_object_lookups), 165 atomic_read(&fscache_n_object_lookups),
134 atomic_read(&fscache_n_object_lookups_negative), 166 atomic_read(&fscache_n_object_lookups_negative),
135 atomic_read(&fscache_n_object_lookups_positive), 167 atomic_read(&fscache_n_object_lookups_positive),
168 atomic_read(&fscache_n_object_lookups_timed_out),
136 atomic_read(&fscache_n_object_created)); 169 atomic_read(&fscache_n_object_created));
137 170
138 seq_printf(m, "Updates: n=%u nul=%u run=%u\n", 171 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
@@ -140,10 +173,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
140 atomic_read(&fscache_n_updates_null), 173 atomic_read(&fscache_n_updates_null),
141 atomic_read(&fscache_n_updates_run)); 174 atomic_read(&fscache_n_updates_run));
142 175
143 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u\n", 176 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
144 atomic_read(&fscache_n_relinquishes), 177 atomic_read(&fscache_n_relinquishes),
145 atomic_read(&fscache_n_relinquishes_null), 178 atomic_read(&fscache_n_relinquishes_null),
146 atomic_read(&fscache_n_relinquishes_waitcrt)); 179 atomic_read(&fscache_n_relinquishes_waitcrt),
180 atomic_read(&fscache_n_relinquishes_retire));
147 181
148 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n", 182 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
149 atomic_read(&fscache_n_attr_changed), 183 atomic_read(&fscache_n_attr_changed),
@@ -152,14 +186,16 @@ static int fscache_stats_show(struct seq_file *m, void *v)
152 atomic_read(&fscache_n_attr_changed_nomem), 186 atomic_read(&fscache_n_attr_changed_nomem),
153 atomic_read(&fscache_n_attr_changed_calls)); 187 atomic_read(&fscache_n_attr_changed_calls));
154 188
155 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u\n", 189 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
156 atomic_read(&fscache_n_allocs), 190 atomic_read(&fscache_n_allocs),
157 atomic_read(&fscache_n_allocs_ok), 191 atomic_read(&fscache_n_allocs_ok),
158 atomic_read(&fscache_n_allocs_wait), 192 atomic_read(&fscache_n_allocs_wait),
159 atomic_read(&fscache_n_allocs_nobufs)); 193 atomic_read(&fscache_n_allocs_nobufs),
160 seq_printf(m, "Allocs : ops=%u owt=%u\n", 194 atomic_read(&fscache_n_allocs_intr));
195 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
161 atomic_read(&fscache_n_alloc_ops), 196 atomic_read(&fscache_n_alloc_ops),
162 atomic_read(&fscache_n_alloc_op_waits)); 197 atomic_read(&fscache_n_alloc_op_waits),
198 atomic_read(&fscache_n_allocs_object_dead));
163 199
164 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u" 200 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
165 " int=%u oom=%u\n", 201 " int=%u oom=%u\n",
@@ -170,9 +206,10 @@ static int fscache_stats_show(struct seq_file *m, void *v)
170 atomic_read(&fscache_n_retrievals_nobufs), 206 atomic_read(&fscache_n_retrievals_nobufs),
171 atomic_read(&fscache_n_retrievals_intr), 207 atomic_read(&fscache_n_retrievals_intr),
172 atomic_read(&fscache_n_retrievals_nomem)); 208 atomic_read(&fscache_n_retrievals_nomem));
173 seq_printf(m, "Retrvls: ops=%u owt=%u\n", 209 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
174 atomic_read(&fscache_n_retrieval_ops), 210 atomic_read(&fscache_n_retrieval_ops),
175 atomic_read(&fscache_n_retrieval_op_waits)); 211 atomic_read(&fscache_n_retrieval_op_waits),
212 atomic_read(&fscache_n_retrievals_object_dead));
176 213
177 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n", 214 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
178 atomic_read(&fscache_n_stores), 215 atomic_read(&fscache_n_stores),
@@ -180,18 +217,49 @@ static int fscache_stats_show(struct seq_file *m, void *v)
180 atomic_read(&fscache_n_stores_again), 217 atomic_read(&fscache_n_stores_again),
181 atomic_read(&fscache_n_stores_nobufs), 218 atomic_read(&fscache_n_stores_nobufs),
182 atomic_read(&fscache_n_stores_oom)); 219 atomic_read(&fscache_n_stores_oom));
183 seq_printf(m, "Stores : ops=%u run=%u\n", 220 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
184 atomic_read(&fscache_n_store_ops), 221 atomic_read(&fscache_n_store_ops),
185 atomic_read(&fscache_n_store_calls)); 222 atomic_read(&fscache_n_store_calls),
223 atomic_read(&fscache_n_store_pages),
224 atomic_read(&fscache_n_store_radix_deletes),
225 atomic_read(&fscache_n_store_pages_over_limit));
186 226
187 seq_printf(m, "Ops : pend=%u run=%u enq=%u\n", 227 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
228 atomic_read(&fscache_n_store_vmscan_not_storing),
229 atomic_read(&fscache_n_store_vmscan_gone),
230 atomic_read(&fscache_n_store_vmscan_busy),
231 atomic_read(&fscache_n_store_vmscan_cancelled));
232
233 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
188 atomic_read(&fscache_n_op_pend), 234 atomic_read(&fscache_n_op_pend),
189 atomic_read(&fscache_n_op_run), 235 atomic_read(&fscache_n_op_run),
190 atomic_read(&fscache_n_op_enqueue)); 236 atomic_read(&fscache_n_op_enqueue),
237 atomic_read(&fscache_n_op_cancelled),
238 atomic_read(&fscache_n_op_rejected));
191 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", 239 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
192 atomic_read(&fscache_n_op_deferred_release), 240 atomic_read(&fscache_n_op_deferred_release),
193 atomic_read(&fscache_n_op_release), 241 atomic_read(&fscache_n_op_release),
194 atomic_read(&fscache_n_op_gc)); 242 atomic_read(&fscache_n_op_gc));
243
244 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
245 atomic_read(&fscache_n_cop_alloc_object),
246 atomic_read(&fscache_n_cop_lookup_object),
247 atomic_read(&fscache_n_cop_lookup_complete),
248 atomic_read(&fscache_n_cop_grab_object));
249 seq_printf(m, "CacheOp: upo=%d dro=%d pto=%d atc=%d syn=%d\n",
250 atomic_read(&fscache_n_cop_update_object),
251 atomic_read(&fscache_n_cop_drop_object),
252 atomic_read(&fscache_n_cop_put_object),
253 atomic_read(&fscache_n_cop_attr_changed),
254 atomic_read(&fscache_n_cop_sync_cache));
255 seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n",
256 atomic_read(&fscache_n_cop_read_or_alloc_page),
257 atomic_read(&fscache_n_cop_read_or_alloc_pages),
258 atomic_read(&fscache_n_cop_allocate_page),
259 atomic_read(&fscache_n_cop_allocate_pages),
260 atomic_read(&fscache_n_cop_write_page),
261 atomic_read(&fscache_n_cop_uncache_page),
262 atomic_read(&fscache_n_cop_dissociate_pages));
195 return 0; 263 return 0;
196} 264}
197 265
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8ada78aade58..4787ae6c5c1c 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -385,6 +385,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
385 if (fc->no_create) 385 if (fc->no_create)
386 return -ENOSYS; 386 return -ENOSYS;
387 387
388 if (flags & O_DIRECT)
389 return -EINVAL;
390
388 forget_req = fuse_get_req(fc); 391 forget_req = fuse_get_req(fc);
389 if (IS_ERR(forget_req)) 392 if (IS_ERR(forget_req))
390 return PTR_ERR(forget_req); 393 return PTR_ERR(forget_req);
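
The fuse_create_open() hunk above rejects O_DIRECT during an atomic create, so the failure is reported up front rather than after a request has gone out to the userspace server. A small illustration of what a caller would observe on a FUSE mount (the mount point is hypothetical; the errno corresponds to the -EINVAL added above):

/* Sketch: atomic create with O_DIRECT on a FUSE filesystem.
 * With the change above the create path returns -EINVAL, which
 * open(2) reports as errno == EINVAL. The path is made up.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/mnt/fuse/newfile", O_CREAT | O_WRONLY | O_DIRECT, 0644);

	if (fd < 0 && errno == EINVAL)
		printf("create+O_DIRECT rejected as expected\n");
	else if (fd >= 0)
		printf("opened fd %d (filesystem allowed O_DIRECT create)\n", fd);
	else
		perror("open");
	return 0;
}
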
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 5971359d2090..4dcddf83326f 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -8,6 +8,8 @@ config GFS2_FS
8 select FS_POSIX_ACL 8 select FS_POSIX_ACL
9 select CRC32 9 select CRC32
10 select SLOW_WORK 10 select SLOW_WORK
11 select QUOTA
12 select QUOTACTL
11 help 13 help
12 A cluster filesystem. 14 A cluster filesystem.
13 15
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 3fc4e3ac7d84..3eb1ea846173 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -12,6 +12,7 @@
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/completion.h> 13#include <linux/completion.h>
14#include <linux/buffer_head.h> 14#include <linux/buffer_head.h>
15#include <linux/xattr.h>
15#include <linux/posix_acl.h> 16#include <linux/posix_acl.h>
16#include <linux/posix_acl_xattr.h> 17#include <linux/posix_acl_xattr.h>
17#include <linux/gfs2_ondisk.h> 18#include <linux/gfs2_ondisk.h>
@@ -26,108 +27,44 @@
26#include "trans.h" 27#include "trans.h"
27#include "util.h" 28#include "util.h"
28 29
29#define ACL_ACCESS 1 30static const char *gfs2_acl_name(int type)
30#define ACL_DEFAULT 0
31
32int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
33 struct gfs2_ea_request *er, int *remove, mode_t *mode)
34{ 31{
35 struct posix_acl *acl; 32 switch (type) {
36 int error; 33 case ACL_TYPE_ACCESS:
37 34 return GFS2_POSIX_ACL_ACCESS;
38 error = gfs2_acl_validate_remove(ip, access); 35 case ACL_TYPE_DEFAULT:
39 if (error) 36 return GFS2_POSIX_ACL_DEFAULT;
40 return error;
41
42 if (!er->er_data)
43 return -EINVAL;
44
45 acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
46 if (IS_ERR(acl))
47 return PTR_ERR(acl);
48 if (!acl) {
49 *remove = 1;
50 return 0;
51 }
52
53 error = posix_acl_valid(acl);
54 if (error)
55 goto out;
56
57 if (access) {
58 error = posix_acl_equiv_mode(acl, mode);
59 if (!error)
60 *remove = 1;
61 else if (error > 0)
62 error = 0;
63 } 37 }
64 38 return NULL;
65out:
66 posix_acl_release(acl);
67 return error;
68}
69
70int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
71{
72 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
73 return -EOPNOTSUPP;
74 if (!is_owner_or_cap(&ip->i_inode))
75 return -EPERM;
76 if (S_ISLNK(ip->i_inode.i_mode))
77 return -EOPNOTSUPP;
78 if (!access && !S_ISDIR(ip->i_inode.i_mode))
79 return -EACCES;
80
81 return 0;
82} 39}
83 40
84static int acl_get(struct gfs2_inode *ip, const char *name, 41static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type)
85 struct posix_acl **acl, struct gfs2_ea_location *el,
86 char **datap, unsigned int *lenp)
87{ 42{
43 struct posix_acl *acl;
44 const char *name;
88 char *data; 45 char *data;
89 unsigned int len; 46 int len;
90 int error;
91
92 el->el_bh = NULL;
93 47
94 if (!ip->i_eattr) 48 if (!ip->i_eattr)
95 return 0; 49 return NULL;
96
97 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, el);
98 if (error)
99 return error;
100 if (!el->el_ea)
101 return 0;
102 if (!GFS2_EA_DATA_LEN(el->el_ea))
103 goto out;
104 50
105 len = GFS2_EA_DATA_LEN(el->el_ea); 51 acl = get_cached_acl(&ip->i_inode, type);
106 data = kmalloc(len, GFP_NOFS); 52 if (acl != ACL_NOT_CACHED)
107 error = -ENOMEM; 53 return acl;
108 if (!data)
109 goto out;
110 54
111 error = gfs2_ea_get_copy(ip, el, data, len); 55 name = gfs2_acl_name(type);
112 if (error < 0) 56 if (name == NULL)
113 goto out_kfree; 57 return ERR_PTR(-EINVAL);
114 error = 0;
115 58
116 if (acl) { 59 len = gfs2_xattr_acl_get(ip, name, &data);
117 *acl = posix_acl_from_xattr(data, len); 60 if (len < 0)
118 if (IS_ERR(*acl)) 61 return ERR_PTR(len);
119 error = PTR_ERR(*acl); 62 if (len == 0)
120 } 63 return NULL;
121 64
122out_kfree: 65 acl = posix_acl_from_xattr(data, len);
123 if (error || !datap) { 66 kfree(data);
124 kfree(data); 67 return acl;
125 } else {
126 *datap = data;
127 *lenp = len;
128 }
129out:
130 return error;
131} 68}
132 69
133/** 70/**
@@ -140,14 +77,12 @@ out:
140 77
141int gfs2_check_acl(struct inode *inode, int mask) 78int gfs2_check_acl(struct inode *inode, int mask)
142{ 79{
143 struct gfs2_ea_location el; 80 struct posix_acl *acl;
144 struct posix_acl *acl = NULL;
145 int error; 81 int error;
146 82
147 error = acl_get(GFS2_I(inode), GFS2_POSIX_ACL_ACCESS, &acl, &el, NULL, NULL); 83 acl = gfs2_acl_get(GFS2_I(inode), ACL_TYPE_ACCESS);
148 brelse(el.el_bh); 84 if (IS_ERR(acl))
149 if (error) 85 return PTR_ERR(acl);
150 return error;
151 86
152 if (acl) { 87 if (acl) {
153 error = posix_acl_permission(inode, acl, mask); 88 error = posix_acl_permission(inode, acl, mask);
@@ -158,57 +93,75 @@ int gfs2_check_acl(struct inode *inode, int mask)
158 return -EAGAIN; 93 return -EAGAIN;
159} 94}
160 95
161static int munge_mode(struct gfs2_inode *ip, mode_t mode) 96static int gfs2_set_mode(struct inode *inode, mode_t mode)
162{ 97{
163 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 98 int error = 0;
164 struct buffer_head *dibh;
165 int error;
166 99
167 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 100 if (mode != inode->i_mode) {
168 if (error) 101 struct iattr iattr;
169 return error;
170 102
171 error = gfs2_meta_inode_buffer(ip, &dibh); 103 iattr.ia_valid = ATTR_MODE;
172 if (!error) { 104 iattr.ia_mode = mode;
173 gfs2_assert_withdraw(sdp, 105
174 (ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT)); 106 error = gfs2_setattr_simple(GFS2_I(inode), &iattr);
175 ip->i_inode.i_mode = mode;
176 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
177 gfs2_dinode_out(ip, dibh->b_data);
178 brelse(dibh);
179 } 107 }
180 108
181 gfs2_trans_end(sdp); 109 return error;
110}
111
112static int gfs2_acl_set(struct inode *inode, int type, struct posix_acl *acl)
113{
114 int error;
115 int len;
116 char *data;
117 const char *name = gfs2_acl_name(type);
182 118
183 return 0; 119 BUG_ON(name == NULL);
120 len = posix_acl_to_xattr(acl, NULL, 0);
121 if (len == 0)
122 return 0;
123 data = kmalloc(len, GFP_NOFS);
124 if (data == NULL)
125 return -ENOMEM;
126 error = posix_acl_to_xattr(acl, data, len);
127 if (error < 0)
128 goto out;
129 error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, data, len, 0);
130 if (!error)
131 set_cached_acl(inode, type, acl);
132out:
133 kfree(data);
134 return error;
184} 135}
185 136
186int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip) 137int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode)
187{ 138{
188 struct gfs2_ea_location el;
189 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 139 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
190 struct posix_acl *acl = NULL, *clone; 140 struct posix_acl *acl, *clone;
191 mode_t mode = ip->i_inode.i_mode; 141 mode_t mode = inode->i_mode;
192 char *data = NULL; 142 int error = 0;
193 unsigned int len;
194 int error;
195 143
196 if (!sdp->sd_args.ar_posix_acl) 144 if (!sdp->sd_args.ar_posix_acl)
197 return 0; 145 return 0;
198 if (S_ISLNK(ip->i_inode.i_mode)) 146 if (S_ISLNK(inode->i_mode))
199 return 0; 147 return 0;
200 148
201 error = acl_get(dip, GFS2_POSIX_ACL_DEFAULT, &acl, &el, &data, &len); 149 acl = gfs2_acl_get(dip, ACL_TYPE_DEFAULT);
202 brelse(el.el_bh); 150 if (IS_ERR(acl))
203 if (error) 151 return PTR_ERR(acl);
204 return error;
205 if (!acl) { 152 if (!acl) {
206 mode &= ~current_umask(); 153 mode &= ~current_umask();
207 if (mode != ip->i_inode.i_mode) 154 if (mode != inode->i_mode)
208 error = munge_mode(ip, mode); 155 error = gfs2_set_mode(inode, mode);
209 return error; 156 return error;
210 } 157 }
211 158
159 if (S_ISDIR(inode->i_mode)) {
160 error = gfs2_acl_set(inode, ACL_TYPE_DEFAULT, acl);
161 if (error)
162 goto out;
163 }
164
212 clone = posix_acl_clone(acl, GFP_NOFS); 165 clone = posix_acl_clone(acl, GFP_NOFS);
213 error = -ENOMEM; 166 error = -ENOMEM;
214 if (!clone) 167 if (!clone)
@@ -216,43 +169,32 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
216 posix_acl_release(acl); 169 posix_acl_release(acl);
217 acl = clone; 170 acl = clone;
218 171
219 if (S_ISDIR(ip->i_inode.i_mode)) {
220 error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
221 GFS2_POSIX_ACL_DEFAULT, data, len, 0);
222 if (error)
223 goto out;
224 }
225
226 error = posix_acl_create_masq(acl, &mode); 172 error = posix_acl_create_masq(acl, &mode);
227 if (error < 0) 173 if (error < 0)
228 goto out; 174 goto out;
229 if (error == 0) 175 if (error == 0)
230 goto munge; 176 goto munge;
231 177
232 posix_acl_to_xattr(acl, data, len); 178 error = gfs2_acl_set(inode, ACL_TYPE_ACCESS, acl);
233 error = gfs2_xattr_set(&ip->i_inode, GFS2_EATYPE_SYS,
234 GFS2_POSIX_ACL_ACCESS, data, len, 0);
235 if (error) 179 if (error)
236 goto out; 180 goto out;
237munge: 181munge:
238 error = munge_mode(ip, mode); 182 error = gfs2_set_mode(inode, mode);
239out: 183out:
240 posix_acl_release(acl); 184 posix_acl_release(acl);
241 kfree(data);
242 return error; 185 return error;
243} 186}
244 187
245int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr) 188int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
246{ 189{
247 struct posix_acl *acl = NULL, *clone; 190 struct posix_acl *acl, *clone;
248 struct gfs2_ea_location el;
249 char *data; 191 char *data;
250 unsigned int len; 192 unsigned int len;
251 int error; 193 int error;
252 194
253 error = acl_get(ip, GFS2_POSIX_ACL_ACCESS, &acl, &el, &data, &len); 195 acl = gfs2_acl_get(ip, ACL_TYPE_ACCESS);
254 if (error) 196 if (IS_ERR(acl))
255 goto out_brelse; 197 return PTR_ERR(acl);
256 if (!acl) 198 if (!acl)
257 return gfs2_setattr_simple(ip, attr); 199 return gfs2_setattr_simple(ip, attr);
258 200
@@ -265,15 +207,134 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
265 207
266 error = posix_acl_chmod_masq(acl, attr->ia_mode); 208 error = posix_acl_chmod_masq(acl, attr->ia_mode);
267 if (!error) { 209 if (!error) {
210 len = posix_acl_to_xattr(acl, NULL, 0);
211 data = kmalloc(len, GFP_NOFS);
212 error = -ENOMEM;
213 if (data == NULL)
214 goto out;
268 posix_acl_to_xattr(acl, data, len); 215 posix_acl_to_xattr(acl, data, len);
269 error = gfs2_ea_acl_chmod(ip, &el, attr, data); 216 error = gfs2_xattr_acl_chmod(ip, attr, data);
217 kfree(data);
218 set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl);
270 } 219 }
271 220
272out: 221out:
273 posix_acl_release(acl); 222 posix_acl_release(acl);
274 kfree(data);
275out_brelse:
276 brelse(el.el_bh);
277 return error; 223 return error;
278} 224}
279 225
226static int gfs2_acl_type(const char *name)
227{
228 if (strcmp(name, GFS2_POSIX_ACL_ACCESS) == 0)
229 return ACL_TYPE_ACCESS;
230 if (strcmp(name, GFS2_POSIX_ACL_DEFAULT) == 0)
231 return ACL_TYPE_DEFAULT;
232 return -EINVAL;
233}
234
235static int gfs2_xattr_system_get(struct inode *inode, const char *name,
236 void *buffer, size_t size)
237{
238 struct posix_acl *acl;
239 int type;
240 int error;
241
242 type = gfs2_acl_type(name);
243 if (type < 0)
244 return type;
245
246 acl = gfs2_acl_get(GFS2_I(inode), type);
247 if (IS_ERR(acl))
248 return PTR_ERR(acl);
249 if (acl == NULL)
250 return -ENODATA;
251
252 error = posix_acl_to_xattr(acl, buffer, size);
253 posix_acl_release(acl);
254
255 return error;
256}
257
258static int gfs2_xattr_system_set(struct inode *inode, const char *name,
259 const void *value, size_t size, int flags)
260{
261 struct gfs2_sbd *sdp = GFS2_SB(inode);
262 struct posix_acl *acl = NULL;
263 int error = 0, type;
264
265 if (!sdp->sd_args.ar_posix_acl)
266 return -EOPNOTSUPP;
267
268 type = gfs2_acl_type(name);
269 if (type < 0)
270 return type;
271 if (flags & XATTR_CREATE)
272 return -EINVAL;
273 if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
274 return value ? -EACCES : 0;
275 if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER))
276 return -EPERM;
277 if (S_ISLNK(inode->i_mode))
278 return -EOPNOTSUPP;
279
280 if (!value)
281 goto set_acl;
282
283 acl = posix_acl_from_xattr(value, size);
284 if (!acl) {
285 /*
286 * acl_set_file(3) may request that we set default ACLs with
287 * zero length -- defend (gracefully) against that here.
288 */
289 goto out;
290 }
291 if (IS_ERR(acl)) {
292 error = PTR_ERR(acl);
293 goto out;
294 }
295
296 error = posix_acl_valid(acl);
297 if (error)
298 goto out_release;
299
300 error = -EINVAL;
301 if (acl->a_count > GFS2_ACL_MAX_ENTRIES)
302 goto out_release;
303
304 if (type == ACL_TYPE_ACCESS) {
305 mode_t mode = inode->i_mode;
306 error = posix_acl_equiv_mode(acl, &mode);
307
308 if (error <= 0) {
309 posix_acl_release(acl);
310 acl = NULL;
311
312 if (error < 0)
313 return error;
314 }
315
316 error = gfs2_set_mode(inode, mode);
317 if (error)
318 goto out_release;
319 }
320
321set_acl:
322 error = gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, 0);
323 if (!error) {
324 if (acl)
325 set_cached_acl(inode, type, acl);
326 else
327 forget_cached_acl(inode, type);
328 }
329out_release:
330 posix_acl_release(acl);
331out:
332 return error;
333}
334
335struct xattr_handler gfs2_xattr_system_handler = {
336 .prefix = XATTR_SYSTEM_PREFIX,
337 .get = gfs2_xattr_system_get,
338 .set = gfs2_xattr_system_set,
339};
340
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 6751930bfb64..9306a2e6620c 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -13,26 +13,12 @@
13#include "incore.h" 13#include "incore.h"
14 14
15#define GFS2_POSIX_ACL_ACCESS "posix_acl_access" 15#define GFS2_POSIX_ACL_ACCESS "posix_acl_access"
16#define GFS2_POSIX_ACL_ACCESS_LEN 16
17#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default" 16#define GFS2_POSIX_ACL_DEFAULT "posix_acl_default"
18#define GFS2_POSIX_ACL_DEFAULT_LEN 17 17#define GFS2_ACL_MAX_ENTRIES 25
19 18
20#define GFS2_ACL_IS_ACCESS(name, len) \ 19extern int gfs2_check_acl(struct inode *inode, int mask);
21 ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \ 20extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode);
22 !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len))) 21extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
23 22extern struct xattr_handler gfs2_xattr_system_handler;
24#define GFS2_ACL_IS_DEFAULT(name, len) \
25 ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
26 !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))
27
28struct gfs2_ea_request;
29
30int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
31 struct gfs2_ea_request *er,
32 int *remove, mode_t *mode);
33int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
34int gfs2_check_acl(struct inode *inode, int mask);
35int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
36int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
37 23
38#endif /* __ACL_DOT_H__ */ 24#endif /* __ACL_DOT_H__ */
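
With gfs2_xattr_system_handler registered, GFS2 ACLs are now reached through the generic system.* xattr namespace (XATTR_SYSTEM_PREFIX plus the posix_acl_access / posix_acl_default names kept in acl.h) instead of the removed gfs2_acl_validate_* helpers. A user-space sketch of exercising that path on a posix_acl-enabled GFS2 mount (the file path is made up):

/* Sketch: read the access ACL of a file on GFS2 through the
 * system xattr namespace served by gfs2_xattr_system_get() above.
 * ENODATA means no ACL is set, matching the -ENODATA return above.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[1024];
	ssize_t len = getxattr("/mnt/gfs2/somefile",
			       "system.posix_acl_access", buf, sizeof(buf));

	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	printf("got %zd bytes of posix_acl xattr data\n", len);
	return 0;
}
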
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 694b5d48f036..7b8da9415267 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -269,7 +269,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
269 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; 269 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
270 unsigned offset = i_size & (PAGE_CACHE_SIZE-1); 270 unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
271 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); 271 unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
272 struct backing_dev_info *bdi = mapping->backing_dev_info;
273 int i; 272 int i;
274 int ret; 273 int ret;
275 274
@@ -313,11 +312,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
313 312
314 if (ret || (--(wbc->nr_to_write) <= 0)) 313 if (ret || (--(wbc->nr_to_write) <= 0))
315 ret = 1; 314 ret = 1;
316 if (wbc->nonblocking && bdi_write_congested(bdi)) {
317 wbc->encountered_congestion = 1;
318 ret = 1;
319 }
320
321 } 315 }
322 gfs2_trans_end(sdp); 316 gfs2_trans_end(sdp);
323 return ret; 317 return ret;
@@ -338,7 +332,6 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping,
338static int gfs2_write_cache_jdata(struct address_space *mapping, 332static int gfs2_write_cache_jdata(struct address_space *mapping,
339 struct writeback_control *wbc) 333 struct writeback_control *wbc)
340{ 334{
341 struct backing_dev_info *bdi = mapping->backing_dev_info;
342 int ret = 0; 335 int ret = 0;
343 int done = 0; 336 int done = 0;
344 struct pagevec pvec; 337 struct pagevec pvec;
@@ -348,11 +341,6 @@ static int gfs2_write_cache_jdata(struct address_space *mapping,
348 int scanned = 0; 341 int scanned = 0;
349 int range_whole = 0; 342 int range_whole = 0;
350 343
351 if (wbc->nonblocking && bdi_write_congested(bdi)) {
352 wbc->encountered_congestion = 1;
353 return 0;
354 }
355
356 pagevec_init(&pvec, 0); 344 pagevec_init(&pvec, 0);
357 if (wbc->range_cyclic) { 345 if (wbc->range_cyclic) {
358 index = mapping->writeback_index; /* Start from prev offset */ 346 index = mapping->writeback_index; /* Start from prev offset */
@@ -819,8 +807,10 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
819 mark_inode_dirty(inode); 807 mark_inode_dirty(inode);
820 } 808 }
821 809
822 if (inode == sdp->sd_rindex) 810 if (inode == sdp->sd_rindex) {
823 adjust_fs_space(inode); 811 adjust_fs_space(inode);
812 ip->i_gh.gh_flags |= GL_NOCACHE;
813 }
824 814
825 brelse(dibh); 815 brelse(dibh);
826 gfs2_trans_end(sdp); 816 gfs2_trans_end(sdp);
@@ -889,8 +879,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
889 mark_inode_dirty(inode); 879 mark_inode_dirty(inode);
890 } 880 }
891 881
892 if (inode == sdp->sd_rindex) 882 if (inode == sdp->sd_rindex) {
893 adjust_fs_space(inode); 883 adjust_fs_space(inode);
884 ip->i_gh.gh_flags |= GL_NOCACHE;
885 }
894 886
895 brelse(dibh); 887 brelse(dibh);
896 gfs2_trans_end(sdp); 888 gfs2_trans_end(sdp);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 297d7e5cebad..25fddc100f18 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -525,38 +525,6 @@ consist_inode:
525 return ERR_PTR(-EIO); 525 return ERR_PTR(-EIO);
526} 526}
527 527
528
529/**
530 * dirent_first - Return the first dirent
531 * @dip: the directory
532 * @bh: The buffer
533 * @dent: Pointer to list of dirents
534 *
535 * return first dirent whether bh points to leaf or stuffed dinode
536 *
537 * Returns: IS_LEAF, IS_DINODE, or -errno
538 */
539
540static int dirent_first(struct gfs2_inode *dip, struct buffer_head *bh,
541 struct gfs2_dirent **dent)
542{
543 struct gfs2_meta_header *h = (struct gfs2_meta_header *)bh->b_data;
544
545 if (be32_to_cpu(h->mh_type) == GFS2_METATYPE_LF) {
546 if (gfs2_meta_check(GFS2_SB(&dip->i_inode), bh))
547 return -EIO;
548 *dent = (struct gfs2_dirent *)(bh->b_data +
549 sizeof(struct gfs2_leaf));
550 return IS_LEAF;
551 } else {
552 if (gfs2_metatype_check(GFS2_SB(&dip->i_inode), bh, GFS2_METATYPE_DI))
553 return -EIO;
554 *dent = (struct gfs2_dirent *)(bh->b_data +
555 sizeof(struct gfs2_dinode));
556 return IS_DINODE;
557 }
558}
559
560static int dirent_check_reclen(struct gfs2_inode *dip, 528static int dirent_check_reclen(struct gfs2_inode *dip,
561 const struct gfs2_dirent *d, const void *end_p) 529 const struct gfs2_dirent *d, const void *end_p)
562{ 530{
@@ -1006,7 +974,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1006 divider = (start + half_len) << (32 - dip->i_depth); 974 divider = (start + half_len) << (32 - dip->i_depth);
1007 975
1008 /* Copy the entries */ 976 /* Copy the entries */
1009 dirent_first(dip, obh, &dent); 977 dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));
1010 978
1011 do { 979 do {
1012 next = dent; 980 next = dent;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 8b674b1f3a55..f455a03a09e2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
241 int rv = 0; 241 int rv = 0;
242 242
243 write_lock(gl_lock_addr(gl->gl_hash)); 243 write_lock(gl_lock_addr(gl->gl_hash));
244 if (atomic_dec_and_test(&gl->gl_ref)) { 244 if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
245 hlist_del(&gl->gl_list); 245 hlist_del(&gl->gl_list);
246 write_unlock(gl_lock_addr(gl->gl_hash));
247 spin_lock(&lru_lock);
248 if (!list_empty(&gl->gl_lru)) { 246 if (!list_empty(&gl->gl_lru)) {
249 list_del_init(&gl->gl_lru); 247 list_del_init(&gl->gl_lru);
250 atomic_dec(&lru_count); 248 atomic_dec(&lru_count);
251 } 249 }
252 spin_unlock(&lru_lock); 250 spin_unlock(&lru_lock);
251 write_unlock(gl_lock_addr(gl->gl_hash));
253 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 252 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
254 glock_free(gl); 253 glock_free(gl);
255 rv = 1; 254 rv = 1;
@@ -513,7 +512,6 @@ retry:
513 GLOCK_BUG_ON(gl, 1); 512 GLOCK_BUG_ON(gl, 1);
514 } 513 }
515 spin_unlock(&gl->gl_spin); 514 spin_unlock(&gl->gl_spin);
516 gfs2_glock_put(gl);
517 return; 515 return;
518 } 516 }
519 517
@@ -524,8 +522,6 @@ retry:
524 if (glops->go_xmote_bh) { 522 if (glops->go_xmote_bh) {
525 spin_unlock(&gl->gl_spin); 523 spin_unlock(&gl->gl_spin);
526 rv = glops->go_xmote_bh(gl, gh); 524 rv = glops->go_xmote_bh(gl, gh);
527 if (rv == -EAGAIN)
528 return;
529 spin_lock(&gl->gl_spin); 525 spin_lock(&gl->gl_spin);
530 if (rv) { 526 if (rv) {
531 do_error(gl, rv); 527 do_error(gl, rv);
@@ -540,7 +536,6 @@ out:
540 clear_bit(GLF_LOCK, &gl->gl_flags); 536 clear_bit(GLF_LOCK, &gl->gl_flags);
541out_locked: 537out_locked:
542 spin_unlock(&gl->gl_spin); 538 spin_unlock(&gl->gl_spin);
543 gfs2_glock_put(gl);
544} 539}
545 540
546static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, 541static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)
600 595
601 if (!(ret & LM_OUT_ASYNC)) { 596 if (!(ret & LM_OUT_ASYNC)) {
602 finish_xmote(gl, ret); 597 finish_xmote(gl, ret);
603 gfs2_glock_hold(gl);
604 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 598 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
605 gfs2_glock_put(gl); 599 gfs2_glock_put(gl);
606 } else { 600 } else {
@@ -672,12 +666,17 @@ out:
672 return; 666 return;
673 667
674out_sched: 668out_sched:
669 clear_bit(GLF_LOCK, &gl->gl_flags);
670 smp_mb__after_clear_bit();
675 gfs2_glock_hold(gl); 671 gfs2_glock_hold(gl);
676 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 672 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
677 gfs2_glock_put_nolock(gl); 673 gfs2_glock_put_nolock(gl);
674 return;
675
678out_unlock: 676out_unlock:
679 clear_bit(GLF_LOCK, &gl->gl_flags); 677 clear_bit(GLF_LOCK, &gl->gl_flags);
680 goto out; 678 smp_mb__after_clear_bit();
679 return;
681} 680}
682 681
683static void delete_work_func(struct work_struct *work) 682static void delete_work_func(struct work_struct *work)
@@ -707,9 +706,12 @@ static void glock_work_func(struct work_struct *work)
707{ 706{
708 unsigned long delay = 0; 707 unsigned long delay = 0;
709 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 708 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
709 int drop_ref = 0;
710 710
711 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 711 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
712 finish_xmote(gl, gl->gl_reply); 712 finish_xmote(gl, gl->gl_reply);
713 drop_ref = 1;
714 }
713 down_read(&gfs2_umount_flush_sem); 715 down_read(&gfs2_umount_flush_sem);
714 spin_lock(&gl->gl_spin); 716 spin_lock(&gl->gl_spin);
715 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 717 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -727,6 +729,8 @@ static void glock_work_func(struct work_struct *work)
727 if (!delay || 729 if (!delay ||
728 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 730 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
729 gfs2_glock_put(gl); 731 gfs2_glock_put(gl);
732 if (drop_ref)
733 gfs2_glock_put(gl);
730} 734}
731 735
732/** 736/**
@@ -1361,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
1361 list_del_init(&gl->gl_lru); 1365 list_del_init(&gl->gl_lru);
1362 atomic_dec(&lru_count); 1366 atomic_dec(&lru_count);
1363 1367
1364 /* Check if glock is about to be freed */
1365 if (atomic_read(&gl->gl_ref) == 0)
1366 continue;
1367
1368 /* Test for being demotable */ 1368 /* Test for being demotable */
1369 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1369 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1370 gfs2_glock_hold(gl); 1370 gfs2_glock_hold(gl);
@@ -1375,10 +1375,11 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
1375 handle_callback(gl, LM_ST_UNLOCKED, 0); 1375 handle_callback(gl, LM_ST_UNLOCKED, 0);
1376 nr--; 1376 nr--;
1377 } 1377 }
1378 clear_bit(GLF_LOCK, &gl->gl_flags);
1379 smp_mb__after_clear_bit();
1378 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1380 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1379 gfs2_glock_put_nolock(gl); 1381 gfs2_glock_put_nolock(gl);
1380 spin_unlock(&gl->gl_spin); 1382 spin_unlock(&gl->gl_spin);
1381 clear_bit(GLF_LOCK, &gl->gl_flags);
1382 spin_lock(&lru_lock); 1383 spin_lock(&lru_lock);
1383 continue; 1384 continue;
1384 } 1385 }
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index c609894ec0d0..13f0bd228132 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -180,15 +180,6 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
180 return gl->gl_state == LM_ST_SHARED; 180 return gl->gl_state == LM_ST_SHARED;
181} 181}
182 182
183static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
184{
185 int ret;
186 spin_lock(&gl->gl_spin);
187 ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
188 spin_unlock(&gl->gl_spin);
189 return ret;
190}
191
192int gfs2_glock_get(struct gfs2_sbd *sdp, 183int gfs2_glock_get(struct gfs2_sbd *sdp,
193 u64 number, const struct gfs2_glock_operations *glops, 184 u64 number, const struct gfs2_glock_operations *glops,
194 int create, struct gfs2_glock **glp); 185 int create, struct gfs2_glock **glp);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 6985eef06c39..78554acc0605 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -13,6 +13,7 @@
13#include <linux/buffer_head.h> 13#include <linux/buffer_head.h>
14#include <linux/gfs2_ondisk.h> 14#include <linux/gfs2_ondisk.h>
15#include <linux/bio.h> 15#include <linux/bio.h>
16#include <linux/posix_acl.h>
16 17
17#include "gfs2.h" 18#include "gfs2.h"
18#include "incore.h" 19#include "incore.h"
@@ -184,8 +185,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
184 if (flags & DIO_METADATA) { 185 if (flags & DIO_METADATA) {
185 struct address_space *mapping = gl->gl_aspace->i_mapping; 186 struct address_space *mapping = gl->gl_aspace->i_mapping;
186 truncate_inode_pages(mapping, 0); 187 truncate_inode_pages(mapping, 0);
187 if (ip) 188 if (ip) {
188 set_bit(GIF_INVALID, &ip->i_flags); 189 set_bit(GIF_INVALID, &ip->i_flags);
190 forget_all_cached_acls(&ip->i_inode);
191 }
189 } 192 }
190 193
191 if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) 194 if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 6edb423f90b3..4792200978c8 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -429,7 +429,11 @@ struct gfs2_args {
429 unsigned int ar_meta:1; /* mount metafs */ 429 unsigned int ar_meta:1; /* mount metafs */
430 unsigned int ar_discard:1; /* discard requests */ 430 unsigned int ar_discard:1; /* discard requests */
431 unsigned int ar_errors:2; /* errors=withdraw | panic */ 431 unsigned int ar_errors:2; /* errors=withdraw | panic */
432 unsigned int ar_nobarrier:1; /* do not send barriers */
432 int ar_commit; /* Commit interval */ 433 int ar_commit; /* Commit interval */
434 int ar_statfs_quantum; /* The fast statfs interval */
435 int ar_quota_quantum; /* The quota interval */
436 int ar_statfs_percent; /* The % change to force sync */
433}; 437};
434 438
435struct gfs2_tune { 439struct gfs2_tune {
@@ -558,6 +562,7 @@ struct gfs2_sbd {
558 spinlock_t sd_statfs_spin; 562 spinlock_t sd_statfs_spin;
559 struct gfs2_statfs_change_host sd_statfs_master; 563 struct gfs2_statfs_change_host sd_statfs_master;
560 struct gfs2_statfs_change_host sd_statfs_local; 564 struct gfs2_statfs_change_host sd_statfs_local;
565 int sd_statfs_force_sync;
561 566
562 /* Resource group stuff */ 567 /* Resource group stuff */
563 568
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index fb15d3b1f409..26ba2a4c4a2d 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -871,7 +871,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
871 if (error) 871 if (error)
872 goto fail_gunlock2; 872 goto fail_gunlock2;
873 873
874 error = gfs2_acl_create(dip, GFS2_I(inode)); 874 error = gfs2_acl_create(dip, inode);
875 if (error) 875 if (error)
876 goto fail_gunlock2; 876 goto fail_gunlock2;
877 877
@@ -947,9 +947,7 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
947 947
948 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 948 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
949 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI); 949 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
950 str->di_header.__pad0 = 0;
951 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI); 950 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
952 str->di_header.__pad1 = 0;
953 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr); 951 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
954 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); 952 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
955 str->di_mode = cpu_to_be32(ip->i_inode.i_mode); 953 str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 13c6237c5f67..4511b08fc451 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -596,7 +596,9 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
596 memset(lh, 0, sizeof(struct gfs2_log_header)); 596 memset(lh, 0, sizeof(struct gfs2_log_header));
597 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 597 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
598 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); 598 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
599 lh->lh_header.__pad0 = cpu_to_be64(0);
599 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); 600 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
601 lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
600 lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++); 602 lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
601 lh->lh_flags = cpu_to_be32(flags); 603 lh->lh_flags = cpu_to_be32(flags);
602 lh->lh_tail = cpu_to_be32(tail); 604 lh->lh_tail = cpu_to_be32(tail);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 9969ff062c5b..de97632ba32f 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -132,6 +132,7 @@ static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
132static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) 132static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
133{ 133{
134 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le); 134 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
135 struct gfs2_meta_header *mh;
135 struct gfs2_trans *tr; 136 struct gfs2_trans *tr;
136 137
137 lock_buffer(bd->bd_bh); 138 lock_buffer(bd->bd_bh);
@@ -148,6 +149,9 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
148 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); 149 set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
149 gfs2_meta_check(sdp, bd->bd_bh); 150 gfs2_meta_check(sdp, bd->bd_bh);
150 gfs2_pin(sdp, bd->bd_bh); 151 gfs2_pin(sdp, bd->bd_bh);
152 mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
153 mh->__pad0 = cpu_to_be64(0);
154 mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
151 sdp->sd_log_num_buf++; 155 sdp->sd_log_num_buf++;
152 list_add(&le->le_list, &sdp->sd_log_le_buf); 156 list_add(&le->le_list, &sdp->sd_log_le_buf);
153 tr->tr_num_buf_new++; 157 tr->tr_num_buf_new++;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index eacd78a5d082..5b31f7741a8f 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -114,7 +114,7 @@ static int __init init_gfs2_fs(void)
114 if (error) 114 if (error)
115 goto fail_unregister; 115 goto fail_unregister;
116 116
117 error = slow_work_register_user(); 117 error = slow_work_register_user(THIS_MODULE);
118 if (error) 118 if (error)
119 goto fail_slow; 119 goto fail_slow;
120 120
@@ -163,7 +163,7 @@ static void __exit exit_gfs2_fs(void)
163 gfs2_unregister_debugfs(); 163 gfs2_unregister_debugfs();
164 unregister_filesystem(&gfs2_fs_type); 164 unregister_filesystem(&gfs2_fs_type);
165 unregister_filesystem(&gfs2meta_fs_type); 165 unregister_filesystem(&gfs2meta_fs_type);
166 slow_work_unregister_user(); 166 slow_work_unregister_user(THIS_MODULE);
167 167
168 kmem_cache_destroy(gfs2_quotad_cachep); 168 kmem_cache_destroy(gfs2_quotad_cachep);
169 kmem_cache_destroy(gfs2_rgrpd_cachep); 169 kmem_cache_destroy(gfs2_rgrpd_cachep);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 52fb6c048981..edfee24f3636 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -18,6 +18,7 @@
18#include <linux/mount.h> 18#include <linux/mount.h>
19#include <linux/gfs2_ondisk.h> 19#include <linux/gfs2_ondisk.h>
20#include <linux/slow-work.h> 20#include <linux/slow-work.h>
21#include <linux/quotaops.h>
21 22
22#include "gfs2.h" 23#include "gfs2.h"
23#include "incore.h" 24#include "incore.h"
@@ -62,13 +63,10 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
62 gt->gt_quota_warn_period = 10; 63 gt->gt_quota_warn_period = 10;
63 gt->gt_quota_scale_num = 1; 64 gt->gt_quota_scale_num = 1;
64 gt->gt_quota_scale_den = 1; 65 gt->gt_quota_scale_den = 1;
65 gt->gt_quota_quantum = 60;
66 gt->gt_new_files_jdata = 0; 66 gt->gt_new_files_jdata = 0;
67 gt->gt_max_readahead = 1 << 18; 67 gt->gt_max_readahead = 1 << 18;
68 gt->gt_stall_secs = 600; 68 gt->gt_stall_secs = 600;
69 gt->gt_complain_secs = 10; 69 gt->gt_complain_secs = 10;
70 gt->gt_statfs_quantum = 30;
71 gt->gt_statfs_slow = 0;
72} 70}
73 71
74static struct gfs2_sbd *init_sbd(struct super_block *sb) 72static struct gfs2_sbd *init_sbd(struct super_block *sb)
@@ -1114,7 +1112,7 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
1114 * Returns: errno 1112 * Returns: errno
1115 */ 1113 */
1116 1114
1117static int fill_super(struct super_block *sb, void *data, int silent) 1115static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
1118{ 1116{
1119 struct gfs2_sbd *sdp; 1117 struct gfs2_sbd *sdp;
1120 struct gfs2_holder mount_gh; 1118 struct gfs2_holder mount_gh;
@@ -1125,17 +1123,7 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1125 printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n"); 1123 printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");
1126 return -ENOMEM; 1124 return -ENOMEM;
1127 } 1125 }
1128 1126 sdp->sd_args = *args;
1129 sdp->sd_args.ar_quota = GFS2_QUOTA_DEFAULT;
1130 sdp->sd_args.ar_data = GFS2_DATA_DEFAULT;
1131 sdp->sd_args.ar_commit = 60;
1132 sdp->sd_args.ar_errors = GFS2_ERRORS_DEFAULT;
1133
1134 error = gfs2_mount_args(sdp, &sdp->sd_args, data);
1135 if (error) {
1136 printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
1137 goto fail;
1138 }
1139 1127
1140 if (sdp->sd_args.ar_spectator) { 1128 if (sdp->sd_args.ar_spectator) {
1141 sb->s_flags |= MS_RDONLY; 1129 sb->s_flags |= MS_RDONLY;
@@ -1143,11 +1131,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1143 } 1131 }
1144 if (sdp->sd_args.ar_posix_acl) 1132 if (sdp->sd_args.ar_posix_acl)
1145 sb->s_flags |= MS_POSIXACL; 1133 sb->s_flags |= MS_POSIXACL;
1134 if (sdp->sd_args.ar_nobarrier)
1135 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1146 1136
1147 sb->s_magic = GFS2_MAGIC; 1137 sb->s_magic = GFS2_MAGIC;
1148 sb->s_op = &gfs2_super_ops; 1138 sb->s_op = &gfs2_super_ops;
1149 sb->s_export_op = &gfs2_export_ops; 1139 sb->s_export_op = &gfs2_export_ops;
1150 sb->s_xattr = gfs2_xattr_handlers; 1140 sb->s_xattr = gfs2_xattr_handlers;
1141 sb->s_qcop = &gfs2_quotactl_ops;
1142 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
1151 sb->s_time_gran = 1; 1143 sb->s_time_gran = 1;
1152 sb->s_maxbytes = MAX_LFS_FILESIZE; 1144 sb->s_maxbytes = MAX_LFS_FILESIZE;
1153 1145
@@ -1160,6 +1152,15 @@ static int fill_super(struct super_block *sb, void *data, int silent)
1160 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; 1152 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
1161 1153
1162 sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit; 1154 sdp->sd_tune.gt_log_flush_secs = sdp->sd_args.ar_commit;
1155 sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
1156 if (sdp->sd_args.ar_statfs_quantum) {
1157 sdp->sd_tune.gt_statfs_slow = 0;
1158 sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
1159 }
1160 else {
1161 sdp->sd_tune.gt_statfs_slow = 1;
1162 sdp->sd_tune.gt_statfs_quantum = 30;
1163 }
1163 1164
1164 error = init_names(sdp, silent); 1165 error = init_names(sdp, silent);
1165 if (error) 1166 if (error)
@@ -1243,18 +1244,127 @@ fail:
1243 return error; 1244 return error;
1244} 1245}
1245 1246
1246static int gfs2_get_sb(struct file_system_type *fs_type, int flags, 1247static int set_gfs2_super(struct super_block *s, void *data)
1247 const char *dev_name, void *data, struct vfsmount *mnt)
1248{ 1248{
1249 return get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt); 1249 s->s_bdev = data;
1250 s->s_dev = s->s_bdev->bd_dev;
1251
1252 /*
1253 * We set the bdi here to the queue backing, file systems can
1254 * overwrite this in ->fill_super()
1255 */
1256 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
1257 return 0;
1250} 1258}
1251 1259
1252static int test_meta_super(struct super_block *s, void *ptr) 1260static int test_gfs2_super(struct super_block *s, void *ptr)
1253{ 1261{
1254 struct block_device *bdev = ptr; 1262 struct block_device *bdev = ptr;
1255 return (bdev == s->s_bdev); 1263 return (bdev == s->s_bdev);
1256} 1264}
1257 1265
1266/**
1267 * gfs2_get_sb - Get the GFS2 superblock
1268 * @fs_type: The GFS2 filesystem type
1269 * @flags: Mount flags
1270 * @dev_name: The name of the device
1271 * @data: The mount arguments
1272 * @mnt: The vfsmnt for this mount
1273 *
1274 * Q. Why not use get_sb_bdev() ?
1275 * A. We need to select one of two root directories to mount, independent
1276 * of whether this is the initial, or subsequent, mount of this sb
1277 *
1278 * Returns: 0 or -ve on error
1279 */
1280
1281static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
1282 const char *dev_name, void *data, struct vfsmount *mnt)
1283{
1284 struct block_device *bdev;
1285 struct super_block *s;
1286 fmode_t mode = FMODE_READ;
1287 int error;
1288 struct gfs2_args args;
1289 struct gfs2_sbd *sdp;
1290
1291 if (!(flags & MS_RDONLY))
1292 mode |= FMODE_WRITE;
1293
1294 bdev = open_bdev_exclusive(dev_name, mode, fs_type);
1295 if (IS_ERR(bdev))
1296 return PTR_ERR(bdev);
1297
1298 /*
1299 * once the super is inserted into the list by sget, s_umount
1300 * will protect the lockfs code from trying to start a snapshot
1301 * while we are mounting
1302 */
1303 mutex_lock(&bdev->bd_fsfreeze_mutex);
1304 if (bdev->bd_fsfreeze_count > 0) {
1305 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1306 error = -EBUSY;
1307 goto error_bdev;
1308 }
1309 s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev);
1310 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1311 error = PTR_ERR(s);
1312 if (IS_ERR(s))
1313 goto error_bdev;
1314
1315 memset(&args, 0, sizeof(args));
1316 args.ar_quota = GFS2_QUOTA_DEFAULT;
1317 args.ar_data = GFS2_DATA_DEFAULT;
1318 args.ar_commit = 60;
1319 args.ar_statfs_quantum = 30;
1320 args.ar_quota_quantum = 60;
1321 args.ar_errors = GFS2_ERRORS_DEFAULT;
1322
1323 error = gfs2_mount_args(&args, data);
1324 if (error) {
1325 printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
1326 if (s->s_root)
1327 goto error_super;
1328 deactivate_locked_super(s);
1329 return error;
1330 }
1331
1332 if (s->s_root) {
1333 error = -EBUSY;
1334 if ((flags ^ s->s_flags) & MS_RDONLY)
1335 goto error_super;
1336 close_bdev_exclusive(bdev, mode);
1337 } else {
1338 char b[BDEVNAME_SIZE];
1339
1340 s->s_flags = flags;
1341 s->s_mode = mode;
1342 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
1343 sb_set_blocksize(s, block_size(bdev));
1344 error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
1345 if (error) {
1346 deactivate_locked_super(s);
1347 return error;
1348 }
1349 s->s_flags |= MS_ACTIVE;
1350 bdev->bd_super = s;
1351 }
1352
1353 sdp = s->s_fs_info;
1354 mnt->mnt_sb = s;
1355 if (args.ar_meta)
1356 mnt->mnt_root = dget(sdp->sd_master_dir);
1357 else
1358 mnt->mnt_root = dget(sdp->sd_root_dir);
1359 return 0;
1360
1361error_super:
1362 deactivate_locked_super(s);
1363error_bdev:
1364 close_bdev_exclusive(bdev, mode);
1365 return error;
1366}
1367
1258static int set_meta_super(struct super_block *s, void *ptr) 1368static int set_meta_super(struct super_block *s, void *ptr)
1259{ 1369{
1260 return -EINVAL; 1370 return -EINVAL;
@@ -1274,13 +1384,17 @@ static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
1274 dev_name, error); 1384 dev_name, error);
1275 return error; 1385 return error;
1276 } 1386 }
1277 s = sget(&gfs2_fs_type, test_meta_super, set_meta_super, 1387 s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super,
1278 path.dentry->d_inode->i_sb->s_bdev); 1388 path.dentry->d_inode->i_sb->s_bdev);
1279 path_put(&path); 1389 path_put(&path);
1280 if (IS_ERR(s)) { 1390 if (IS_ERR(s)) {
1281 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n"); 1391 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
1282 return PTR_ERR(s); 1392 return PTR_ERR(s);
1283 } 1393 }
1394 if ((flags ^ s->s_flags) & MS_RDONLY) {
1395 deactivate_locked_super(s);
1396 return -EBUSY;
1397 }
1284 sdp = s->s_fs_info; 1398 sdp = s->s_fs_info;
1285 mnt->mnt_sb = s; 1399 mnt->mnt_sb = s;
1286 mnt->mnt_root = dget(sdp->sd_master_dir); 1400 mnt->mnt_root = dget(sdp->sd_master_dir);
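
The new gfs2_get_sb() seeds struct gfs2_args with defaults (quota, data, commit=60, statfs_quantum=30, quota_quantum=60, errors) before handing the option string to gfs2_mount_args(), and fill_super() now just copies the finished args into sd_args and sd_tune. A hedged illustration of supplying those tunables at mount time; the option spellings are assumed to mirror the ar_* field names added in incore.h, since the parser itself is not shown in this hunk:

/* Sketch: mounting GFS2 with the tunables introduced above.
 * Device, mount point and option spellings are assumptions; the
 * values echo the defaults set up in gfs2_get_sb().
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	const char *opts = "commit=60,statfs_quantum=30,quota_quantum=60";

	if (mount("/dev/vg0/gfs2vol", "/mnt/gfs2", "gfs2", 0, opts)) {
		perror("mount");
		return 1;
	}
	printf("mounted with: %s\n", opts);
	return 0;
}
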
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 2e9b9326bfc9..e3bf6eab8750 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -15,7 +15,7 @@
15 * fuzziness in the current usage value of IDs that are being used on different 15 * fuzziness in the current usage value of IDs that are being used on different
16 * nodes in the cluster simultaneously. So, it is possible for a user on 16 * nodes in the cluster simultaneously. So, it is possible for a user on
17 * multiple nodes to overrun their quota, but that overrun is controlable. 17 * multiple nodes to overrun their quota, but that overrun is controlable.
18 * Since quota tags are part of transactions, there is no need to a quota check 18 * Since quota tags are part of transactions, there is no need for a quota check
19 * program to be run on node crashes or anything like that. 19 * program to be run on node crashes or anything like that.
20 * 20 *
21 * There are couple of knobs that let the administrator manage the quota 21 * There are couple of knobs that let the administrator manage the quota
@@ -47,6 +47,8 @@
47#include <linux/gfs2_ondisk.h> 47#include <linux/gfs2_ondisk.h>
48#include <linux/kthread.h> 48#include <linux/kthread.h>
49#include <linux/freezer.h> 49#include <linux/freezer.h>
50#include <linux/quota.h>
51#include <linux/dqblk_xfs.h>
50 52
51#include "gfs2.h" 53#include "gfs2.h"
52#include "incore.h" 54#include "incore.h"
@@ -65,13 +67,6 @@
65#define QUOTA_USER 1 67#define QUOTA_USER 1
66#define QUOTA_GROUP 0 68#define QUOTA_GROUP 0
67 69
68struct gfs2_quota_host {
69 u64 qu_limit;
70 u64 qu_warn;
71 s64 qu_value;
72 u32 qu_ll_next;
73};
74
75struct gfs2_quota_change_host { 70struct gfs2_quota_change_host {
76 u64 qc_change; 71 u64 qc_change;
77 u32 qc_flags; /* GFS2_QCF_... */ 72 u32 qc_flags; /* GFS2_QCF_... */
@@ -164,7 +159,7 @@ fail:
164 return error; 159 return error;
165} 160}
166 161
167static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create, 162static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
168 struct gfs2_quota_data **qdp) 163 struct gfs2_quota_data **qdp)
169{ 164{
170 struct gfs2_quota_data *qd = NULL, *new_qd = NULL; 165 struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
@@ -202,7 +197,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
202 197
203 spin_unlock(&qd_lru_lock); 198 spin_unlock(&qd_lru_lock);
204 199
205 if (qd || !create) { 200 if (qd) {
206 if (new_qd) { 201 if (new_qd) {
207 gfs2_glock_put(new_qd->qd_gl); 202 gfs2_glock_put(new_qd->qd_gl);
208 kmem_cache_free(gfs2_quotad_cachep, new_qd); 203 kmem_cache_free(gfs2_quotad_cachep, new_qd);
@@ -461,12 +456,12 @@ static void qd_unlock(struct gfs2_quota_data *qd)
461 qd_put(qd); 456 qd_put(qd);
462} 457}
463 458
464static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create, 459static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
465 struct gfs2_quota_data **qdp) 460 struct gfs2_quota_data **qdp)
466{ 461{
467 int error; 462 int error;
468 463
469 error = qd_get(sdp, user, id, create, qdp); 464 error = qd_get(sdp, user, id, qdp);
470 if (error) 465 if (error)
471 return error; 466 return error;
472 467
@@ -508,20 +503,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
508 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 503 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
509 return 0; 504 return 0;
510 505
511 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd); 506 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
512 if (error) 507 if (error)
513 goto out; 508 goto out;
514 al->al_qd_num++; 509 al->al_qd_num++;
515 qd++; 510 qd++;
516 511
517 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd); 512 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
518 if (error) 513 if (error)
519 goto out; 514 goto out;
520 al->al_qd_num++; 515 al->al_qd_num++;
521 qd++; 516 qd++;
522 517
523 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) { 518 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
524 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd); 519 error = qdsb_get(sdp, QUOTA_USER, uid, qd);
525 if (error) 520 if (error)
526 goto out; 521 goto out;
527 al->al_qd_num++; 522 al->al_qd_num++;
@@ -529,7 +524,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
529 } 524 }
530 525
531 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) { 526 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
532 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd); 527 error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
533 if (error) 528 if (error)
534 goto out; 529 goto out;
535 al->al_qd_num++; 530 al->al_qd_num++;
@@ -617,48 +612,36 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
617 mutex_unlock(&sdp->sd_quota_mutex); 612 mutex_unlock(&sdp->sd_quota_mutex);
618} 613}
619 614
620static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
621{
622 const struct gfs2_quota *str = buf;
623
624 qu->qu_limit = be64_to_cpu(str->qu_limit);
625 qu->qu_warn = be64_to_cpu(str->qu_warn);
626 qu->qu_value = be64_to_cpu(str->qu_value);
627 qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
628}
629
630static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
631{
632 struct gfs2_quota *str = buf;
633
634 str->qu_limit = cpu_to_be64(qu->qu_limit);
635 str->qu_warn = cpu_to_be64(qu->qu_warn);
636 str->qu_value = cpu_to_be64(qu->qu_value);
637 str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
638 memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
639}
640
641/** 615/**
642 * gfs2_adjust_quota 616 * gfs2_adjust_quota - adjust record of current block usage
617 * @ip: The quota inode
618 * @loc: Offset of the entry in the quota file
619 * @change: The amount of usage change to record
620 * @qd: The quota data
621 * @fdq: The updated limits to record
643 * 622 *
644 * This function was mostly borrowed from gfs2_block_truncate_page which was 623 * This function was mostly borrowed from gfs2_block_truncate_page which was
645 * in turn mostly borrowed from ext3 624 * in turn mostly borrowed from ext3
625 *
626 * Returns: 0 or -ve on error
646 */ 627 */
628
647static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, 629static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
648 s64 change, struct gfs2_quota_data *qd) 630 s64 change, struct gfs2_quota_data *qd,
631 struct fs_disk_quota *fdq)
649{ 632{
650 struct inode *inode = &ip->i_inode; 633 struct inode *inode = &ip->i_inode;
651 struct address_space *mapping = inode->i_mapping; 634 struct address_space *mapping = inode->i_mapping;
652 unsigned long index = loc >> PAGE_CACHE_SHIFT; 635 unsigned long index = loc >> PAGE_CACHE_SHIFT;
653 unsigned offset = loc & (PAGE_CACHE_SIZE - 1); 636 unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
654 unsigned blocksize, iblock, pos; 637 unsigned blocksize, iblock, pos;
655 struct buffer_head *bh; 638 struct buffer_head *bh, *dibh;
656 struct page *page; 639 struct page *page;
657 void *kaddr; 640 void *kaddr;
658 char *ptr; 641 struct gfs2_quota *qp;
659 struct gfs2_quota_host qp;
660 s64 value; 642 s64 value;
661 int err = -EIO; 643 int err = -EIO;
644 u64 size;
662 645
663 if (gfs2_is_stuffed(ip)) 646 if (gfs2_is_stuffed(ip))
664 gfs2_unstuff_dinode(ip, NULL); 647 gfs2_unstuff_dinode(ip, NULL);
@@ -700,18 +683,38 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
700 gfs2_trans_add_bh(ip->i_gl, bh, 0); 683 gfs2_trans_add_bh(ip->i_gl, bh, 0);
701 684
702 kaddr = kmap_atomic(page, KM_USER0); 685 kaddr = kmap_atomic(page, KM_USER0);
703 ptr = kaddr + offset; 686 qp = kaddr + offset;
704 gfs2_quota_in(&qp, ptr); 687 value = (s64)be64_to_cpu(qp->qu_value) + change;
705 qp.qu_value += change; 688 qp->qu_value = cpu_to_be64(value);
706 value = qp.qu_value; 689 qd->qd_qb.qb_value = qp->qu_value;
707 gfs2_quota_out(&qp, ptr); 690 if (fdq) {
691 if (fdq->d_fieldmask & FS_DQ_BSOFT) {
692 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
693 qd->qd_qb.qb_warn = qp->qu_warn;
694 }
695 if (fdq->d_fieldmask & FS_DQ_BHARD) {
696 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
697 qd->qd_qb.qb_limit = qp->qu_limit;
698 }
699 }
708 flush_dcache_page(page); 700 flush_dcache_page(page);
709 kunmap_atomic(kaddr, KM_USER0); 701 kunmap_atomic(kaddr, KM_USER0);
710 err = 0; 702
711 qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC); 703 err = gfs2_meta_inode_buffer(ip, &dibh);
712 qd->qd_qb.qb_value = cpu_to_be64(value); 704 if (err)
713 ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC); 705 goto unlock;
714 ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value); 706
707 size = loc + sizeof(struct gfs2_quota);
708 if (size > inode->i_size) {
709 ip->i_disksize = size;
710 i_size_write(inode, size);
711 }
712 inode->i_mtime = inode->i_atime = CURRENT_TIME;
713 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
714 gfs2_dinode_out(ip, dibh->b_data);
715 brelse(dibh);
716 mark_inode_dirty(inode);
717
715unlock: 718unlock:
716 unlock_page(page); 719 unlock_page(page);
717 page_cache_release(page); 720 page_cache_release(page);
@@ -739,9 +742,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
739 return -ENOMEM; 742 return -ENOMEM;
740 743
741 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); 744 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
745 mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
742 for (qx = 0; qx < num_qd; qx++) { 746 for (qx = 0; qx < num_qd; qx++) {
743 error = gfs2_glock_nq_init(qda[qx]->qd_gl, 747 error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
744 LM_ST_EXCLUSIVE,
745 GL_NOCACHE, &ghs[qx]); 748 GL_NOCACHE, &ghs[qx]);
746 if (error) 749 if (error)
747 goto out; 750 goto out;
@@ -795,9 +798,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
795 for (x = 0; x < num_qd; x++) { 798 for (x = 0; x < num_qd; x++) {
796 qd = qda[x]; 799 qd = qda[x];
797 offset = qd2offset(qd); 800 offset = qd2offset(qd);
798 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, 801 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
799 (struct gfs2_quota_data *)
800 qd);
801 if (error) 802 if (error)
802 goto out_end_trans; 803 goto out_end_trans;
803 804
@@ -817,21 +818,44 @@ out_gunlock:
817out: 818out:
818 while (qx--) 819 while (qx--)
819 gfs2_glock_dq_uninit(&ghs[qx]); 820 gfs2_glock_dq_uninit(&ghs[qx]);
821 mutex_unlock(&ip->i_inode.i_mutex);
820 kfree(ghs); 822 kfree(ghs);
821 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); 823 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
822 return error; 824 return error;
823} 825}
824 826
827static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
828{
829 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
830 struct gfs2_quota q;
831 struct gfs2_quota_lvb *qlvb;
832 loff_t pos;
833 int error;
834
835 memset(&q, 0, sizeof(struct gfs2_quota));
836 pos = qd2offset(qd);
837 error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
838 if (error < 0)
839 return error;
840
841 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
842 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
843 qlvb->__pad = 0;
844 qlvb->qb_limit = q.qu_limit;
845 qlvb->qb_warn = q.qu_warn;
846 qlvb->qb_value = q.qu_value;
847 qd->qd_qb = *qlvb;
848
849 return 0;
850}
851
825static int do_glock(struct gfs2_quota_data *qd, int force_refresh, 852static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
826 struct gfs2_holder *q_gh) 853 struct gfs2_holder *q_gh)
827{ 854{
828 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 855 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
829 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 856 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
830 struct gfs2_holder i_gh; 857 struct gfs2_holder i_gh;
831 struct gfs2_quota_host q;
832 char buf[sizeof(struct gfs2_quota)];
833 int error; 858 int error;
834 struct gfs2_quota_lvb *qlvb;
835 859
836restart: 860restart:
837 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); 861 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
@@ -841,11 +865,9 @@ restart:
841 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; 865 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
842 866
843 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { 867 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
844 loff_t pos;
845 gfs2_glock_dq_uninit(q_gh); 868 gfs2_glock_dq_uninit(q_gh);
846 error = gfs2_glock_nq_init(qd->qd_gl, 869 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
847 LM_ST_EXCLUSIVE, GL_NOCACHE, 870 GL_NOCACHE, q_gh);
848 q_gh);
849 if (error) 871 if (error)
850 return error; 872 return error;
851 873
@@ -853,29 +875,14 @@ restart:
853 if (error) 875 if (error)
854 goto fail; 876 goto fail;
855 877
856 memset(buf, 0, sizeof(struct gfs2_quota)); 878 error = update_qd(sdp, qd);
857 pos = qd2offset(qd); 879 if (error)
858 error = gfs2_internal_read(ip, NULL, buf, &pos,
859 sizeof(struct gfs2_quota));
860 if (error < 0)
861 goto fail_gunlock; 880 goto fail_gunlock;
862 881
863 gfs2_glock_dq_uninit(&i_gh); 882 gfs2_glock_dq_uninit(&i_gh);
864 883 gfs2_glock_dq_uninit(q_gh);
865 gfs2_quota_in(&q, buf); 884 force_refresh = 0;
866 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; 885 goto restart;
867 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
868 qlvb->__pad = 0;
869 qlvb->qb_limit = cpu_to_be64(q.qu_limit);
870 qlvb->qb_warn = cpu_to_be64(q.qu_warn);
871 qlvb->qb_value = cpu_to_be64(q.qu_value);
872 qd->qd_qb = *qlvb;
873
874 if (gfs2_glock_is_blocking(qd->qd_gl)) {
875 gfs2_glock_dq_uninit(q_gh);
876 force_refresh = 0;
877 goto restart;
878 }
879 } 886 }
880 887
881 return 0; 888 return 0;
@@ -995,7 +1002,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
995{ 1002{
996 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 1003 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
997 1004
998 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n", 1005 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
999 sdp->sd_fsname, type, 1006 sdp->sd_fsname, type,
1000 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group", 1007 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
1001 qd->qd_id); 1008 qd->qd_id);
@@ -1032,6 +1039,10 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1032 1039
1033 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { 1040 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
1034 print_message(qd, "exceeded"); 1041 print_message(qd, "exceeded");
1042 quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1043 USRQUOTA : GRPQUOTA, qd->qd_id,
1044 sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
1045
1035 error = -EDQUOT; 1046 error = -EDQUOT;
1036 break; 1047 break;
1037 } else if (be64_to_cpu(qd->qd_qb.qb_warn) && 1048 } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
@@ -1039,6 +1050,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
1039 time_after_eq(jiffies, qd->qd_last_warn + 1050 time_after_eq(jiffies, qd->qd_last_warn +
1040 gfs2_tune_get(sdp, 1051 gfs2_tune_get(sdp,
1041 gt_quota_warn_period) * HZ)) { 1052 gt_quota_warn_period) * HZ)) {
1053 quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
1054 USRQUOTA : GRPQUOTA, qd->qd_id,
1055 sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
1042 error = print_message(qd, "warning"); 1056 error = print_message(qd, "warning");
1043 qd->qd_last_warn = jiffies; 1057 qd->qd_last_warn = jiffies;
1044 } 1058 }
@@ -1069,8 +1083,9 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1069 } 1083 }
1070} 1084}
1071 1085
1072int gfs2_quota_sync(struct gfs2_sbd *sdp) 1086int gfs2_quota_sync(struct super_block *sb, int type)
1073{ 1087{
1088 struct gfs2_sbd *sdp = sb->s_fs_info;
1074 struct gfs2_quota_data **qda; 1089 struct gfs2_quota_data **qda;
1075 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync); 1090 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1076 unsigned int num_qd; 1091 unsigned int num_qd;
@@ -1118,7 +1133,7 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1118 struct gfs2_holder q_gh; 1133 struct gfs2_holder q_gh;
1119 int error; 1134 int error;
1120 1135
1121 error = qd_get(sdp, user, id, CREATE, &qd); 1136 error = qd_get(sdp, user, id, &qd);
1122 if (error) 1137 if (error)
1123 return error; 1138 return error;
1124 1139
@@ -1127,7 +1142,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1127 gfs2_glock_dq_uninit(&q_gh); 1142 gfs2_glock_dq_uninit(&q_gh);
1128 1143
1129 qd_put(qd); 1144 qd_put(qd);
1130
1131 return error; 1145 return error;
1132} 1146}
1133 1147
@@ -1298,12 +1312,12 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1298} 1312}
1299 1313
1300static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, 1314static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1301 int (*fxn)(struct gfs2_sbd *sdp), 1315 int (*fxn)(struct super_block *sb, int type),
1302 unsigned long t, unsigned long *timeo, 1316 unsigned long t, unsigned long *timeo,
1303 unsigned int *new_timeo) 1317 unsigned int *new_timeo)
1304{ 1318{
1305 if (t >= *timeo) { 1319 if (t >= *timeo) {
1306 int error = fxn(sdp); 1320 int error = fxn(sdp->sd_vfs, 0);
1307 quotad_error(sdp, msg, error); 1321 quotad_error(sdp, msg, error);
1308 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; 1322 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1309 } else { 1323 } else {
@@ -1330,6 +1344,14 @@ static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1330 } 1344 }
1331} 1345}
1332 1346
1347void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
1348 if (!sdp->sd_statfs_force_sync) {
1349 sdp->sd_statfs_force_sync = 1;
1350 wake_up(&sdp->sd_quota_wait);
1351 }
1352}
1353
1354
1333/** 1355/**
1334 * gfs2_quotad - Write cached quota changes into the quota file 1356 * gfs2_quotad - Write cached quota changes into the quota file
1335 * @sdp: Pointer to GFS2 superblock 1357 * @sdp: Pointer to GFS2 superblock
@@ -1349,8 +1371,15 @@ int gfs2_quotad(void *data)
1349 while (!kthread_should_stop()) { 1371 while (!kthread_should_stop()) {
1350 1372
1351 /* Update the master statfs file */ 1373 /* Update the master statfs file */
1352 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t, 1374 if (sdp->sd_statfs_force_sync) {
1353 &statfs_timeo, &tune->gt_statfs_quantum); 1375 int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
1376 quotad_error(sdp, "statfs", error);
1377 statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
1378 }
1379 else
1380 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1381 &statfs_timeo,
1382 &tune->gt_statfs_quantum);
1354 1383
1355 /* Update quota file */ 1384 /* Update quota file */
1356 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, 1385 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
@@ -1367,7 +1396,7 @@ int gfs2_quotad(void *data)
1367 spin_lock(&sdp->sd_trunc_lock); 1396 spin_lock(&sdp->sd_trunc_lock);
1368 empty = list_empty(&sdp->sd_trunc_list); 1397 empty = list_empty(&sdp->sd_trunc_list);
1369 spin_unlock(&sdp->sd_trunc_lock); 1398 spin_unlock(&sdp->sd_trunc_lock);
1370 if (empty) 1399 if (empty && !sdp->sd_statfs_force_sync)
1371 t -= schedule_timeout(t); 1400 t -= schedule_timeout(t);
1372 else 1401 else
1373 t = 0; 1402 t = 0;
@@ -1377,3 +1406,181 @@ int gfs2_quotad(void *data)
1377 return 0; 1406 return 0;
1378} 1407}
1379 1408
1409static int gfs2_quota_get_xstate(struct super_block *sb,
1410 struct fs_quota_stat *fqs)
1411{
1412 struct gfs2_sbd *sdp = sb->s_fs_info;
1413
1414 memset(fqs, 0, sizeof(struct fs_quota_stat));
1415 fqs->qs_version = FS_QSTAT_VERSION;
1416 if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
1417 fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
1418 else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
1419 fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
1420 if (sdp->sd_quota_inode) {
1421 fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
1422 fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
1423 }
1424 fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
1425 fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
1426 fqs->qs_incoredqs = atomic_read(&qd_lru_count);
1427 return 0;
1428}
1429
1430static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
1431 struct fs_disk_quota *fdq)
1432{
1433 struct gfs2_sbd *sdp = sb->s_fs_info;
1434 struct gfs2_quota_lvb *qlvb;
1435 struct gfs2_quota_data *qd;
1436 struct gfs2_holder q_gh;
1437 int error;
1438
1439 memset(fdq, 0, sizeof(struct fs_disk_quota));
1440
1441 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1442 return -ESRCH; /* Crazy XFS error code */
1443
1444 if (type == USRQUOTA)
1445 type = QUOTA_USER;
1446 else if (type == GRPQUOTA)
1447 type = QUOTA_GROUP;
1448 else
1449 return -EINVAL;
1450
1451 error = qd_get(sdp, type, id, &qd);
1452 if (error)
1453 return error;
1454 error = do_glock(qd, FORCE, &q_gh);
1455 if (error)
1456 goto out;
1457
1458 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
1459 fdq->d_version = FS_DQUOT_VERSION;
1460 fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
1461 fdq->d_id = id;
1462 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
1463 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
1464 fdq->d_bcount = be64_to_cpu(qlvb->qb_value);
1465
1466 gfs2_glock_dq_uninit(&q_gh);
1467out:
1468 qd_put(qd);
1469 return error;
1470}
1471
1472/* GFS2 only supports a subset of the XFS fields */
1473#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
1474
1475static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
1476 struct fs_disk_quota *fdq)
1477{
1478 struct gfs2_sbd *sdp = sb->s_fs_info;
1479 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
1480 struct gfs2_quota_data *qd;
1481 struct gfs2_holder q_gh, i_gh;
1482 unsigned int data_blocks, ind_blocks;
1483 unsigned int blocks = 0;
1484 int alloc_required;
1485 struct gfs2_alloc *al;
1486 loff_t offset;
1487 int error;
1488
1489 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
1490 return -ESRCH; /* Crazy XFS error code */
1491
1492 switch(type) {
1493 case USRQUOTA:
1494 type = QUOTA_USER;
1495 if (fdq->d_flags != XFS_USER_QUOTA)
1496 return -EINVAL;
1497 break;
1498 case GRPQUOTA:
1499 type = QUOTA_GROUP;
1500 if (fdq->d_flags != XFS_GROUP_QUOTA)
1501 return -EINVAL;
1502 break;
1503 default:
1504 return -EINVAL;
1505 }
1506
1507 if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
1508 return -EINVAL;
1509 if (fdq->d_id != id)
1510 return -EINVAL;
1511
1512 error = qd_get(sdp, type, id, &qd);
1513 if (error)
1514 return error;
1515
1516 mutex_lock(&ip->i_inode.i_mutex);
1517 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
1518 if (error)
1519 goto out_put;
1520 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1521 if (error)
1522 goto out_q;
1523
1524 /* Check for existing entry, if none then alloc new blocks */
1525 error = update_qd(sdp, qd);
1526 if (error)
1527 goto out_i;
1528
1529 /* If nothing has changed, this is a no-op */
1530 if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1531 (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
1532 fdq->d_fieldmask ^= FS_DQ_BSOFT;
1533 if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1534 (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
1535 fdq->d_fieldmask ^= FS_DQ_BHARD;
1536 if (fdq->d_fieldmask == 0)
1537 goto out_i;
1538
1539 offset = qd2offset(qd);
1540 error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
1541 &alloc_required);
1542 if (error)
1543 goto out_i;
1544 if (alloc_required) {
1545 al = gfs2_alloc_get(ip);
1546 if (al == NULL)
1547 goto out_i;
1548 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
1549 &data_blocks, &ind_blocks);
1550 blocks = al->al_requested = 1 + data_blocks + ind_blocks;
1551 error = gfs2_inplace_reserve(ip);
1552 if (error)
1553 goto out_alloc;
1554 }
1555
1556 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
1557 if (error)
1558 goto out_release;
1559
1560 /* Apply changes */
1561 error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
1562
1563 gfs2_trans_end(sdp);
1564out_release:
1565 if (alloc_required) {
1566 gfs2_inplace_release(ip);
1567out_alloc:
1568 gfs2_alloc_put(ip);
1569 }
1570out_i:
1571 gfs2_glock_dq_uninit(&i_gh);
1572out_q:
1573 gfs2_glock_dq_uninit(&q_gh);
1574out_put:
1575 mutex_unlock(&ip->i_inode.i_mutex);
1576 qd_put(qd);
1577 return error;
1578}
1579
1580const struct quotactl_ops gfs2_quotactl_ops = {
1581 .quota_sync = gfs2_quota_sync,
1582 .get_xstate = gfs2_quota_get_xstate,
1583 .get_xquota = gfs2_xquota_get,
1584 .set_xquota = gfs2_xquota_set,
1585};
1586
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 0fa5fa63d0e8..e271fa07ad02 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -25,13 +25,15 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
25extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, 25extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
26 u32 uid, u32 gid); 26 u32 uid, u32 gid);
27 27
28extern int gfs2_quota_sync(struct gfs2_sbd *sdp); 28extern int gfs2_quota_sync(struct super_block *sb, int type);
29extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); 29extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
30 30
31extern int gfs2_quota_init(struct gfs2_sbd *sdp); 31extern int gfs2_quota_init(struct gfs2_sbd *sdp);
32extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp); 32extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
33extern int gfs2_quotad(void *data); 33extern int gfs2_quotad(void *data);
34 34
35extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
36
35static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) 37static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
36{ 38{
37 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 39 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -50,5 +52,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
50} 52}
51 53
52extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask); 54extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
55extern const struct quotactl_ops gfs2_quotactl_ops;
53 56
54#endif /* __QUOTA_DOT_H__ */ 57#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 59d2695509d3..4b9bece3d437 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -7,6 +7,7 @@
7 * of the GNU General Public License version 2. 7 * of the GNU General Public License version 2.
8 */ 8 */
9 9
10#include <linux/module.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
11#include <linux/spinlock.h> 12#include <linux/spinlock.h>
12#include <linux/completion.h> 13#include <linux/completion.h>
@@ -409,7 +410,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
409 memset(lh, 0, sizeof(struct gfs2_log_header)); 410 memset(lh, 0, sizeof(struct gfs2_log_header));
410 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 411 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
411 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH); 412 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
413 lh->lh_header.__pad0 = cpu_to_be64(0);
412 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH); 414 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
415 lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
413 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1); 416 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1);
414 lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT); 417 lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT);
415 lh->lh_blkno = cpu_to_be32(lblock); 418 lh->lh_blkno = cpu_to_be32(lblock);
@@ -593,6 +596,7 @@ fail:
593} 596}
594 597
595struct slow_work_ops gfs2_recover_ops = { 598struct slow_work_ops gfs2_recover_ops = {
599 .owner = THIS_MODULE,
596 .get_ref = gfs2_recover_get_ref, 600 .get_ref = gfs2_recover_get_ref,
597 .put_ref = gfs2_recover_put_ref, 601 .put_ref = gfs2_recover_put_ref,
598 .execute = gfs2_recover_work, 602 .execute = gfs2_recover_work,
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 8f1cfb02a6cb..0608f490c295 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1710,11 +1710,16 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
1710{ 1710{
1711 struct gfs2_rgrpd *rgd; 1711 struct gfs2_rgrpd *rgd;
1712 struct gfs2_holder ri_gh, rgd_gh; 1712 struct gfs2_holder ri_gh, rgd_gh;
1713 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
1714 int ri_locked = 0;
1713 int error; 1715 int error;
1714 1716
1715 error = gfs2_rindex_hold(sdp, &ri_gh); 1717 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
1716 if (error) 1718 error = gfs2_rindex_hold(sdp, &ri_gh);
1717 goto fail; 1719 if (error)
1720 goto fail;
1721 ri_locked = 1;
1722 }
1718 1723
1719 error = -EINVAL; 1724 error = -EINVAL;
1720 rgd = gfs2_blk2rgrpd(sdp, no_addr); 1725 rgd = gfs2_blk2rgrpd(sdp, no_addr);
@@ -1730,7 +1735,8 @@ int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
1730 1735
1731 gfs2_glock_dq_uninit(&rgd_gh); 1736 gfs2_glock_dq_uninit(&rgd_gh);
1732fail_rindex: 1737fail_rindex:
1733 gfs2_glock_dq_uninit(&ri_gh); 1738 if (ri_locked)
1739 gfs2_glock_dq_uninit(&ri_gh);
1734fail: 1740fail:
1735 return error; 1741 return error;
1736} 1742}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 0ec3ec672de1..c282ad41f3d1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -70,6 +70,11 @@ enum {
70 Opt_commit, 70 Opt_commit,
71 Opt_err_withdraw, 71 Opt_err_withdraw,
72 Opt_err_panic, 72 Opt_err_panic,
73 Opt_statfs_quantum,
74 Opt_statfs_percent,
75 Opt_quota_quantum,
76 Opt_barrier,
77 Opt_nobarrier,
73 Opt_error, 78 Opt_error,
74}; 79};
75 80
@@ -101,18 +106,23 @@ static const match_table_t tokens = {
101 {Opt_commit, "commit=%d"}, 106 {Opt_commit, "commit=%d"},
102 {Opt_err_withdraw, "errors=withdraw"}, 107 {Opt_err_withdraw, "errors=withdraw"},
103 {Opt_err_panic, "errors=panic"}, 108 {Opt_err_panic, "errors=panic"},
109 {Opt_statfs_quantum, "statfs_quantum=%d"},
110 {Opt_statfs_percent, "statfs_percent=%d"},
111 {Opt_quota_quantum, "quota_quantum=%d"},
112 {Opt_barrier, "barrier"},
113 {Opt_nobarrier, "nobarrier"},
104 {Opt_error, NULL} 114 {Opt_error, NULL}
105}; 115};
106 116
107/** 117/**
108 * gfs2_mount_args - Parse mount options 118 * gfs2_mount_args - Parse mount options
109 * @sdp: 119 * @args: The structure into which the parsed options will be written
110 * @data: 120 * @options: The options to parse
111 * 121 *
112 * Return: errno 122 * Return: errno
113 */ 123 */
114 124
115int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options) 125int gfs2_mount_args(struct gfs2_args *args, char *options)
116{ 126{
117 char *o; 127 char *o;
118 int token; 128 int token;
@@ -157,7 +167,7 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
157 break; 167 break;
158 case Opt_debug: 168 case Opt_debug:
159 if (args->ar_errors == GFS2_ERRORS_PANIC) { 169 if (args->ar_errors == GFS2_ERRORS_PANIC) {
160 fs_info(sdp, "-o debug and -o errors=panic " 170 printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
161 "are mutually exclusive.\n"); 171 "are mutually exclusive.\n");
162 return -EINVAL; 172 return -EINVAL;
163 } 173 }
@@ -210,7 +220,29 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
210 case Opt_commit: 220 case Opt_commit:
211 rv = match_int(&tmp[0], &args->ar_commit); 221 rv = match_int(&tmp[0], &args->ar_commit);
212 if (rv || args->ar_commit <= 0) { 222 if (rv || args->ar_commit <= 0) {
213 fs_info(sdp, "commit mount option requires a positive numeric argument\n"); 223 printk(KERN_WARNING "GFS2: commit mount option requires a positive numeric argument\n");
224 return rv ? rv : -EINVAL;
225 }
226 break;
227 case Opt_statfs_quantum:
228 rv = match_int(&tmp[0], &args->ar_statfs_quantum);
229 if (rv || args->ar_statfs_quantum < 0) {
230 printk(KERN_WARNING "GFS2: statfs_quantum mount option requires a non-negative numeric argument\n");
231 return rv ? rv : -EINVAL;
232 }
233 break;
234 case Opt_quota_quantum:
235 rv = match_int(&tmp[0], &args->ar_quota_quantum);
236 if (rv || args->ar_quota_quantum <= 0) {
237 printk(KERN_WARNING "GFS2: quota_quantum mount option requires a positive numeric argument\n");
238 return rv ? rv : -EINVAL;
239 }
240 break;
241 case Opt_statfs_percent:
242 rv = match_int(&tmp[0], &args->ar_statfs_percent);
243 if (rv || args->ar_statfs_percent < 0 ||
244 args->ar_statfs_percent > 100) {
245 printk(KERN_WARNING "statfs_percent mount option requires a numeric argument between 0 and 100\n");
214 return rv ? rv : -EINVAL; 246 return rv ? rv : -EINVAL;
215 } 247 }
216 break; 248 break;
@@ -219,15 +251,21 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
219 break; 251 break;
220 case Opt_err_panic: 252 case Opt_err_panic:
221 if (args->ar_debug) { 253 if (args->ar_debug) {
222 fs_info(sdp, "-o debug and -o errors=panic " 254 printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
223 "are mutually exclusive.\n"); 255 "are mutually exclusive.\n");
224 return -EINVAL; 256 return -EINVAL;
225 } 257 }
226 args->ar_errors = GFS2_ERRORS_PANIC; 258 args->ar_errors = GFS2_ERRORS_PANIC;
227 break; 259 break;
260 case Opt_barrier:
261 args->ar_nobarrier = 0;
262 break;
263 case Opt_nobarrier:
264 args->ar_nobarrier = 1;
265 break;
228 case Opt_error: 266 case Opt_error:
229 default: 267 default:
230 fs_info(sdp, "invalid mount option: %s\n", o); 268 printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o);
231 return -EINVAL; 269 return -EINVAL;
232 } 270 }
233 } 271 }
@@ -442,7 +480,10 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
442{ 480{
443 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 481 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
444 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 482 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
483 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
445 struct buffer_head *l_bh; 484 struct buffer_head *l_bh;
485 s64 x, y;
486 int need_sync = 0;
446 int error; 487 int error;
447 488
448 error = gfs2_meta_inode_buffer(l_ip, &l_bh); 489 error = gfs2_meta_inode_buffer(l_ip, &l_bh);
@@ -456,9 +497,17 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
456 l_sc->sc_free += free; 497 l_sc->sc_free += free;
457 l_sc->sc_dinodes += dinodes; 498 l_sc->sc_dinodes += dinodes;
458 gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode)); 499 gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
500 if (sdp->sd_args.ar_statfs_percent) {
501 x = 100 * l_sc->sc_free;
502 y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
503 if (x >= y || x <= -y)
504 need_sync = 1;
505 }
459 spin_unlock(&sdp->sd_statfs_spin); 506 spin_unlock(&sdp->sd_statfs_spin);
460 507
461 brelse(l_bh); 508 brelse(l_bh);
509 if (need_sync)
510 gfs2_wake_up_statfs(sdp);
462} 511}
463 512
464void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, 513void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
@@ -484,8 +533,9 @@ void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
484 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); 533 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
485} 534}
486 535
487int gfs2_statfs_sync(struct gfs2_sbd *sdp) 536int gfs2_statfs_sync(struct super_block *sb, int type)
488{ 537{
538 struct gfs2_sbd *sdp = sb->s_fs_info;
489 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 539 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
490 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 540 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
491 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 541 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
@@ -521,6 +571,7 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
521 goto out_bh2; 571 goto out_bh2;
522 572
523 update_statfs(sdp, m_bh, l_bh); 573 update_statfs(sdp, m_bh, l_bh);
574 sdp->sd_statfs_force_sync = 0;
524 575
525 gfs2_trans_end(sdp); 576 gfs2_trans_end(sdp);
526 577
@@ -712,8 +763,8 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
712 int error; 763 int error;
713 764
714 flush_workqueue(gfs2_delete_workqueue); 765 flush_workqueue(gfs2_delete_workqueue);
715 gfs2_quota_sync(sdp); 766 gfs2_quota_sync(sdp->sd_vfs, 0);
716 gfs2_statfs_sync(sdp); 767 gfs2_statfs_sync(sdp->sd_vfs, 0);
717 768
718 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE, 769 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
719 &t_gh); 770 &t_gh);
@@ -1061,8 +1112,13 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1061 1112
1062 spin_lock(&gt->gt_spin); 1113 spin_lock(&gt->gt_spin);
1063 args.ar_commit = gt->gt_log_flush_secs; 1114 args.ar_commit = gt->gt_log_flush_secs;
1115 args.ar_quota_quantum = gt->gt_quota_quantum;
1116 if (gt->gt_statfs_slow)
1117 args.ar_statfs_quantum = 0;
1118 else
1119 args.ar_statfs_quantum = gt->gt_statfs_quantum;
1064 spin_unlock(&gt->gt_spin); 1120 spin_unlock(&gt->gt_spin);
1065 error = gfs2_mount_args(sdp, &args, data); 1121 error = gfs2_mount_args(&args, data);
1066 if (error) 1122 if (error)
1067 return error; 1123 return error;
1068 1124
@@ -1097,8 +1153,21 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
1097 sb->s_flags |= MS_POSIXACL; 1153 sb->s_flags |= MS_POSIXACL;
1098 else 1154 else
1099 sb->s_flags &= ~MS_POSIXACL; 1155 sb->s_flags &= ~MS_POSIXACL;
1156 if (sdp->sd_args.ar_nobarrier)
1157 set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1158 else
1159 clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
1100 spin_lock(&gt->gt_spin); 1160 spin_lock(&gt->gt_spin);
1101 gt->gt_log_flush_secs = args.ar_commit; 1161 gt->gt_log_flush_secs = args.ar_commit;
1162 gt->gt_quota_quantum = args.ar_quota_quantum;
1163 if (args.ar_statfs_quantum) {
1164 gt->gt_statfs_slow = 0;
1165 gt->gt_statfs_quantum = args.ar_statfs_quantum;
1166 }
1167 else {
1168 gt->gt_statfs_slow = 1;
1169 gt->gt_statfs_quantum = 30;
1170 }
1102 spin_unlock(&gt->gt_spin); 1171 spin_unlock(&gt->gt_spin);
1103 1172
1104 gfs2_online_uevent(sdp); 1173 gfs2_online_uevent(sdp);
@@ -1179,7 +1248,7 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1179{ 1248{
1180 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info; 1249 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
1181 struct gfs2_args *args = &sdp->sd_args; 1250 struct gfs2_args *args = &sdp->sd_args;
1182 int lfsecs; 1251 int val;
1183 1252
1184 if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir)) 1253 if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
1185 seq_printf(s, ",meta"); 1254 seq_printf(s, ",meta");
@@ -1240,9 +1309,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1240 } 1309 }
1241 if (args->ar_discard) 1310 if (args->ar_discard)
1242 seq_printf(s, ",discard"); 1311 seq_printf(s, ",discard");
1243 lfsecs = sdp->sd_tune.gt_log_flush_secs; 1312 val = sdp->sd_tune.gt_log_flush_secs;
1244 if (lfsecs != 60) 1313 if (val != 60)
1245 seq_printf(s, ",commit=%d", lfsecs); 1314 seq_printf(s, ",commit=%d", val);
1315 val = sdp->sd_tune.gt_statfs_quantum;
1316 if (val != 30)
1317 seq_printf(s, ",statfs_quantum=%d", val);
1318 val = sdp->sd_tune.gt_quota_quantum;
1319 if (val != 60)
1320 seq_printf(s, ",quota_quantum=%d", val);
1321 if (args->ar_statfs_percent)
1322 seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
1246 if (args->ar_errors != GFS2_ERRORS_DEFAULT) { 1323 if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
1247 const char *state; 1324 const char *state;
1248 1325
@@ -1259,6 +1336,9 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1259 } 1336 }
1260 seq_printf(s, ",errors=%s", state); 1337 seq_printf(s, ",errors=%s", state);
1261 } 1338 }
1339 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
1340 seq_printf(s, ",nobarrier");
1341
1262 return 0; 1342 return 0;
1263} 1343}
1264 1344
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 235db3682885..3df60f2d84e3 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -27,7 +27,7 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
27 27
28extern void gfs2_jindex_free(struct gfs2_sbd *sdp); 28extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
29 29
30extern int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *data); 30extern int gfs2_mount_args(struct gfs2_args *args, char *data);
31 31
32extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); 32extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
33extern int gfs2_jdesc_check(struct gfs2_jdesc *jd); 33extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
@@ -44,7 +44,7 @@ extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
44 const void *buf); 44 const void *buf);
45extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh, 45extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
46 struct buffer_head *l_bh); 46 struct buffer_head *l_bh);
47extern int gfs2_statfs_sync(struct gfs2_sbd *sdp); 47extern int gfs2_statfs_sync(struct super_block *sb, int type);
48 48
49extern int gfs2_freeze_fs(struct gfs2_sbd *sdp); 49extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
50extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); 50extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 446329728d52..c5dad1eb7b91 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -158,7 +158,7 @@ static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
158 if (simple_strtol(buf, NULL, 0) != 1) 158 if (simple_strtol(buf, NULL, 0) != 1)
159 return -EINVAL; 159 return -EINVAL;
160 160
161 gfs2_statfs_sync(sdp); 161 gfs2_statfs_sync(sdp->sd_vfs, 0);
162 return len; 162 return len;
163} 163}
164 164
@@ -171,13 +171,14 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
171 if (simple_strtol(buf, NULL, 0) != 1) 171 if (simple_strtol(buf, NULL, 0) != 1)
172 return -EINVAL; 172 return -EINVAL;
173 173
174 gfs2_quota_sync(sdp); 174 gfs2_quota_sync(sdp->sd_vfs, 0);
175 return len; 175 return len;
176} 176}
177 177
178static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf, 178static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
179 size_t len) 179 size_t len)
180{ 180{
181 int error;
181 u32 id; 182 u32 id;
182 183
183 if (!capable(CAP_SYS_ADMIN)) 184 if (!capable(CAP_SYS_ADMIN))
@@ -185,13 +186,14 @@ static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
185 186
186 id = simple_strtoul(buf, NULL, 0); 187 id = simple_strtoul(buf, NULL, 0);
187 188
188 gfs2_quota_refresh(sdp, 1, id); 189 error = gfs2_quota_refresh(sdp, 1, id);
189 return len; 190 return error ? error : len;
190} 191}
191 192
192static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf, 193static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
193 size_t len) 194 size_t len)
194{ 195{
196 int error;
195 u32 id; 197 u32 id;
196 198
197 if (!capable(CAP_SYS_ADMIN)) 199 if (!capable(CAP_SYS_ADMIN))
@@ -199,8 +201,8 @@ static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
199 201
200 id = simple_strtoul(buf, NULL, 0); 202 id = simple_strtoul(buf, NULL, 0);
201 203
202 gfs2_quota_refresh(sdp, 0, id); 204 error = gfs2_quota_refresh(sdp, 0, id);
203 return len; 205 return error ? error : len;
204} 206}
205 207
206static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 208static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 8a0f8ef6ee27..912f5cbc4740 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -186,8 +186,8 @@ static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
186 return 0; 186 return 0;
187} 187}
188 188
189int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, 189static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
190 struct gfs2_ea_location *el) 190 struct gfs2_ea_location *el)
191{ 191{
192 struct ea_find ef; 192 struct ea_find ef;
193 int error; 193 int error;
@@ -516,8 +516,8 @@ out:
516 return error; 516 return error;
517} 517}
518 518
519int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, 519static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
520 char *data, size_t size) 520 char *data, size_t size)
521{ 521{
522 int ret; 522 int ret;
523 size_t len = GFS2_EA_DATA_LEN(el->el_ea); 523 size_t len = GFS2_EA_DATA_LEN(el->el_ea);
@@ -534,6 +534,36 @@ int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
534 return len; 534 return len;
535} 535}
536 536
537int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
538{
539 struct gfs2_ea_location el;
540 int error;
541 int len;
542 char *data;
543
544 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
545 if (error)
546 return error;
547 if (!el.el_ea)
548 goto out;
549 if (!GFS2_EA_DATA_LEN(el.el_ea))
550 goto out;
551
552 len = GFS2_EA_DATA_LEN(el.el_ea);
553 data = kmalloc(len, GFP_NOFS);
554 error = -ENOMEM;
555 if (data == NULL)
556 goto out;
557
558 error = gfs2_ea_get_copy(ip, &el, data, len);
559 if (error == 0)
560 error = len;
561 *ppdata = data;
562out:
563 brelse(el.el_bh);
564 return error;
565}
566
537/** 567/**
538 * gfs2_xattr_get - Get a GFS2 extended attribute 568 * gfs2_xattr_get - Get a GFS2 extended attribute
539 * @inode: The inode 569 * @inode: The inode
@@ -1259,22 +1289,26 @@ fail:
1259 return error; 1289 return error;
1260} 1290}
1261 1291
1262int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el, 1292int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
1263 struct iattr *attr, char *data)
1264{ 1293{
1294 struct gfs2_ea_location el;
1265 struct buffer_head *dibh; 1295 struct buffer_head *dibh;
1266 int error; 1296 int error;
1267 1297
1268 if (GFS2_EA_IS_STUFFED(el->el_ea)) { 1298 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, GFS2_POSIX_ACL_ACCESS, &el);
1299 if (error)
1300 return error;
1301
1302 if (GFS2_EA_IS_STUFFED(el.el_ea)) {
1269 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); 1303 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
1270 if (error) 1304 if (error)
1271 return error; 1305 return error;
1272 1306
1273 gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1); 1307 gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
1274 memcpy(GFS2_EA2DATA(el->el_ea), data, 1308 memcpy(GFS2_EA2DATA(el.el_ea), data,
1275 GFS2_EA_DATA_LEN(el->el_ea)); 1309 GFS2_EA_DATA_LEN(el.el_ea));
1276 } else 1310 } else
1277 error = ea_acl_chmod_unstuffed(ip, el->el_ea, data); 1311 error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
1278 1312
1279 if (error) 1313 if (error)
1280 return error; 1314 return error;
@@ -1507,18 +1541,6 @@ static int gfs2_xattr_user_set(struct inode *inode, const char *name,
1507 return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags); 1541 return gfs2_xattr_set(inode, GFS2_EATYPE_USR, name, value, size, flags);
1508} 1542}
1509 1543
1510static int gfs2_xattr_system_get(struct inode *inode, const char *name,
1511 void *buffer, size_t size)
1512{
1513 return gfs2_xattr_get(inode, GFS2_EATYPE_SYS, name, buffer, size);
1514}
1515
1516static int gfs2_xattr_system_set(struct inode *inode, const char *name,
1517 const void *value, size_t size, int flags)
1518{
1519 return gfs2_xattr_set(inode, GFS2_EATYPE_SYS, name, value, size, flags);
1520}
1521
1522static int gfs2_xattr_security_get(struct inode *inode, const char *name, 1544static int gfs2_xattr_security_get(struct inode *inode, const char *name,
1523 void *buffer, size_t size) 1545 void *buffer, size_t size)
1524{ 1546{
@@ -1543,12 +1565,6 @@ static struct xattr_handler gfs2_xattr_security_handler = {
1543 .set = gfs2_xattr_security_set, 1565 .set = gfs2_xattr_security_set,
1544}; 1566};
1545 1567
1546static struct xattr_handler gfs2_xattr_system_handler = {
1547 .prefix = XATTR_SYSTEM_PREFIX,
1548 .get = gfs2_xattr_system_get,
1549 .set = gfs2_xattr_system_set,
1550};
1551
1552struct xattr_handler *gfs2_xattr_handlers[] = { 1568struct xattr_handler *gfs2_xattr_handlers[] = {
1553 &gfs2_xattr_user_handler, 1569 &gfs2_xattr_user_handler,
1554 &gfs2_xattr_security_handler, 1570 &gfs2_xattr_security_handler,
diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h
index cbdfd7743733..8d6ae5813c4d 100644
--- a/fs/gfs2/xattr.h
+++ b/fs/gfs2/xattr.h
@@ -62,11 +62,7 @@ extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
62 62
63/* Exported to acl.c */ 63/* Exported to acl.c */
64 64
65extern int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, 65extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
66 struct gfs2_ea_location *el); 66extern int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data);
67extern int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
68 char *data, size_t size);
69extern int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
70 struct iattr *attr, char *data);
71 67
72#endif /* __EATTR_DOT_H__ */ 68#endif /* __EATTR_DOT_H__ */
diff --git a/fs/inode.c b/fs/inode.c
index 4d8e3be55976..06c1f02de611 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -18,7 +18,6 @@
18#include <linux/hash.h> 18#include <linux/hash.h>
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/security.h> 20#include <linux/security.h>
21#include <linux/ima.h>
22#include <linux/pagemap.h> 21#include <linux/pagemap.h>
23#include <linux/cdev.h> 22#include <linux/cdev.h>
24#include <linux/bootmem.h> 23#include <linux/bootmem.h>
@@ -157,11 +156,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
157 156
158 if (security_inode_alloc(inode)) 157 if (security_inode_alloc(inode))
159 goto out; 158 goto out;
160
161 /* allocate and initialize an i_integrity */
162 if (ima_inode_alloc(inode))
163 goto out_free_security;
164
165 spin_lock_init(&inode->i_lock); 159 spin_lock_init(&inode->i_lock);
166 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); 160 lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
167 161
@@ -201,9 +195,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
201#endif 195#endif
202 196
203 return 0; 197 return 0;
204
205out_free_security:
206 security_inode_free(inode);
207out: 198out:
208 return -ENOMEM; 199 return -ENOMEM;
209} 200}
@@ -235,7 +226,6 @@ static struct inode *alloc_inode(struct super_block *sb)
235void __destroy_inode(struct inode *inode) 226void __destroy_inode(struct inode *inode)
236{ 227{
237 BUG_ON(inode_has_buffers(inode)); 228 BUG_ON(inode_has_buffers(inode));
238 ima_inode_free(inode);
239 security_inode_free(inode); 229 security_inode_free(inode);
240 fsnotify_inode_delete(inode); 230 fsnotify_inode_delete(inode);
241#ifdef CONFIG_FS_POSIX_ACL 231#ifdef CONFIG_FS_POSIX_ACL
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c
index cfe05c1966a5..3f39be1b0455 100644
--- a/fs/jffs2/read.c
+++ b/fs/jffs2/read.c
@@ -164,12 +164,15 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
164 164
165 /* XXX FIXME: Where a single physical node actually shows up in two 165 /* XXX FIXME: Where a single physical node actually shows up in two
166 frags, we read it twice. Don't do that. */ 166 frags, we read it twice. Don't do that. */
167 /* Now we're pointing at the first frag which overlaps our page */ 167 /* Now we're pointing at the first frag which overlaps our page
168 * (or perhaps is before it, if we've been asked to read off the
169 * end of the file). */
168 while(offset < end) { 170 while(offset < end) {
169 D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end)); 171 D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
170 if (unlikely(!frag || frag->ofs > offset)) { 172 if (unlikely(!frag || frag->ofs > offset ||
173 frag->ofs + frag->size <= offset)) {
171 uint32_t holesize = end - offset; 174 uint32_t holesize = end - offset;
172 if (frag) { 175 if (frag && frag->ofs > offset) {
173 D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); 176 D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
174 holesize = min(holesize, frag->ofs - offset); 177 holesize = min(holesize, frag->ofs - offset);
175 } 178 }
diff --git a/fs/namespace.c b/fs/namespace.c
index bdc3cb4fd222..7d70d63ceb29 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1921,6 +1921,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
1921 if (data_page) 1921 if (data_page)
1922 ((char *)data_page)[PAGE_SIZE - 1] = 0; 1922 ((char *)data_page)[PAGE_SIZE - 1] = 0;
1923 1923
1924 /* ... and get the mountpoint */
1925 retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
1926 if (retval)
1927 return retval;
1928
1929 retval = security_sb_mount(dev_name, &path,
1930 type_page, flags, data_page);
1931 if (retval)
1932 goto dput_out;
1933
1924 /* Default to relatime unless overriden */ 1934 /* Default to relatime unless overriden */
1925 if (!(flags & MS_NOATIME)) 1935 if (!(flags & MS_NOATIME))
1926 mnt_flags |= MNT_RELATIME; 1936 mnt_flags |= MNT_RELATIME;
@@ -1945,16 +1955,6 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
1945 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | 1955 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
1946 MS_STRICTATIME); 1956 MS_STRICTATIME);
1947 1957
1948 /* ... and get the mountpoint */
1949 retval = kern_path(dir_name, LOOKUP_FOLLOW, &path);
1950 if (retval)
1951 return retval;
1952
1953 retval = security_sb_mount(dev_name, &path,
1954 type_page, flags, data_page);
1955 if (retval)
1956 goto dput_out;
1957
1958 if (flags & MS_REMOUNT) 1958 if (flags & MS_REMOUNT)
1959 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, 1959 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
1960 data_page); 1960 data_page);
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 70fad69eb959..fa588006588d 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -359,17 +359,13 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
359 359
360 BUG_ON(!cookie); 360 BUG_ON(!cookie);
361 361
362 if (fscache_check_page_write(cookie, page)) {
363 if (!(gfp & __GFP_WAIT))
364 return 0;
365 fscache_wait_on_page_write(cookie, page);
366 }
367
368 if (PageFsCache(page)) { 362 if (PageFsCache(page)) {
369 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n", 363 dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
370 cookie, page, nfsi); 364 cookie, page, nfsi);
371 365
372 fscache_uncache_page(cookie, page); 366 if (!fscache_maybe_release_page(cookie, page, gfp))
367 return 0;
368
373 nfs_add_fscache_stats(page->mapping->host, 369 nfs_add_fscache_stats(page->mapping->host,
374 NFSIOS_FSCACHE_PAGES_UNCACHED, 1); 370 NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
375 } 371 }
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ff37454fa783..741a562177fc 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2767,7 +2767,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
2767 .pages = &page, 2767 .pages = &page,
2768 .pgbase = 0, 2768 .pgbase = 0,
2769 .count = count, 2769 .count = count,
2770 .bitmask = NFS_SERVER(dentry->d_inode)->cache_consistency_bitmask, 2770 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
2771 }; 2771 };
2772 struct nfs4_readdir_res res; 2772 struct nfs4_readdir_res res;
2773 struct rpc_message msg = { 2773 struct rpc_message msg = {
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index edf926e1062f..d0a2ce1b4324 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -958,7 +958,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
958 p1 = encode_entry_baggage(cd, p1, name, namlen, ino); 958 p1 = encode_entry_baggage(cd, p1, name, namlen, ino);
959 959
960 if (plus) 960 if (plus)
961 p = encode_entryplus_baggage(cd, p1, name, namlen); 961 p1 = encode_entryplus_baggage(cd, p1, name, namlen);
962 962
963 /* determine entry word length and lengths to go in pages */ 963 /* determine entry word length and lengths to go in pages */
964 num_entry_words = p1 - tmp; 964 num_entry_words = p1 - tmp;
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 1c6cfb59128d..3f5d5d06f53c 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -871,7 +871,6 @@ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
871 * exclusive with a new mount job. Though it doesn't cover 871 * exclusive with a new mount job. Though it doesn't cover
872 * umount, it's enough for the purpose. 872 * umount, it's enough for the purpose.
873 */ 873 */
874 mutex_lock(&nilfs->ns_mount_mutex);
875 if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) { 874 if (nilfs_checkpoint_is_mounted(nilfs, cno, 1)) {
876 /* Current implementation does not have to protect 875 /* Current implementation does not have to protect
877 plain read-only mounts since they are exclusive 876 plain read-only mounts since they are exclusive
@@ -880,7 +879,6 @@ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
880 ret = -EBUSY; 879 ret = -EBUSY;
881 } else 880 } else
882 ret = nilfs_cpfile_clear_snapshot(cpfile, cno); 881 ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
883 mutex_unlock(&nilfs->ns_mount_mutex);
884 return ret; 882 return ret;
885 case NILFS_SNAPSHOT: 883 case NILFS_SNAPSHOT:
886 return nilfs_cpfile_set_snapshot(cpfile, cno); 884 return nilfs_cpfile_set_snapshot(cpfile, cno);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 5040220c3732..2a0a5a3ac134 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -664,7 +664,6 @@ int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
664 int err; 664 int err;
665 665
666 spin_lock(&sbi->s_inode_lock); 666 spin_lock(&sbi->s_inode_lock);
667 /* Caller of this function MUST lock s_inode_lock */
668 if (ii->i_bh == NULL) { 667 if (ii->i_bh == NULL) {
669 spin_unlock(&sbi->s_inode_lock); 668 spin_unlock(&sbi->s_inode_lock);
670 err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino, 669 err = nilfs_ifile_get_inode_block(sbi->s_ifile, inode->i_ino,
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index d24057d58f17..f6af76042d80 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -99,7 +99,8 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
99static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, 99static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
100 unsigned int cmd, void __user *argp) 100 unsigned int cmd, void __user *argp)
101{ 101{
102 struct inode *cpfile = NILFS_SB(inode->i_sb)->s_nilfs->ns_cpfile; 102 struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
103 struct inode *cpfile = nilfs->ns_cpfile;
103 struct nilfs_transaction_info ti; 104 struct nilfs_transaction_info ti;
104 struct nilfs_cpmode cpmode; 105 struct nilfs_cpmode cpmode;
105 int ret; 106 int ret;
@@ -109,14 +110,17 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
109 if (copy_from_user(&cpmode, argp, sizeof(cpmode))) 110 if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
110 return -EFAULT; 111 return -EFAULT;
111 112
113 mutex_lock(&nilfs->ns_mount_mutex);
112 nilfs_transaction_begin(inode->i_sb, &ti, 0); 114 nilfs_transaction_begin(inode->i_sb, &ti, 0);
113 ret = nilfs_cpfile_change_cpmode( 115 ret = nilfs_cpfile_change_cpmode(
114 cpfile, cpmode.cm_cno, cpmode.cm_mode); 116 cpfile, cpmode.cm_cno, cpmode.cm_mode);
115 if (unlikely(ret < 0)) { 117 if (unlikely(ret < 0)) {
116 nilfs_transaction_abort(inode->i_sb); 118 nilfs_transaction_abort(inode->i_sb);
119 mutex_unlock(&nilfs->ns_mount_mutex);
117 return ret; 120 return ret;
118 } 121 }
119 nilfs_transaction_commit(inode->i_sb); /* never fails */ 122 nilfs_transaction_commit(inode->i_sb); /* never fails */
123 mutex_unlock(&nilfs->ns_mount_mutex);
120 return ret; 124 return ret;
121} 125}
122 126
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 89fc8ee1f5a5..de059f490586 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1712,7 +1712,8 @@ int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
1712 struct super_block *sb = inode->i_sb; 1712 struct super_block *sb = inode->i_sb;
1713 1713
1714 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) || 1714 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
1715 !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) 1715 !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
1716 OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
1716 return 0; 1717 return 0;
1717 1718
1718 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits; 1719 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index eae404602424..d963d8638709 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -35,12 +35,7 @@
35#include <linux/kref.h> 35#include <linux/kref.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/lockdep.h> 37#include <linux/lockdep.h>
38#ifndef CONFIG_OCFS2_COMPAT_JBD 38#include <linux/jbd2.h>
39# include <linux/jbd2.h>
40#else
41# include <linux/jbd.h>
42# include "ocfs2_jbd_compat.h"
43#endif
44 39
45/* For union ocfs2_dlm_lksb */ 40/* For union ocfs2_dlm_lksb */
46#include "stackglue.h" 41#include "stackglue.h"
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 60287fc56bcb..3a0df7a1b810 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3743,6 +3743,9 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
3743 goto out; 3743 goto out;
3744 } 3744 }
3745 3745
3746 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
3747 goto attach_xattr;
3748
3746 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh); 3749 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
3747 3750
3748 size = i_size_read(inode); 3751 size = i_size_read(inode);
@@ -3769,6 +3772,7 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
3769 cpos += num_clusters; 3772 cpos += num_clusters;
3770 } 3773 }
3771 3774
3775attach_xattr:
3772 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) { 3776 if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
3773 ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh, 3777 ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
3774 &ref_tree->rf_ci, 3778 &ref_tree->rf_ci,
@@ -3858,6 +3862,49 @@ out:
3858 return ret; 3862 return ret;
3859} 3863}
3860 3864
3865static int ocfs2_duplicate_inline_data(struct inode *s_inode,
3866 struct buffer_head *s_bh,
3867 struct inode *t_inode,
3868 struct buffer_head *t_bh)
3869{
3870 int ret;
3871 handle_t *handle;
3872 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
3873 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
3874 struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
3875
3876 BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
3877
3878 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
3879 if (IS_ERR(handle)) {
3880 ret = PTR_ERR(handle);
3881 mlog_errno(ret);
3882 goto out;
3883 }
3884
3885 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
3886 OCFS2_JOURNAL_ACCESS_WRITE);
3887 if (ret) {
3888 mlog_errno(ret);
3889 goto out_commit;
3890 }
3891
3892 t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
3893 memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
3894 le16_to_cpu(s_di->id2.i_data.id_count));
3895 spin_lock(&OCFS2_I(t_inode)->ip_lock);
3896 OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
3897 t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
3898 spin_unlock(&OCFS2_I(t_inode)->ip_lock);
3899
3900 ocfs2_journal_dirty(handle, t_bh);
3901
3902out_commit:
3903 ocfs2_commit_trans(osb, handle);
3904out:
3905 return ret;
3906}
3907
3861static int ocfs2_duplicate_extent_list(struct inode *s_inode, 3908static int ocfs2_duplicate_extent_list(struct inode *s_inode,
3862 struct inode *t_inode, 3909 struct inode *t_inode,
3863 struct buffer_head *t_bh, 3910 struct buffer_head *t_bh,
@@ -3997,6 +4044,14 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
3997 goto out; 4044 goto out;
3998 } 4045 }
3999 4046
4047 if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
4048 ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
4049 t_inode, t_bh);
4050 if (ret)
4051 mlog_errno(ret);
4052 goto out;
4053 }
4054
4000 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc), 4055 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
4001 1, &ref_tree, &ref_root_bh); 4056 1, &ref_tree, &ref_root_bh);
4002 if (ret) { 4057 if (ret) {
@@ -4013,10 +4068,6 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
4013 goto out_unlock_refcount; 4068 goto out_unlock_refcount;
4014 } 4069 }
4015 4070
4016 ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh, preserve);
4017 if (ret)
4018 mlog_errno(ret);
4019
4020out_unlock_refcount: 4071out_unlock_refcount:
4021 ocfs2_unlock_refcount_tree(osb, ref_tree, 1); 4072 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
4022 brelse(ref_root_bh); 4073 brelse(ref_root_bh);
@@ -4068,9 +4119,17 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
4068 ret = ocfs2_reflink_xattrs(inode, old_bh, 4119 ret = ocfs2_reflink_xattrs(inode, old_bh,
4069 new_inode, new_bh, 4120 new_inode, new_bh,
4070 preserve); 4121 preserve);
4071 if (ret) 4122 if (ret) {
4072 mlog_errno(ret); 4123 mlog_errno(ret);
4124 goto inode_unlock;
4125 }
4073 } 4126 }
4127
4128 ret = ocfs2_complete_reflink(inode, old_bh,
4129 new_inode, new_bh, preserve);
4130 if (ret)
4131 mlog_errno(ret);
4132
4074inode_unlock: 4133inode_unlock:
4075 ocfs2_inode_unlock(new_inode, 1); 4134 ocfs2_inode_unlock(new_inode, 1);
4076 brelse(new_bh); 4135 brelse(new_bh);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c0e48aeebb1c..14f47d2bfe02 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -773,18 +773,20 @@ static int ocfs2_sb_probe(struct super_block *sb,
773 if (tmpstat < 0) { 773 if (tmpstat < 0) {
774 status = tmpstat; 774 status = tmpstat;
775 mlog_errno(status); 775 mlog_errno(status);
776 goto bail; 776 break;
777 } 777 }
778 di = (struct ocfs2_dinode *) (*bh)->b_data; 778 di = (struct ocfs2_dinode *) (*bh)->b_data;
779 memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats)); 779 memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats));
780 spin_lock_init(&stats->b_lock); 780 spin_lock_init(&stats->b_lock);
781 status = ocfs2_verify_volume(di, *bh, blksize, stats); 781 tmpstat = ocfs2_verify_volume(di, *bh, blksize, stats);
782 if (status >= 0) 782 if (tmpstat < 0) {
783 goto bail; 783 brelse(*bh);
784 brelse(*bh); 784 *bh = NULL;
785 *bh = NULL; 785 }
786 if (status != -EAGAIN) 786 if (tmpstat != -EAGAIN) {
787 status = tmpstat;
787 break; 788 break;
789 }
788 } 790 }
789 791
790bail: 792bail:
@@ -1645,6 +1647,10 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
1645 buf->f_bavail = buf->f_bfree; 1647 buf->f_bavail = buf->f_bfree;
1646 buf->f_files = numbits; 1648 buf->f_files = numbits;
1647 buf->f_ffree = freebits; 1649 buf->f_ffree = freebits;
1650 buf->f_fsid.val[0] = crc32_le(0, osb->uuid_str, OCFS2_VOL_UUID_LEN)
1651 & 0xFFFFFFFFUL;
1652 buf->f_fsid.val[1] = crc32_le(0, osb->uuid_str + OCFS2_VOL_UUID_LEN,
1653 OCFS2_VOL_UUID_LEN) & 0xFFFFFFFFUL;
1648 1654
1649 brelse(bh); 1655 brelse(bh);
1650 1656
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index b6284f235d2f..c61369342a27 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -53,11 +53,6 @@
53#include <linux/highmem.h> 53#include <linux/highmem.h>
54#include <linux/buffer_head.h> 54#include <linux/buffer_head.h>
55#include <linux/rbtree.h> 55#include <linux/rbtree.h>
56#ifndef CONFIG_OCFS2_COMPAT_JBD
57# include <linux/jbd2.h>
58#else
59# include <linux/jbd.h>
60#endif
61 56
62#define MLOG_MASK_PREFIX ML_UPTODATE 57#define MLOG_MASK_PREFIX ML_UPTODATE
63 58
diff --git a/fs/open.c b/fs/open.c
index 4f01e06227c6..b4b31d277f3a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -587,6 +587,9 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
587 error = -EPERM; 587 error = -EPERM;
588 if (!capable(CAP_SYS_CHROOT)) 588 if (!capable(CAP_SYS_CHROOT))
589 goto dput_and_out; 589 goto dput_and_out;
590 error = security_path_chroot(&path);
591 if (error)
592 goto dput_and_out;
590 593
591 set_fs_root(current->fs, &path); 594 set_fs_root(current->fs, &path);
592 error = 0; 595 error = 0;
@@ -617,11 +620,15 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
617 if (err) 620 if (err)
618 goto out_putf; 621 goto out_putf;
619 mutex_lock(&inode->i_mutex); 622 mutex_lock(&inode->i_mutex);
623 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
624 if (err)
625 goto out_unlock;
620 if (mode == (mode_t) -1) 626 if (mode == (mode_t) -1)
621 mode = inode->i_mode; 627 mode = inode->i_mode;
622 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); 628 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
623 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; 629 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
624 err = notify_change(dentry, &newattrs); 630 err = notify_change(dentry, &newattrs);
631out_unlock:
625 mutex_unlock(&inode->i_mutex); 632 mutex_unlock(&inode->i_mutex);
626 mnt_drop_write(file->f_path.mnt); 633 mnt_drop_write(file->f_path.mnt);
627out_putf: 634out_putf:
@@ -646,11 +653,15 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
646 if (error) 653 if (error)
647 goto dput_and_out; 654 goto dput_and_out;
648 mutex_lock(&inode->i_mutex); 655 mutex_lock(&inode->i_mutex);
656 error = security_path_chmod(path.dentry, path.mnt, mode);
657 if (error)
658 goto out_unlock;
649 if (mode == (mode_t) -1) 659 if (mode == (mode_t) -1)
650 mode = inode->i_mode; 660 mode = inode->i_mode;
651 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); 661 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
652 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; 662 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
653 error = notify_change(path.dentry, &newattrs); 663 error = notify_change(path.dentry, &newattrs);
664out_unlock:
654 mutex_unlock(&inode->i_mutex); 665 mutex_unlock(&inode->i_mutex);
655 mnt_drop_write(path.mnt); 666 mnt_drop_write(path.mnt);
656dput_and_out: 667dput_and_out:
@@ -664,9 +675,9 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
664 return sys_fchmodat(AT_FDCWD, filename, mode); 675 return sys_fchmodat(AT_FDCWD, filename, mode);
665} 676}
666 677
667static int chown_common(struct dentry * dentry, uid_t user, gid_t group) 678static int chown_common(struct path *path, uid_t user, gid_t group)
668{ 679{
669 struct inode *inode = dentry->d_inode; 680 struct inode *inode = path->dentry->d_inode;
670 int error; 681 int error;
671 struct iattr newattrs; 682 struct iattr newattrs;
672 683
@@ -683,7 +694,9 @@ static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
683 newattrs.ia_valid |= 694 newattrs.ia_valid |=
684 ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; 695 ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
685 mutex_lock(&inode->i_mutex); 696 mutex_lock(&inode->i_mutex);
686 error = notify_change(dentry, &newattrs); 697 error = security_path_chown(path, user, group);
698 if (!error)
699 error = notify_change(path->dentry, &newattrs);
687 mutex_unlock(&inode->i_mutex); 700 mutex_unlock(&inode->i_mutex);
688 701
689 return error; 702 return error;
@@ -700,7 +713,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
700 error = mnt_want_write(path.mnt); 713 error = mnt_want_write(path.mnt);
701 if (error) 714 if (error)
702 goto out_release; 715 goto out_release;
703 error = chown_common(path.dentry, user, group); 716 error = chown_common(&path, user, group);
704 mnt_drop_write(path.mnt); 717 mnt_drop_write(path.mnt);
705out_release: 718out_release:
706 path_put(&path); 719 path_put(&path);
@@ -725,7 +738,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
725 error = mnt_want_write(path.mnt); 738 error = mnt_want_write(path.mnt);
726 if (error) 739 if (error)
727 goto out_release; 740 goto out_release;
728 error = chown_common(path.dentry, user, group); 741 error = chown_common(&path, user, group);
729 mnt_drop_write(path.mnt); 742 mnt_drop_write(path.mnt);
730out_release: 743out_release:
731 path_put(&path); 744 path_put(&path);
@@ -744,7 +757,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
744 error = mnt_want_write(path.mnt); 757 error = mnt_want_write(path.mnt);
745 if (error) 758 if (error)
746 goto out_release; 759 goto out_release;
747 error = chown_common(path.dentry, user, group); 760 error = chown_common(&path, user, group);
748 mnt_drop_write(path.mnt); 761 mnt_drop_write(path.mnt);
749out_release: 762out_release:
750 path_put(&path); 763 path_put(&path);
@@ -767,7 +780,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
767 goto out_fput; 780 goto out_fput;
768 dentry = file->f_path.dentry; 781 dentry = file->f_path.dentry;
769 audit_inode(NULL, dentry); 782 audit_inode(NULL, dentry);
770 error = chown_common(dentry, user, group); 783 error = chown_common(&file->f_path, user, group);
771 mnt_drop_write(file->f_path.mnt); 784 mnt_drop_write(file->f_path.mnt);
772out_fput: 785out_fput:
773 fput(file); 786 fput(file);
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 07f77a7945c3..822c2d506518 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -571,7 +571,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
571 rsslim, 571 rsslim,
572 mm ? mm->start_code : 0, 572 mm ? mm->start_code : 0,
573 mm ? mm->end_code : 0, 573 mm ? mm->end_code : 0,
574 (permitted) ? task->stack_start : 0, 574 (permitted && mm) ? task->stack_start : 0,
575 esp, 575 esp,
576 eip, 576 eip,
577 /* The signal information here is obsolete. 577 /* The signal information here is obsolete.
diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig
index 8047e01ef46b..353e78a9ebee 100644
--- a/fs/quota/Kconfig
+++ b/fs/quota/Kconfig
@@ -17,7 +17,7 @@ config QUOTA
17 17
18config QUOTA_NETLINK_INTERFACE 18config QUOTA_NETLINK_INTERFACE
19 bool "Report quota messages through netlink interface" 19 bool "Report quota messages through netlink interface"
20 depends on QUOTA && NET 20 depends on QUOTACTL && NET
21 help 21 help
22 If you say Y here, quota warnings (about exceeding softlimit, reaching 22 If you say Y here, quota warnings (about exceeding softlimit, reaching
23 hardlimit, etc.) will be reported through netlink interface. If unsure, 23 hardlimit, etc.) will be reported through netlink interface. If unsure,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 39b49c42a7ed..9b6ad908dcb2 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -77,10 +77,6 @@
77#include <linux/capability.h> 77#include <linux/capability.h>
78#include <linux/quotaops.h> 78#include <linux/quotaops.h>
79#include <linux/writeback.h> /* for inode_lock, oddly enough.. */ 79#include <linux/writeback.h> /* for inode_lock, oddly enough.. */
80#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
81#include <net/netlink.h>
82#include <net/genetlink.h>
83#endif
84 80
85#include <asm/uaccess.h> 81#include <asm/uaccess.h>
86 82
@@ -1071,73 +1067,6 @@ static void print_warning(struct dquot *dquot, const int warntype)
1071} 1067}
1072#endif 1068#endif
1073 1069
1074#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
1075
1076/* Netlink family structure for quota */
1077static struct genl_family quota_genl_family = {
1078 .id = GENL_ID_GENERATE,
1079 .hdrsize = 0,
1080 .name = "VFS_DQUOT",
1081 .version = 1,
1082 .maxattr = QUOTA_NL_A_MAX,
1083};
1084
1085/* Send warning to userspace about user which exceeded quota */
1086static void send_warning(const struct dquot *dquot, const char warntype)
1087{
1088 static atomic_t seq;
1089 struct sk_buff *skb;
1090 void *msg_head;
1091 int ret;
1092 int msg_size = 4 * nla_total_size(sizeof(u32)) +
1093 2 * nla_total_size(sizeof(u64));
1094
1095 /* We have to allocate using GFP_NOFS as we are called from a
1096 * filesystem performing write and thus further recursion into
1097 * the fs to free some data could cause deadlocks. */
1098 skb = genlmsg_new(msg_size, GFP_NOFS);
1099 if (!skb) {
1100 printk(KERN_ERR
1101 "VFS: Not enough memory to send quota warning.\n");
1102 return;
1103 }
1104 msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
1105 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
1106 if (!msg_head) {
1107 printk(KERN_ERR
1108 "VFS: Cannot store netlink header in quota warning.\n");
1109 goto err_out;
1110 }
1111 ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
1112 if (ret)
1113 goto attr_err_out;
1114 ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
1115 if (ret)
1116 goto attr_err_out;
1117 ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
1118 if (ret)
1119 goto attr_err_out;
1120 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
1121 MAJOR(dquot->dq_sb->s_dev));
1122 if (ret)
1123 goto attr_err_out;
1124 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
1125 MINOR(dquot->dq_sb->s_dev));
1126 if (ret)
1127 goto attr_err_out;
1128 ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
1129 if (ret)
1130 goto attr_err_out;
1131 genlmsg_end(skb, msg_head);
1132
1133 genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
1134 return;
1135attr_err_out:
1136 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
1137err_out:
1138 kfree_skb(skb);
1139}
1140#endif
1141/* 1070/*
1142 * Write warnings to the console and send warning messages over netlink. 1071 * Write warnings to the console and send warning messages over netlink.
1143 * 1072 *
@@ -1145,18 +1074,20 @@ err_out:
1145 */ 1074 */
1146static void flush_warnings(struct dquot *const *dquots, char *warntype) 1075static void flush_warnings(struct dquot *const *dquots, char *warntype)
1147{ 1076{
1077 struct dquot *dq;
1148 int i; 1078 int i;
1149 1079
1150 for (i = 0; i < MAXQUOTAS; i++) 1080 for (i = 0; i < MAXQUOTAS; i++) {
1151 if (dquots[i] && warntype[i] != QUOTA_NL_NOWARN && 1081 dq = dquots[i];
1152 !warning_issued(dquots[i], warntype[i])) { 1082 if (dq && warntype[i] != QUOTA_NL_NOWARN &&
1083 !warning_issued(dq, warntype[i])) {
1153#ifdef CONFIG_PRINT_QUOTA_WARNING 1084#ifdef CONFIG_PRINT_QUOTA_WARNING
1154 print_warning(dquots[i], warntype[i]); 1085 print_warning(dq, warntype[i]);
1155#endif
1156#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
1157 send_warning(dquots[i], warntype[i]);
1158#endif 1086#endif
1087 quota_send_warning(dq->dq_type, dq->dq_id,
1088 dq->dq_sb->s_dev, warntype[i]);
1159 } 1089 }
1090 }
1160} 1091}
1161 1092
1162static int ignore_hardlimit(struct dquot *dquot) 1093static int ignore_hardlimit(struct dquot *dquot)
@@ -2607,12 +2538,6 @@ static int __init dquot_init(void)
2607 2538
2608 register_shrinker(&dqcache_shrinker); 2539 register_shrinker(&dqcache_shrinker);
2609 2540
2610#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
2611 if (genl_register_family(&quota_genl_family) != 0)
2612 printk(KERN_ERR
2613 "VFS: Failed to create quota netlink interface.\n");
2614#endif
2615
2616 return 0; 2541 return 0;
2617} 2542}
2618module_init(dquot_init); 2543module_init(dquot_init);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 95c5b42384b2..ee91e2756950 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -18,6 +18,8 @@
18#include <linux/capability.h> 18#include <linux/capability.h>
19#include <linux/quotaops.h> 19#include <linux/quotaops.h>
20#include <linux/types.h> 20#include <linux/types.h>
21#include <net/netlink.h>
22#include <net/genetlink.h>
21 23
22/* Check validity of generic quotactl commands */ 24/* Check validity of generic quotactl commands */
23static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, 25static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
@@ -525,3 +527,94 @@ asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
525 return ret; 527 return ret;
526} 528}
527#endif 529#endif
530
531
532#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
533
534/* Netlink family structure for quota */
535static struct genl_family quota_genl_family = {
536 .id = GENL_ID_GENERATE,
537 .hdrsize = 0,
538 .name = "VFS_DQUOT",
539 .version = 1,
540 .maxattr = QUOTA_NL_A_MAX,
541};
542
543/**
544 * quota_send_warning - Send warning to userspace about exceeded quota
 545 * @type: The quota type: USRQUOTA, GRPQUOTA,...
546 * @id: The user or group id of the quota that was exceeded
547 * @dev: The device on which the fs is mounted (sb->s_dev)
548 * @warntype: The type of the warning: QUOTA_NL_...
549 *
550 * This can be used by filesystems (including those which don't use
551 * dquot) to send a message to userspace relating to quota limits.
552 *
553 */
554
555void quota_send_warning(short type, unsigned int id, dev_t dev,
556 const char warntype)
557{
558 static atomic_t seq;
559 struct sk_buff *skb;
560 void *msg_head;
561 int ret;
562 int msg_size = 4 * nla_total_size(sizeof(u32)) +
563 2 * nla_total_size(sizeof(u64));
564
565 /* We have to allocate using GFP_NOFS as we are called from a
566 * filesystem performing write and thus further recursion into
567 * the fs to free some data could cause deadlocks. */
568 skb = genlmsg_new(msg_size, GFP_NOFS);
569 if (!skb) {
570 printk(KERN_ERR
571 "VFS: Not enough memory to send quota warning.\n");
572 return;
573 }
574 msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
575 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
576 if (!msg_head) {
577 printk(KERN_ERR
578 "VFS: Cannot store netlink header in quota warning.\n");
579 goto err_out;
580 }
581 ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type);
582 if (ret)
583 goto attr_err_out;
584 ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id);
585 if (ret)
586 goto attr_err_out;
587 ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
588 if (ret)
589 goto attr_err_out;
590 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev));
591 if (ret)
592 goto attr_err_out;
593 ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev));
594 if (ret)
595 goto attr_err_out;
596 ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid());
597 if (ret)
598 goto attr_err_out;
599 genlmsg_end(skb, msg_head);
600
601 genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
602 return;
603attr_err_out:
604 printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
605err_out:
606 kfree_skb(skb);
607}
608EXPORT_SYMBOL(quota_send_warning);
609
610static int __init quota_init(void)
611{
612 if (genl_register_family(&quota_genl_family) != 0)
613 printk(KERN_ERR
614 "VFS: Failed to create quota netlink interface.\n");
615 return 0;
616};
617
618module_init(quota_init);
619#endif
620
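
As a hedged illustration of the interface exported above (this sketch is not part of the patch), a filesystem that does its own quota accounting could report an exceeded block hard limit to userspace through quota_send_warning(); the function and variable names below are hypothetical, and only quota_send_warning(), USRQUOTA and QUOTA_NL_BHARDWARN come from the kernel headers:

#include <linux/fs.h>
#include <linux/quota.h>

/* Illustrative only: report a blown block hard limit for a given user. */
static void example_report_quota_exceeded(struct super_block *sb,
                                          unsigned int uid)
{
        /* type, id, device and warning type map onto the netlink attributes */
        quota_send_warning(USRQUOTA, uid, sb->s_dev, QUOTA_NL_BHARDWARN);
}
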
diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
index c6ad7c7e3ee9..05ac0fe9c4d3 100644
--- a/fs/xattr_acl.c
+++ b/fs/xattr_acl.c
@@ -36,7 +36,7 @@ posix_acl_from_xattr(const void *value, size_t size)
36 if (count == 0) 36 if (count == 0)
37 return NULL; 37 return NULL;
38 38
39 acl = posix_acl_alloc(count, GFP_KERNEL); 39 acl = posix_acl_alloc(count, GFP_NOFS);
40 if (!acl) 40 if (!acl)
41 return ERR_PTR(-ENOMEM); 41 return ERR_PTR(-ENOMEM);
42 acl_e = acl->a_entries; 42 acl_e = acl->a_entries;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1099395d7d6c..fb17f8226b09 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1980,7 +1980,7 @@ xlog_recover_do_reg_buffer(
1980 "XFS: NULL dquot in %s.", __func__); 1980 "XFS: NULL dquot in %s.", __func__);
1981 goto next; 1981 goto next;
1982 } 1982 }
1983 if (item->ri_buf[i].i_len < sizeof(xfs_dqblk_t)) { 1983 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1984 cmn_err(CE_ALERT, 1984 cmn_err(CE_ALERT,
1985 "XFS: dquot too small (%d) in %s.", 1985 "XFS: dquot too small (%d) in %s.",
1986 item->ri_buf[i].i_len, __func__); 1986 item->ri_buf[i].i_len, __func__);
@@ -2635,7 +2635,7 @@ xlog_recover_do_dquot_trans(
2635 "XFS: NULL dquot in %s.", __func__); 2635 "XFS: NULL dquot in %s.", __func__);
2636 return XFS_ERROR(EIO); 2636 return XFS_ERROR(EIO);
2637 } 2637 }
2638 if (item->ri_buf[1].i_len < sizeof(xfs_dqblk_t)) { 2638 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2639 cmn_err(CE_ALERT, 2639 cmn_err(CE_ALERT,
2640 "XFS: dquot too small (%d) in %s.", 2640 "XFS: dquot too small (%d) in %s.",
2641 item->ri_buf[1].i_len, __func__); 2641 item->ri_buf[1].i_len, __func__);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index f31271c30de9..2ffc570679be 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -467,6 +467,7 @@ xfs_trans_ail_update(
467{ 467{
468 xfs_log_item_t *dlip = NULL; 468 xfs_log_item_t *dlip = NULL;
469 xfs_log_item_t *mlip; /* ptr to minimum lip */ 469 xfs_log_item_t *mlip; /* ptr to minimum lip */
470 xfs_lsn_t tail_lsn;
470 471
471 mlip = xfs_ail_min(ailp); 472 mlip = xfs_ail_min(ailp);
472 473
@@ -483,8 +484,16 @@ xfs_trans_ail_update(
483 484
484 if (mlip == dlip) { 485 if (mlip == dlip) {
485 mlip = xfs_ail_min(ailp); 486 mlip = xfs_ail_min(ailp);
487 /*
488 * It is not safe to access mlip after the AIL lock is
489 * dropped, so we must get a copy of li_lsn before we do
490 * so. This is especially important on 32-bit platforms
491 * where accessing and updating 64-bit values like li_lsn
492 * is not atomic.
493 */
494 tail_lsn = mlip->li_lsn;
486 spin_unlock(&ailp->xa_lock); 495 spin_unlock(&ailp->xa_lock);
487 xfs_log_move_tail(ailp->xa_mount, mlip->li_lsn); 496 xfs_log_move_tail(ailp->xa_mount, tail_lsn);
488 } else { 497 } else {
489 spin_unlock(&ailp->xa_lock); 498 spin_unlock(&ailp->xa_lock);
490 } 499 }
@@ -514,6 +523,7 @@ xfs_trans_ail_delete(
514{ 523{
515 xfs_log_item_t *dlip; 524 xfs_log_item_t *dlip;
516 xfs_log_item_t *mlip; 525 xfs_log_item_t *mlip;
526 xfs_lsn_t tail_lsn;
517 527
518 if (lip->li_flags & XFS_LI_IN_AIL) { 528 if (lip->li_flags & XFS_LI_IN_AIL) {
519 mlip = xfs_ail_min(ailp); 529 mlip = xfs_ail_min(ailp);
@@ -527,9 +537,16 @@ xfs_trans_ail_delete(
527 537
528 if (mlip == dlip) { 538 if (mlip == dlip) {
529 mlip = xfs_ail_min(ailp); 539 mlip = xfs_ail_min(ailp);
540 /*
541 * It is not safe to access mlip after the AIL lock
542 * is dropped, so we must get a copy of li_lsn
543 * before we do so. This is especially important
544 * on 32-bit platforms where accessing and updating
545 * 64-bit values like li_lsn is not atomic.
546 */
547 tail_lsn = mlip ? mlip->li_lsn : 0;
530 spin_unlock(&ailp->xa_lock); 548 spin_unlock(&ailp->xa_lock);
531 xfs_log_move_tail(ailp->xa_mount, 549 xfs_log_move_tail(ailp->xa_mount, tail_lsn);
532 (mlip ? mlip->li_lsn : 0));
533 } else { 550 } else {
534 spin_unlock(&ailp->xa_lock); 551 spin_unlock(&ailp->xa_lock);
535 } 552 }
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index cd2d7896e34b..495dc8af4044 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -89,7 +89,7 @@
89 89
90#define F_OWNER_TID 0 90#define F_OWNER_TID 0
91#define F_OWNER_PID 1 91#define F_OWNER_PID 1
92#define F_OWNER_GID 2 92#define F_OWNER_PGRP 2
93 93
94struct f_owner_ex { 94struct f_owner_ex {
95 int type; 95 int type;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 1feed71551c9..5a5385749e16 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -330,6 +330,7 @@ unifdef-y += scc.h
330unifdef-y += sched.h 330unifdef-y += sched.h
331unifdef-y += screen_info.h 331unifdef-y += screen_info.h
332unifdef-y += sdla.h 332unifdef-y += sdla.h
333unifdef-y += securebits.h
333unifdef-y += selinux_netlink.h 334unifdef-y += selinux_netlink.h
334unifdef-y += sem.h 335unifdef-y += sem.h
335unifdef-y += serial_core.h 336unifdef-y += serial_core.h
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index dd97fb8408a8..b10ec49ee2dd 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -53,6 +53,7 @@ extern void free_bootmem_node(pg_data_t *pgdat,
53 unsigned long addr, 53 unsigned long addr,
54 unsigned long size); 54 unsigned long size);
55extern void free_bootmem(unsigned long addr, unsigned long size); 55extern void free_bootmem(unsigned long addr, unsigned long size);
56extern void free_bootmem_late(unsigned long addr, unsigned long size);
56 57
57/* 58/*
58 * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE, 59 * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
diff --git a/include/linux/capability.h b/include/linux/capability.h
index c8f2a5f70ed5..39e5ff512fbe 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -92,9 +92,7 @@ struct vfs_cap_data {
92#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 92#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
93#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 93#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
94 94
95#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
96extern int file_caps_enabled; 95extern int file_caps_enabled;
97#endif
98 96
99typedef struct kernel_cap_struct { 97typedef struct kernel_cap_struct {
100 __u32 cap[_KERNEL_CAPABILITY_U32S]; 98 __u32 cap[_KERNEL_CAPABILITY_U32S];
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index a3ed7cb8ca34..73dcf804bc94 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -79,6 +79,7 @@
79#define noinline __attribute__((noinline)) 79#define noinline __attribute__((noinline))
80#define __attribute_const__ __attribute__((__const__)) 80#define __attribute_const__ __attribute__((__const__))
81#define __maybe_unused __attribute__((unused)) 81#define __maybe_unused __attribute__((unused))
82#define __always_unused __attribute__((unused))
82 83
83#define __gcc_header(x) #x 84#define __gcc_header(x) #x
84#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h) 85#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 450fa597c94d..ab3af40a53c6 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -36,4 +36,18 @@
36 the kernel context */ 36 the kernel context */
37#define __cold __attribute__((__cold__)) 37#define __cold __attribute__((__cold__))
38 38
39
40#if __GNUC_MINOR__ >= 5
41/*
42 * Mark a position in code as unreachable. This can be used to
43 * suppress control flow warnings after asm blocks that transfer
44 * control elsewhere.
45 *
46 * Early snapshots of gcc 4.5 don't support this and we can't detect
47 * this in the preprocessor, but we can live with this because they're
48 * unreleased. Really, we need to have autoconf for the kernel.
49 */
50#define unreachable() __builtin_unreachable()
51#endif
52
39#endif 53#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 04fb5135b4e1..acbd654cc850 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -144,6 +144,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
144# define barrier() __memory_barrier() 144# define barrier() __memory_barrier()
145#endif 145#endif
146 146
147/* Unreachable code */
148#ifndef unreachable
149# define unreachable() do { } while (1)
150#endif
151
147#ifndef RELOC_HIDE 152#ifndef RELOC_HIDE
148# define RELOC_HIDE(ptr, off) \ 153# define RELOC_HIDE(ptr, off) \
149 ({ unsigned long __ptr; \ 154 ({ unsigned long __ptr; \
@@ -213,6 +218,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
213# define __maybe_unused /* unimplemented */ 218# define __maybe_unused /* unimplemented */
214#endif 219#endif
215 220
221#ifndef __always_unused
222# define __always_unused /* unimplemented */
223#endif
224
216#ifndef noinline 225#ifndef noinline
217#define noinline 226#define noinline
218#endif 227#endif
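
A minimal sketch of how the annotations added above are typically used (not part of the patch): unreachable() goes after an asm statement or call that never returns, so gcc 4.5+ stops warning that control reaches the end of a non-void function, while older compilers get the do/while fallback from compiler.h; __always_unused keeps the otherwise unreferenced helper from triggering an unused warning. The function name and the x86 "ud2" trap are illustrative assumptions:

#include <linux/compiler.h>

static int __always_unused example_trap(void)
{
        asm volatile("ud2");    /* x86 invalid-opcode trap: never returns */
        unreachable();          /* suppresses the missing-return warning on gcc >= 4.5 */
}
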
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 4a2b162c256a..5de4c9e5856d 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -208,16 +208,9 @@ struct dmar_atsr_unit {
208 u8 include_all:1; /* include all ports */ 208 u8 include_all:1; /* include all ports */
209}; 209};
210 210
211/* Intel DMAR initialization functions */
212extern int intel_iommu_init(void); 211extern int intel_iommu_init(void);
213#else 212#else /* !CONFIG_DMAR: */
214static inline int intel_iommu_init(void) 213static inline int intel_iommu_init(void) { return -ENODEV; }
215{ 214#endif /* CONFIG_DMAR */
216#ifdef CONFIG_INTR_REMAP 215
217 return dmar_dev_scope_init();
218#else
219 return -ENODEV;
220#endif
221}
222#endif /* !CONFIG_DMAR */
223#endif /* __DMAR_H__ */ 216#endif /* __DMAR_H__ */
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 84d3532dd3ea..7be0c6fbe880 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -91,6 +91,8 @@ struct fscache_operation {
91#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ 91#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
92#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ 92#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
93#define FSCACHE_OP_DEAD 6 /* op is now dead */ 93#define FSCACHE_OP_DEAD 6 /* op is now dead */
94#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */
95#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */
94 96
95 atomic_t usage; 97 atomic_t usage;
96 unsigned debug_id; /* debugging ID */ 98 unsigned debug_id; /* debugging ID */
@@ -102,6 +104,16 @@ struct fscache_operation {
102 104
103 /* operation releaser */ 105 /* operation releaser */
104 fscache_operation_release_t release; 106 fscache_operation_release_t release;
107
108#ifdef CONFIG_SLOW_WORK_PROC
109 const char *name; /* operation name */
110 const char *state; /* operation state */
111#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
112#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0)
113#else
114#define fscache_set_op_name(OP, N) do { } while(0)
115#define fscache_set_op_state(OP, S) do { } while(0)
116#endif
105}; 117};
106 118
107extern atomic_t fscache_op_debug_id; 119extern atomic_t fscache_op_debug_id;
@@ -125,6 +137,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
125 op->debug_id = atomic_inc_return(&fscache_op_debug_id); 137 op->debug_id = atomic_inc_return(&fscache_op_debug_id);
126 op->release = release; 138 op->release = release;
127 INIT_LIST_HEAD(&op->pend_link); 139 INIT_LIST_HEAD(&op->pend_link);
140 fscache_set_op_state(op, "Init");
128} 141}
129 142
130/** 143/**
@@ -221,8 +234,10 @@ struct fscache_cache_ops {
221 struct fscache_object *(*alloc_object)(struct fscache_cache *cache, 234 struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
222 struct fscache_cookie *cookie); 235 struct fscache_cookie *cookie);
223 236
224 /* look up the object for a cookie */ 237 /* look up the object for a cookie
225 void (*lookup_object)(struct fscache_object *object); 238 * - return -ETIMEDOUT to be requeued
239 */
240 int (*lookup_object)(struct fscache_object *object);
226 241
227 /* finished looking up */ 242 /* finished looking up */
228 void (*lookup_complete)(struct fscache_object *object); 243 void (*lookup_complete)(struct fscache_object *object);
@@ -297,12 +312,14 @@ struct fscache_cookie {
297 atomic_t usage; /* number of users of this cookie */ 312 atomic_t usage; /* number of users of this cookie */
298 atomic_t n_children; /* number of children of this cookie */ 313 atomic_t n_children; /* number of children of this cookie */
299 spinlock_t lock; 314 spinlock_t lock;
315 spinlock_t stores_lock; /* lock on page store tree */
300 struct hlist_head backing_objects; /* object(s) backing this file/index */ 316 struct hlist_head backing_objects; /* object(s) backing this file/index */
301 const struct fscache_cookie_def *def; /* definition */ 317 const struct fscache_cookie_def *def; /* definition */
302 struct fscache_cookie *parent; /* parent of this entry */ 318 struct fscache_cookie *parent; /* parent of this entry */
303 void *netfs_data; /* back pointer to netfs */ 319 void *netfs_data; /* back pointer to netfs */
304 struct radix_tree_root stores; /* pages to be stored on this cookie */ 320 struct radix_tree_root stores; /* pages to be stored on this cookie */
305#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ 321#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
322#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
306 323
307 unsigned long flags; 324 unsigned long flags;
308#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ 325#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
@@ -337,6 +354,7 @@ struct fscache_object {
337 FSCACHE_OBJECT_RECYCLING, /* retiring object */ 354 FSCACHE_OBJECT_RECYCLING, /* retiring object */
338 FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ 355 FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
339 FSCACHE_OBJECT_DEAD, /* object is now dead */ 356 FSCACHE_OBJECT_DEAD, /* object is now dead */
357 FSCACHE_OBJECT__NSTATES
340 } state; 358 } state;
341 359
342 int debug_id; /* debugging ID */ 360 int debug_id; /* debugging ID */
@@ -345,6 +363,7 @@ struct fscache_object {
345 int n_obj_ops; /* number of object ops outstanding on object */ 363 int n_obj_ops; /* number of object ops outstanding on object */
346 int n_in_progress; /* number of ops in progress */ 364 int n_in_progress; /* number of ops in progress */
347 int n_exclusive; /* number of exclusive ops queued */ 365 int n_exclusive; /* number of exclusive ops queued */
366 atomic_t n_reads; /* number of read ops in progress */
348 spinlock_t lock; /* state and operations lock */ 367 spinlock_t lock; /* state and operations lock */
349 368
350 unsigned long lookup_jif; /* time at which lookup started */ 369 unsigned long lookup_jif; /* time at which lookup started */
@@ -358,6 +377,7 @@ struct fscache_object {
358#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ 377#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
359#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ 378#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
360#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ 379#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
380#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events*/
361 381
362 unsigned long flags; 382 unsigned long flags;
363#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ 383#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
@@ -373,7 +393,11 @@ struct fscache_object {
373 struct list_head dependents; /* FIFO of dependent objects */ 393 struct list_head dependents; /* FIFO of dependent objects */
374 struct list_head dep_link; /* link in parent's dependents list */ 394 struct list_head dep_link; /* link in parent's dependents list */
375 struct list_head pending_ops; /* unstarted operations on this object */ 395 struct list_head pending_ops; /* unstarted operations on this object */
396#ifdef CONFIG_FSCACHE_OBJECT_LIST
397 struct rb_node objlist_link; /* link in global object list */
398#endif
376 pgoff_t store_limit; /* current storage limit */ 399 pgoff_t store_limit; /* current storage limit */
400 loff_t store_limit_l; /* current storage limit */
377}; 401};
378 402
379extern const char *fscache_object_states[]; 403extern const char *fscache_object_states[];
@@ -383,6 +407,10 @@ extern const char *fscache_object_states[];
383 (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ 407 (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
384 (obj)->state < FSCACHE_OBJECT_DYING) 408 (obj)->state < FSCACHE_OBJECT_DYING)
385 409
410#define fscache_object_is_dead(obj) \
411 (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
412 (obj)->state >= FSCACHE_OBJECT_DYING)
413
386extern const struct slow_work_ops fscache_object_slow_work_ops; 414extern const struct slow_work_ops fscache_object_slow_work_ops;
387 415
388/** 416/**
@@ -414,6 +442,7 @@ void fscache_object_init(struct fscache_object *object,
414 object->events = object->event_mask = 0; 442 object->events = object->event_mask = 0;
415 object->flags = 0; 443 object->flags = 0;
416 object->store_limit = 0; 444 object->store_limit = 0;
445 object->store_limit_l = 0;
417 object->cache = cache; 446 object->cache = cache;
418 object->cookie = cookie; 447 object->cookie = cookie;
419 object->parent = NULL; 448 object->parent = NULL;
@@ -422,6 +451,12 @@ void fscache_object_init(struct fscache_object *object,
422extern void fscache_object_lookup_negative(struct fscache_object *object); 451extern void fscache_object_lookup_negative(struct fscache_object *object);
423extern void fscache_obtained_object(struct fscache_object *object); 452extern void fscache_obtained_object(struct fscache_object *object);
424 453
454#ifdef CONFIG_FSCACHE_OBJECT_LIST
455extern void fscache_object_destroy(struct fscache_object *object);
456#else
457#define fscache_object_destroy(object) do {} while(0)
458#endif
459
425/** 460/**
426 * fscache_object_destroyed - Note destruction of an object in a cache 461 * fscache_object_destroyed - Note destruction of an object in a cache
427 * @cache: The cache from which the object came 462 * @cache: The cache from which the object came
@@ -460,6 +495,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object)
460static inline 495static inline
461void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) 496void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
462{ 497{
498 object->store_limit_l = i_size;
463 object->store_limit = i_size >> PAGE_SHIFT; 499 object->store_limit = i_size >> PAGE_SHIFT;
464 if (i_size & ~PAGE_MASK) 500 if (i_size & ~PAGE_MASK)
465 object->store_limit++; 501 object->store_limit++;
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6d8ee466e0a0..595ce49288b7 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -202,6 +202,8 @@ extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
202extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); 202extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
203extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); 203extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
204extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); 204extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
205extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
206 gfp_t);
205 207
206/** 208/**
207 * fscache_register_netfs - Register a filesystem as desiring caching services 209 * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -615,4 +617,29 @@ void fscache_wait_on_page_write(struct fscache_cookie *cookie,
615 __fscache_wait_on_page_write(cookie, page); 617 __fscache_wait_on_page_write(cookie, page);
616} 618}
617 619
620/**
621 * fscache_maybe_release_page - Consider releasing a page, cancelling a store
622 * @cookie: The cookie representing the cache object
623 * @page: The netfs page that is being cached.
624 * @gfp: The gfp flags passed to releasepage()
625 *
626 * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
 627 * releasepage() call. A storage request on the page may be cancelled if it is
628 * not currently being processed.
629 *
630 * The function returns true if the page no longer has a storage request on it,
631 * and false if a storage request is left in place. If true is returned, the
632 * page will have been passed to fscache_uncache_page(). If false is returned
633 * the page cannot be freed yet.
634 */
635static inline
636bool fscache_maybe_release_page(struct fscache_cookie *cookie,
637 struct page *page,
638 gfp_t gfp)
639{
640 if (fscache_cookie_valid(cookie) && PageFsCache(page))
641 return __fscache_maybe_release_page(cookie, page, gfp);
642 return false;
643}
644
618#endif /* _LINUX_FSCACHE_H */ 645#endif /* _LINUX_FSCACHE_H */
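
As a hedged sketch of how a network filesystem might consume the helper documented above (not part of the patch), its ->releasepage() handler can let FS-Cache cancel a pending store before the page is reclaimed; example_cookie and example_releasepage() are illustrative stand-ins, and only fscache_maybe_release_page() and its true/false semantics come from the header change:

#include <linux/fs.h>
#include <linux/fscache.h>

/* Stand-in for however the netfs locates the cookie backing this inode. */
static struct fscache_cookie *example_cookie;

static int example_releasepage(struct page *page, gfp_t gfp)
{
        if (!fscache_maybe_release_page(example_cookie, page, gfp))
                return 0;       /* a store is still queued; the page cannot go yet */

        return 1;               /* fscache no longer holds the page */
}
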
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index b80c88dedbbb..81f90a59cda6 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -81,7 +81,11 @@ struct gfs2_meta_header {
81 __be32 mh_type; 81 __be32 mh_type;
82 __be64 __pad0; /* Was generation number in gfs1 */ 82 __be64 __pad0; /* Was generation number in gfs1 */
83 __be32 mh_format; 83 __be32 mh_format;
84 __be32 __pad1; /* Was incarnation number in gfs1 */ 84 /* This union is to keep userspace happy */
85 union {
86 __be32 mh_jid; /* Was incarnation number in gfs1 */
87 __be32 __pad1;
88 };
85}; 89};
86 90
87/* 91/*
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 6d527ee82b2b..d5b387669dab 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -139,10 +139,34 @@ static inline void account_system_vtime(struct task_struct *tsk)
139#endif 139#endif
140 140
141#if defined(CONFIG_NO_HZ) 141#if defined(CONFIG_NO_HZ)
142#if defined(CONFIG_TINY_RCU)
143extern void rcu_enter_nohz(void);
144extern void rcu_exit_nohz(void);
145
146static inline void rcu_irq_enter(void)
147{
148 rcu_exit_nohz();
149}
150
151static inline void rcu_irq_exit(void)
152{
153 rcu_enter_nohz();
154}
155
156static inline void rcu_nmi_enter(void)
157{
158}
159
160static inline void rcu_nmi_exit(void)
161{
162}
163
164#else
142extern void rcu_irq_enter(void); 165extern void rcu_irq_enter(void);
143extern void rcu_irq_exit(void); 166extern void rcu_irq_exit(void);
144extern void rcu_nmi_enter(void); 167extern void rcu_nmi_enter(void);
145extern void rcu_nmi_exit(void); 168extern void rcu_nmi_exit(void);
169#endif
146#else 170#else
147# define rcu_irq_enter() do { } while (0) 171# define rcu_irq_enter() do { } while (0)
148# define rcu_irq_exit() do { } while (0) 172# define rcu_irq_exit() do { } while (0)
diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h
index f13255e06406..9eb07bbc6522 100644
--- a/include/linux/i2c-pnx.h
+++ b/include/linux/i2c-pnx.h
@@ -21,7 +21,7 @@ struct i2c_pnx_mif {
21 int mode; /* Interface mode */ 21 int mode; /* Interface mode */
22 struct completion complete; /* I/O completion */ 22 struct completion complete; /* I/O completion */
23 struct timer_list timer; /* Timeout */ 23 struct timer_list timer; /* Timeout */
24 char * buf; /* Data buffer */ 24 u8 * buf; /* Data buffer */
25 int len; /* Length of data buffer */ 25 int len; /* Length of data buffer */
26}; 26};
27 27
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 21a6f5d9af22..8d10aa7fd4c9 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -83,16 +83,12 @@ extern struct group_info init_groups;
83#define INIT_IDS 83#define INIT_IDS
84#endif 84#endif
85 85
86#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
87/* 86/*
88 * Because of the reduced scope of CAP_SETPCAP when filesystem 87 * Because of the reduced scope of CAP_SETPCAP when filesystem
89 * capabilities are in effect, it is safe to allow CAP_SETPCAP to 88 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
90 * be available in the default configuration. 89 * be available in the default configuration.
91 */ 90 */
92# define CAP_INIT_BSET CAP_FULL_SET 91# define CAP_INIT_BSET CAP_FULL_SET
93#else
94# define CAP_INIT_BSET CAP_INIT_EFF_SET
95#endif
96 92
97#ifdef CONFIG_TREE_PREEMPT_RCU 93#ifdef CONFIG_TREE_PREEMPT_RCU
98#define INIT_TASK_RCU_PREEMPT(tsk) \ 94#define INIT_TASK_RCU_PREEMPT(tsk) \
diff --git a/include/linux/input.h b/include/linux/input.h
index 0ccfc30cd40f..c2b1a7d244d9 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1377,6 +1377,10 @@ extern struct class input_class;
1377 * methods; erase() is optional. set_gain() and set_autocenter() need 1377 * methods; erase() is optional. set_gain() and set_autocenter() need
1378 * only be implemented if driver sets up FF_GAIN and FF_AUTOCENTER 1378 * only be implemented if driver sets up FF_GAIN and FF_AUTOCENTER
1379 * bits. 1379 * bits.
1380 *
1381 * Note that playback(), set_gain() and set_autocenter() are called with
1382 * dev->event_lock spinlock held and interrupts off and thus may not
1383 * sleep.
1380 */ 1384 */
1381struct ff_device { 1385struct ff_device {
1382 int (*upload)(struct input_dev *dev, struct ff_effect *effect, 1386 int (*upload)(struct input_dev *dev, struct ff_effect *effect,
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 7ca72b74eec7..75f3f00ac1e5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -603,12 +603,6 @@ static inline void init_irq_proc(void)
603} 603}
604#endif 604#endif
605 605
606#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
607extern void debug_poll_all_shared_irqs(void);
608#else
609static inline void debug_poll_all_shared_irqs(void) { }
610#endif
611
612struct seq_file; 606struct seq_file;
613int show_interrupts(struct seq_file *p, void *v); 607int show_interrupts(struct seq_file *p, void *v);
614 608
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index b02a3f1d46a0..006bf45eae30 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -124,6 +124,6 @@
124 typecheck(unsigned long, flags); \ 124 typecheck(unsigned long, flags); \
125 raw_irqs_disabled_flags(flags); \ 125 raw_irqs_disabled_flags(flags); \
126}) 126})
127#endif /* CONFIG_X86 */ 127#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
128 128
129#endif 129#endif
diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
index 4c218ee7587a..8687a7dc0632 100644
--- a/include/linux/isdn_ppp.h
+++ b/include/linux/isdn_ppp.h
@@ -157,7 +157,7 @@ typedef struct {
157 157
158typedef struct { 158typedef struct {
159 int mp_mrru; /* unused */ 159 int mp_mrru; /* unused */
160 struct sk_buff_head frags; /* fragments sl list */ 160 struct sk_buff * frags; /* fragments sl list -- use skb->next */
161 long frames; /* number of frames in the frame list */ 161 long frames; /* number of frames in the frame list */
162 unsigned int seq; /* last processed packet seq #: any packets 162 unsigned int seq; /* last processed packet seq #: any packets
163 * with smaller seq # will be dropped 163 * with smaller seq # will be dropped
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f4e3184fa054..3fa4c590cf12 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -15,7 +15,6 @@
15#include <linux/bitops.h> 15#include <linux/bitops.h>
16#include <linux/log2.h> 16#include <linux/log2.h>
17#include <linux/typecheck.h> 17#include <linux/typecheck.h>
18#include <linux/ratelimit.h>
19#include <linux/dynamic_debug.h> 18#include <linux/dynamic_debug.h>
20#include <asm/byteorder.h> 19#include <asm/byteorder.h>
21#include <asm/bug.h> 20#include <asm/bug.h>
@@ -241,8 +240,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
241asmlinkage int printk(const char * fmt, ...) 240asmlinkage int printk(const char * fmt, ...)
242 __attribute__ ((format (printf, 1, 2))) __cold; 241 __attribute__ ((format (printf, 1, 2))) __cold;
243 242
244extern struct ratelimit_state printk_ratelimit_state; 243extern int __printk_ratelimit(const char *func);
245extern int printk_ratelimit(void); 244#define printk_ratelimit() __printk_ratelimit(__func__)
246extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, 245extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
247 unsigned int interval_msec); 246 unsigned int interval_msec);
248 247
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 190c37854870..f78f83d7663f 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -26,14 +26,15 @@
26 26
27/* Auxiliary data to use in generating the audit record. */ 27/* Auxiliary data to use in generating the audit record. */
28struct common_audit_data { 28struct common_audit_data {
29 char type; 29 char type;
30#define LSM_AUDIT_DATA_FS 1 30#define LSM_AUDIT_DATA_FS 1
31#define LSM_AUDIT_DATA_NET 2 31#define LSM_AUDIT_DATA_NET 2
32#define LSM_AUDIT_DATA_CAP 3 32#define LSM_AUDIT_DATA_CAP 3
33#define LSM_AUDIT_DATA_IPC 4 33#define LSM_AUDIT_DATA_IPC 4
34#define LSM_AUDIT_DATA_TASK 5 34#define LSM_AUDIT_DATA_TASK 5
35#define LSM_AUDIT_DATA_KEY 6 35#define LSM_AUDIT_DATA_KEY 6
36#define LSM_AUDIT_NO_AUDIT 7 36#define LSM_AUDIT_NO_AUDIT 7
37#define LSM_AUDIT_DATA_KMOD 8
37 struct task_struct *tsk; 38 struct task_struct *tsk;
38 union { 39 union {
39 struct { 40 struct {
@@ -66,6 +67,7 @@ struct common_audit_data {
66 char *key_desc; 67 char *key_desc;
67 } key_struct; 68 } key_struct;
68#endif 69#endif
70 char *kmod_name;
69 } u; 71 } u;
70 /* this union contains LSM specific data */ 72 /* this union contains LSM specific data */
71 union { 73 union {
diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h
index f95466343fb2..955d30fc6a27 100644
--- a/include/linux/mfd/wm831x/regulator.h
+++ b/include/linux/mfd/wm831x/regulator.h
@@ -1212,7 +1212,7 @@
1212#define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */ 1212#define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */
1213#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */ 1213#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */
1214 1214
1215#define WM831X_ISINK_MAX_ISEL 56 1215#define WM831X_ISINK_MAX_ISEL 55
1216extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL]; 1216extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
1217 1217
1218#endif 1218#endif
diff --git a/include/linux/net.h b/include/linux/net.h
index 529a0931711d..d7e26e30c8c2 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -358,6 +358,7 @@ static const struct proto_ops name##_ops = { \
358 358
359#ifdef CONFIG_SYSCTL 359#ifdef CONFIG_SYSCTL
360#include <linux/sysctl.h> 360#include <linux/sysctl.h>
361#include <linux/ratelimit.h>
361extern struct ratelimit_state net_ratelimit_state; 362extern struct ratelimit_state net_ratelimit_state;
362#endif 363#endif
363 364
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 79fec6af3f9f..ce520402e840 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -425,15 +425,6 @@ struct nilfs_dat_entry {
425}; 425};
426 426
427/** 427/**
428 * struct nilfs_dat_group_desc - block group descriptor
429 * @dg_nfrees: number of free virtual block numbers in block group
430 */
431struct nilfs_dat_group_desc {
432 __le32 dg_nfrees;
433};
434
435
436/**
437 * struct nilfs_snapshot_list - snapshot list 428 * struct nilfs_snapshot_list - snapshot list
438 * @ssl_next: next checkpoint number on snapshot list 429 * @ssl_next: next checkpoint number on snapshot list
439 * @ssl_prev: previous checkpoint number on snapshot list 430 * @ssl_prev: previous checkpoint number on snapshot list
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 84cf1f3b7838..daecca3c8300 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1633,6 +1633,8 @@
1633#define PCI_DEVICE_ID_O2_6730 0x673a 1633#define PCI_DEVICE_ID_O2_6730 0x673a
1634#define PCI_DEVICE_ID_O2_6832 0x6832 1634#define PCI_DEVICE_ID_O2_6832 0x6832
1635#define PCI_DEVICE_ID_O2_6836 0x6836 1635#define PCI_DEVICE_ID_O2_6836 0x6836
1636#define PCI_DEVICE_ID_O2_6812 0x6872
1637#define PCI_DEVICE_ID_O2_6933 0x6933
1636 1638
1637#define PCI_VENDOR_ID_3DFX 0x121a 1639#define PCI_VENDOR_ID_3DFX 0x121a
1638#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 1640#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 065a3652a3ea..67608161df6b 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -147,6 +147,20 @@ static inline void forget_cached_acl(struct inode *inode, int type)
147 if (old != ACL_NOT_CACHED) 147 if (old != ACL_NOT_CACHED)
148 posix_acl_release(old); 148 posix_acl_release(old);
149} 149}
150
151static inline void forget_all_cached_acls(struct inode *inode)
152{
153 struct posix_acl *old_access, *old_default;
154 spin_lock(&inode->i_lock);
155 old_access = inode->i_acl;
156 old_default = inode->i_default_acl;
157 inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
158 spin_unlock(&inode->i_lock);
159 if (old_access != ACL_NOT_CACHED)
160 posix_acl_release(old_access);
161 if (old_default != ACL_NOT_CACHED)
162 posix_acl_release(old_default);
163}
150#endif 164#endif
151 165
152static inline void cache_no_acl(struct inode *inode) 166static inline void cache_no_acl(struct inode *inode)
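
A brief, hypothetical usage sketch for the helper added above (not part of the patch): a filesystem that caches ACLs, for instance a network filesystem revalidating an inode, can drop both cached ACLs in one call; the surrounding function name is illustrative:

#include <linux/fs.h>
#include <linux/posix_acl.h>

static void example_inode_attrs_changed(struct inode *inode)
{
        /* Invalidate the cached access and default ACLs together. */
        forget_all_cached_acls(inode);
}
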
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 78c48895b12a..ce9a9b2e5cd4 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -376,6 +376,17 @@ static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
376 return flags >> _DQUOT_STATE_FLAGS; 376 return flags >> _DQUOT_STATE_FLAGS;
377} 377}
378 378
379#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
380extern void quota_send_warning(short type, unsigned int id, dev_t dev,
381 const char warntype);
382#else
383static inline void quota_send_warning(short type, unsigned int id, dev_t dev,
384 const char warntype)
385{
386 return;
387}
388#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */
389
379struct quota_info { 390struct quota_info {
380 unsigned int flags; /* Flags for diskquotas on this device */ 391 unsigned int flags; /* Flags for diskquotas on this device */
381 struct mutex dqio_mutex; /* lock device while I/O in progress */ 392 struct mutex dqio_mutex; /* lock device while I/O in progress */
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 00044b856453..668cf1bef030 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -1,20 +1,31 @@
1#ifndef _LINUX_RATELIMIT_H 1#ifndef _LINUX_RATELIMIT_H
2#define _LINUX_RATELIMIT_H 2#define _LINUX_RATELIMIT_H
3
3#include <linux/param.h> 4#include <linux/param.h>
5#include <linux/spinlock_types.h>
4 6
5#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) 7#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
6#define DEFAULT_RATELIMIT_BURST 10 8#define DEFAULT_RATELIMIT_BURST 10
7 9
8struct ratelimit_state { 10struct ratelimit_state {
9 int interval; 11 spinlock_t lock; /* protect the state */
10 int burst; 12
11 int printed; 13 int interval;
12 int missed; 14 int burst;
13 unsigned long begin; 15 int printed;
16 int missed;
17 unsigned long begin;
14}; 18};
15 19
16#define DEFINE_RATELIMIT_STATE(name, interval, burst) \ 20#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
17 struct ratelimit_state name = {interval, burst,} 21 \
22 struct ratelimit_state name = { \
23 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
24 .interval = interval_init, \
25 .burst = burst_init, \
26 }
27
28extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
29#define __ratelimit(state) ___ratelimit(state, __func__)
18 30
19extern int __ratelimit(struct ratelimit_state *rs); 31#endif /* _LINUX_RATELIMIT_H */
20#endif
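The new ___ratelimit()/__ratelimit() pair keeps existing call sites working while recording the calling function's name for the "callbacks suppressed" message, and the per-state spinlock is now initialised by DEFINE_RATELIMIT_STATE(). A minimal usage sketch:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* One shared state: at most 10 messages per 5*HZ interval. */
static DEFINE_RATELIMIT_STATE(example_rs, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void example_noisy_path(int err)
{
	/* __ratelimit() returns non-zero while printing is still allowed. */
	if (__ratelimit(&example_rs))
		printk(KERN_WARNING "example: transient error %d\n", err);
}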
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 3ebd0b7bcb08..24440f4bf476 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,6 @@ struct rcu_head {
52}; 52};
53 53
54/* Exported common interfaces */ 54/* Exported common interfaces */
55#ifdef CONFIG_TREE_PREEMPT_RCU
56extern void synchronize_rcu(void);
57#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
58#define synchronize_rcu synchronize_sched
59#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
60extern void synchronize_rcu_bh(void); 55extern void synchronize_rcu_bh(void);
61extern void synchronize_sched(void); 56extern void synchronize_sched(void);
62extern void rcu_barrier(void); 57extern void rcu_barrier(void);
@@ -67,12 +62,11 @@ extern int sched_expedited_torture_stats(char *page);
67 62
68/* Internal to kernel */ 63/* Internal to kernel */
69extern void rcu_init(void); 64extern void rcu_init(void);
70extern void rcu_scheduler_starting(void);
71extern int rcu_needs_cpu(int cpu);
72extern int rcu_scheduler_active;
73 65
74#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 66#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
75#include <linux/rcutree.h> 67#include <linux/rcutree.h>
68#elif defined(CONFIG_TINY_RCU)
69#include <linux/rcutiny.h>
76#else 70#else
77#error "Unknown RCU implementation specified to kernel configuration" 71#error "Unknown RCU implementation specified to kernel configuration"
78#endif 72#endif
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
new file mode 100644
index 000000000000..c4ba9a78721e
--- /dev/null
+++ b/include/linux/rcutiny.h
@@ -0,0 +1,104 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
21 *
22 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU
24 */
25#ifndef __LINUX_TINY_H
26#define __LINUX_TINY_H
27
28#include <linux/cache.h>
29
30void rcu_sched_qs(int cpu);
31void rcu_bh_qs(int cpu);
32
33#define __rcu_read_lock() preempt_disable()
34#define __rcu_read_unlock() preempt_enable()
35#define __rcu_read_lock_bh() local_bh_disable()
36#define __rcu_read_unlock_bh() local_bh_enable()
37#define call_rcu_sched call_rcu
38
39#define rcu_init_sched() do { } while (0)
40extern void rcu_check_callbacks(int cpu, int user);
41
42static inline int rcu_needs_cpu(int cpu)
43{
44 return 0;
45}
46
47/*
48 * Return the number of grace periods.
49 */
50static inline long rcu_batches_completed(void)
51{
52 return 0;
53}
54
55/*
56 * Return the number of bottom-half grace periods.
57 */
58static inline long rcu_batches_completed_bh(void)
59{
60 return 0;
61}
62
63extern int rcu_expedited_torture_stats(char *page);
64
65#define synchronize_rcu synchronize_sched
66
67static inline void synchronize_rcu_expedited(void)
68{
69 synchronize_sched();
70}
71
72static inline void synchronize_rcu_bh_expedited(void)
73{
74 synchronize_sched();
75}
76
77struct notifier_block;
78
79#ifdef CONFIG_NO_HZ
80
81extern void rcu_enter_nohz(void);
82extern void rcu_exit_nohz(void);
83
84#else /* #ifdef CONFIG_NO_HZ */
85
86static inline void rcu_enter_nohz(void)
87{
88}
89
90static inline void rcu_exit_nohz(void)
91{
92}
93
94#endif /* #else #ifdef CONFIG_NO_HZ */
95
96static inline void rcu_scheduler_starting(void)
97{
98}
99
100static inline void exit_rcu(void)
101{
102}
103
104#endif /* __LINUX_RCUTINY_H */
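Because TINY_RCU is UP-only and non-preemptible, the header can map the heavier primitives straight onto their sched-RCU equivalents while readers keep the normal API. A hedged sketch of a reader/updater pair that builds identically against rcutiny.h or rcutree.h (the example_cfg names are invented):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_cfg { int value; };
static struct example_cfg *example_cfg_ptr;	/* hypothetical global */

static int example_read_value(void)
{
	struct example_cfg *cfg;
	int val = -1;

	rcu_read_lock();		/* preempt_disable() under TINY_RCU */
	cfg = rcu_dereference(example_cfg_ptr);
	if (cfg)
		val = cfg->value;
	rcu_read_unlock();
	return val;
}

static void example_update_value(struct example_cfg *newcfg)
{
	struct example_cfg *old = example_cfg_ptr;

	rcu_assign_pointer(example_cfg_ptr, newcfg);
	synchronize_rcu();		/* synchronize_sched() under TINY_RCU */
	kfree(old);
}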
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 9642c6bcb399..c93eee5911b0 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -34,15 +34,15 @@ struct notifier_block;
34 34
35extern void rcu_sched_qs(int cpu); 35extern void rcu_sched_qs(int cpu);
36extern void rcu_bh_qs(int cpu); 36extern void rcu_bh_qs(int cpu);
37extern int rcu_cpu_notify(struct notifier_block *self,
38 unsigned long action, void *hcpu);
39extern int rcu_needs_cpu(int cpu); 37extern int rcu_needs_cpu(int cpu);
38extern void rcu_scheduler_starting(void);
40extern int rcu_expedited_torture_stats(char *page); 39extern int rcu_expedited_torture_stats(char *page);
41 40
42#ifdef CONFIG_TREE_PREEMPT_RCU 41#ifdef CONFIG_TREE_PREEMPT_RCU
43 42
44extern void __rcu_read_lock(void); 43extern void __rcu_read_lock(void);
45extern void __rcu_read_unlock(void); 44extern void __rcu_read_unlock(void);
45extern void synchronize_rcu(void);
46extern void exit_rcu(void); 46extern void exit_rcu(void);
47 47
48#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 48#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
@@ -57,7 +57,7 @@ static inline void __rcu_read_unlock(void)
57 preempt_enable(); 57 preempt_enable();
58} 58}
59 59
60#define __synchronize_sched() synchronize_rcu() 60#define synchronize_rcu synchronize_sched
61 61
62static inline void exit_rcu(void) 62static inline void exit_rcu(void)
63{ 63{
@@ -83,7 +83,6 @@ static inline void synchronize_rcu_bh_expedited(void)
83 synchronize_sched_expedited(); 83 synchronize_sched_expedited();
84} 84}
85 85
86extern void __rcu_init(void);
87extern void rcu_check_callbacks(int cpu, int user); 86extern void rcu_check_callbacks(int cpu, int user);
88 87
89extern long rcu_batches_completed(void); 88extern long rcu_batches_completed(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 75e6e60bf583..882dc48163b4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1421,17 +1421,17 @@ struct task_struct {
1421#endif 1421#endif
1422#ifdef CONFIG_TRACE_IRQFLAGS 1422#ifdef CONFIG_TRACE_IRQFLAGS
1423 unsigned int irq_events; 1423 unsigned int irq_events;
1424 int hardirqs_enabled;
1425 unsigned long hardirq_enable_ip; 1424 unsigned long hardirq_enable_ip;
1426 unsigned int hardirq_enable_event;
1427 unsigned long hardirq_disable_ip; 1425 unsigned long hardirq_disable_ip;
1426 unsigned int hardirq_enable_event;
1428 unsigned int hardirq_disable_event; 1427 unsigned int hardirq_disable_event;
1429 int softirqs_enabled; 1428 int hardirqs_enabled;
1429 int hardirq_context;
1430 unsigned long softirq_disable_ip; 1430 unsigned long softirq_disable_ip;
1431 unsigned int softirq_disable_event;
1432 unsigned long softirq_enable_ip; 1431 unsigned long softirq_enable_ip;
1432 unsigned int softirq_disable_event;
1433 unsigned int softirq_enable_event; 1433 unsigned int softirq_enable_event;
1434 int hardirq_context; 1434 int softirqs_enabled;
1435 int softirq_context; 1435 int softirq_context;
1436#endif 1436#endif
1437#ifdef CONFIG_LOCKDEP 1437#ifdef CONFIG_LOCKDEP
@@ -2086,11 +2086,18 @@ static inline int is_si_special(const struct siginfo *info)
2086 return info <= SEND_SIG_FORCED; 2086 return info <= SEND_SIG_FORCED;
2087} 2087}
2088 2088
2089/* True if we are on the alternate signal stack. */ 2089/*
2090 2090 * True if we are on the alternate signal stack.
2091 */
2091static inline int on_sig_stack(unsigned long sp) 2092static inline int on_sig_stack(unsigned long sp)
2092{ 2093{
2093 return (sp - current->sas_ss_sp < current->sas_ss_size); 2094#ifdef CONFIG_STACK_GROWSUP
2095 return sp >= current->sas_ss_sp &&
2096 sp - current->sas_ss_sp < current->sas_ss_size;
2097#else
2098 return sp > current->sas_ss_sp &&
2099 sp - current->sas_ss_sp <= current->sas_ss_size;
2100#endif
2094} 2101}
2095 2102
2096static inline int sas_ss_flags(unsigned long sp) 2103static inline int sas_ss_flags(unsigned long sp)
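The reworked on_sig_stack() distinguishes stack-growth direction: on grows-down targets a pointer exactly at sas_ss_sp is not on the alternate stack, while sas_ss_sp + sas_ss_size is. The boundary logic can be checked in isolation with a small user-space model (struct and values below are made up for illustration):

#include <assert.h>

struct alt_stack { unsigned long sp; unsigned long size; };

/* Grows-down variant, mirroring the new kernel predicate. */
static int on_sig_stack_growsdown(const struct alt_stack *ss, unsigned long sp)
{
	return sp > ss->sp && sp - ss->sp <= ss->size;
}

int main(void)
{
	struct alt_stack ss = { .sp = 0x1000, .size = 0x100 };

	assert(!on_sig_stack_growsdown(&ss, 0x1000));	/* just below the stack */
	assert(on_sig_stack_growsdown(&ss, 0x1100));	/* top of the stack */
	assert(!on_sig_stack_growsdown(&ss, 0x1101));	/* above the stack */
	return 0;
}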
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
index d2c5ed845bcc..33406174cbe8 100644
--- a/include/linux/securebits.h
+++ b/include/linux/securebits.h
@@ -1,6 +1,15 @@
1#ifndef _LINUX_SECUREBITS_H 1#ifndef _LINUX_SECUREBITS_H
2#define _LINUX_SECUREBITS_H 1 2#define _LINUX_SECUREBITS_H 1
3 3
4/* Each securesetting is implemented using two bits. One bit specifies
5 whether the setting is on or off. The other bit specify whether the
6 setting is locked or not. A setting which is locked cannot be
7 changed from user-level. */
8#define issecure_mask(X) (1 << (X))
9#ifdef __KERNEL__
10#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
11#endif
12
4#define SECUREBITS_DEFAULT 0x00000000 13#define SECUREBITS_DEFAULT 0x00000000
5 14
6/* When set UID 0 has no special privileges. When unset, we support 15/* When set UID 0 has no special privileges. When unset, we support
@@ -12,6 +21,9 @@
12#define SECURE_NOROOT 0 21#define SECURE_NOROOT 0
13#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */ 22#define SECURE_NOROOT_LOCKED 1 /* make bit-0 immutable */
14 23
24#define SECBIT_NOROOT (issecure_mask(SECURE_NOROOT))
25#define SECBIT_NOROOT_LOCKED (issecure_mask(SECURE_NOROOT_LOCKED))
26
15/* When set, setuid to/from uid 0 does not trigger capability-"fixup". 27/* When set, setuid to/from uid 0 does not trigger capability-"fixup".
16 When unset, to provide compatiblility with old programs relying on 28 When unset, to provide compatiblility with old programs relying on
17 set*uid to gain/lose privilege, transitions to/from uid 0 cause 29 set*uid to gain/lose privilege, transitions to/from uid 0 cause
@@ -19,6 +31,10 @@
19#define SECURE_NO_SETUID_FIXUP 2 31#define SECURE_NO_SETUID_FIXUP 2
20#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */ 32#define SECURE_NO_SETUID_FIXUP_LOCKED 3 /* make bit-2 immutable */
21 33
34#define SECBIT_NO_SETUID_FIXUP (issecure_mask(SECURE_NO_SETUID_FIXUP))
35#define SECBIT_NO_SETUID_FIXUP_LOCKED \
36 (issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED))
37
22/* When set, a process can retain its capabilities even after 38/* When set, a process can retain its capabilities even after
23 transitioning to a non-root user (the set-uid fixup suppressed by 39 transitioning to a non-root user (the set-uid fixup suppressed by
24 bit 2). Bit-4 is cleared when a process calls exec(); setting both 40 bit 2). Bit-4 is cleared when a process calls exec(); setting both
@@ -27,12 +43,8 @@
27#define SECURE_KEEP_CAPS 4 43#define SECURE_KEEP_CAPS 4
28#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */ 44#define SECURE_KEEP_CAPS_LOCKED 5 /* make bit-4 immutable */
29 45
30/* Each securesetting is implemented using two bits. One bit specifies 46#define SECBIT_KEEP_CAPS (issecure_mask(SECURE_KEEP_CAPS))
31 whether the setting is on or off. The other bit specify whether the 47#define SECBIT_KEEP_CAPS_LOCKED (issecure_mask(SECURE_KEEP_CAPS_LOCKED))
32 setting is locked or not. A setting which is locked cannot be
33 changed from user-level. */
34#define issecure_mask(X) (1 << (X))
35#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
36 48
37#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ 49#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
38 issecure_mask(SECURE_NO_SETUID_FIXUP) | \ 50 issecure_mask(SECURE_NO_SETUID_FIXUP) | \
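Moving issecure_mask() above the __KERNEL__ guard lets the new SECBIT_* masks be used directly from userspace. A hypothetical sketch of a process manager locking down setuid fixups via prctl() (PR_SET_SECUREBITS is the existing prctl; the program itself is invented and needs CAP_SETPCAP):

#include <sys/prctl.h>
#include <linux/securebits.h>
#include <stdio.h>

int main(void)
{
	/* Turn off the uid-0 capability fixup and lock the setting. */
	unsigned long bits = SECBIT_NO_SETUID_FIXUP |
			     SECBIT_NO_SETUID_FIXUP_LOCKED;

	if (prctl(PR_SET_SECUREBITS, bits) != 0) {
		perror("PR_SET_SECUREBITS");
		return 1;
	}
	return 0;
}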
diff --git a/include/linux/security.h b/include/linux/security.h
index 239e40d0450b..466cbadbd1ef 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -447,6 +447,22 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
447 * @new_dir contains the path structure for parent of the new link. 447 * @new_dir contains the path structure for parent of the new link.
448 * @new_dentry contains the dentry structure of the new link. 448 * @new_dentry contains the dentry structure of the new link.
449 * Return 0 if permission is granted. 449 * Return 0 if permission is granted.
450 * @path_chmod:
451 * Check for permission to change DAC's permission of a file or directory.
452 * @dentry contains the dentry structure.
453 * @mnt contains the vfsmnt structure.
454 * @mode contains DAC's mode.
455 * Return 0 if permission is granted.
456 * @path_chown:
457 * Check for permission to change owner/group of a file or directory.
458 * @path contains the path structure.
459 * @uid contains new owner's ID.
460 * @gid contains new group's ID.
461 * Return 0 if permission is granted.
462 * @path_chroot:
463 * Check for permission to change root directory.
464 * @path contains the path structure.
465 * Return 0 if permission is granted.
450 * @inode_readlink: 466 * @inode_readlink:
451 * Check the permission to read the symbolic link. 467 * Check the permission to read the symbolic link.
452 * @dentry contains the dentry structure for the file link. 468 * @dentry contains the dentry structure for the file link.
@@ -690,6 +706,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
690 * @kernel_module_request: 706 * @kernel_module_request:
691 * Ability to trigger the kernel to automatically upcall to userspace for 707 * Ability to trigger the kernel to automatically upcall to userspace for
692 * userspace to load a kernel module with the given name. 708 * userspace to load a kernel module with the given name.
 709 * @kmod_name: name of the module requested by the kernel
693 * Return 0 if successful. 710 * Return 0 if successful.
694 * @task_setuid: 711 * @task_setuid:
695 * Check permission before setting one or more of the user identity 712 * Check permission before setting one or more of the user identity
@@ -1488,6 +1505,10 @@ struct security_operations {
1488 struct dentry *new_dentry); 1505 struct dentry *new_dentry);
1489 int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, 1506 int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
1490 struct path *new_dir, struct dentry *new_dentry); 1507 struct path *new_dir, struct dentry *new_dentry);
1508 int (*path_chmod) (struct dentry *dentry, struct vfsmount *mnt,
1509 mode_t mode);
1510 int (*path_chown) (struct path *path, uid_t uid, gid_t gid);
1511 int (*path_chroot) (struct path *path);
1491#endif 1512#endif
1492 1513
1493 int (*inode_alloc_security) (struct inode *inode); 1514 int (*inode_alloc_security) (struct inode *inode);
@@ -1557,7 +1578,7 @@ struct security_operations {
1557 void (*cred_transfer)(struct cred *new, const struct cred *old); 1578 void (*cred_transfer)(struct cred *new, const struct cred *old);
1558 int (*kernel_act_as)(struct cred *new, u32 secid); 1579 int (*kernel_act_as)(struct cred *new, u32 secid);
1559 int (*kernel_create_files_as)(struct cred *new, struct inode *inode); 1580 int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
1560 int (*kernel_module_request)(void); 1581 int (*kernel_module_request)(char *kmod_name);
1561 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); 1582 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
1562 int (*task_fix_setuid) (struct cred *new, const struct cred *old, 1583 int (*task_fix_setuid) (struct cred *new, const struct cred *old,
1563 int flags); 1584 int flags);
@@ -1822,7 +1843,7 @@ void security_commit_creds(struct cred *new, const struct cred *old);
1822void security_transfer_creds(struct cred *new, const struct cred *old); 1843void security_transfer_creds(struct cred *new, const struct cred *old);
1823int security_kernel_act_as(struct cred *new, u32 secid); 1844int security_kernel_act_as(struct cred *new, u32 secid);
1824int security_kernel_create_files_as(struct cred *new, struct inode *inode); 1845int security_kernel_create_files_as(struct cred *new, struct inode *inode);
1825int security_kernel_module_request(void); 1846int security_kernel_module_request(char *kmod_name);
1826int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); 1847int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
1827int security_task_fix_setuid(struct cred *new, const struct cred *old, 1848int security_task_fix_setuid(struct cred *new, const struct cred *old,
1828 int flags); 1849 int flags);
@@ -2387,7 +2408,7 @@ static inline int security_kernel_create_files_as(struct cred *cred,
2387 return 0; 2408 return 0;
2388} 2409}
2389 2410
2390static inline int security_kernel_module_request(void) 2411static inline int security_kernel_module_request(char *kmod_name)
2391{ 2412{
2392 return 0; 2413 return 0;
2393} 2414}
@@ -2952,6 +2973,10 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir,
2952 struct dentry *new_dentry); 2973 struct dentry *new_dentry);
2953int security_path_rename(struct path *old_dir, struct dentry *old_dentry, 2974int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
2954 struct path *new_dir, struct dentry *new_dentry); 2975 struct path *new_dir, struct dentry *new_dentry);
2976int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
2977 mode_t mode);
2978int security_path_chown(struct path *path, uid_t uid, gid_t gid);
2979int security_path_chroot(struct path *path);
2955#else /* CONFIG_SECURITY_PATH */ 2980#else /* CONFIG_SECURITY_PATH */
2956static inline int security_path_unlink(struct path *dir, struct dentry *dentry) 2981static inline int security_path_unlink(struct path *dir, struct dentry *dentry)
2957{ 2982{
@@ -3001,6 +3026,23 @@ static inline int security_path_rename(struct path *old_dir,
3001{ 3026{
3002 return 0; 3027 return 0;
3003} 3028}
3029
3030static inline int security_path_chmod(struct dentry *dentry,
3031 struct vfsmount *mnt,
3032 mode_t mode)
3033{
3034 return 0;
3035}
3036
3037static inline int security_path_chown(struct path *path, uid_t uid, gid_t gid)
3038{
3039 return 0;
3040}
3041
3042static inline int security_path_chroot(struct path *path)
3043{
3044 return 0;
3045}
3004#endif /* CONFIG_SECURITY_PATH */ 3046#endif /* CONFIG_SECURITY_PATH */
3005 3047
3006#ifdef CONFIG_KEYS 3048#ifdef CONFIG_KEYS
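The three new path hooks follow the established security_path_* pattern: the VFS calls them before the operation, and kernels without CONFIG_SECURITY_PATH (or LSMs that leave them unset) fall back to the inline stubs returning 0. A hedged sketch of an LSM filling in the new operations (the example_* names are invented, and the fields exist only under CONFIG_SECURITY_PATH):

#include <linux/security.h>

/* Illustrative LSM callbacks for the new path hooks. */
static int example_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
			      mode_t mode)
{
	return 0;	/* permit; a real LSM would consult its policy */
}

static int example_path_chown(struct path *path, uid_t uid, gid_t gid)
{
	return 0;
}

static int example_path_chroot(struct path *path)
{
	return 0;
}

static struct security_operations example_security_ops = {
	.path_chmod	= example_path_chmod,
	.path_chown	= example_path_chown,
	.path_chroot	= example_path_chroot,
};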
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
index b65c8881f07a..13337bf6c3f5 100644
--- a/include/linux/slow-work.h
+++ b/include/linux/slow-work.h
@@ -17,13 +17,20 @@
17#ifdef CONFIG_SLOW_WORK 17#ifdef CONFIG_SLOW_WORK
18 18
19#include <linux/sysctl.h> 19#include <linux/sysctl.h>
20#include <linux/timer.h>
20 21
21struct slow_work; 22struct slow_work;
23#ifdef CONFIG_SLOW_WORK_DEBUG
24struct seq_file;
25#endif
22 26
23/* 27/*
24 * The operations used to support slow work items 28 * The operations used to support slow work items
25 */ 29 */
26struct slow_work_ops { 30struct slow_work_ops {
31 /* owner */
32 struct module *owner;
33
27 /* get a ref on a work item 34 /* get a ref on a work item
28 * - return 0 if successful, -ve if not 35 * - return 0 if successful, -ve if not
29 */ 36 */
@@ -34,6 +41,11 @@ struct slow_work_ops {
34 41
35 /* execute a work item */ 42 /* execute a work item */
36 void (*execute)(struct slow_work *work); 43 void (*execute)(struct slow_work *work);
44
45#ifdef CONFIG_SLOW_WORK_DEBUG
46 /* describe a work item for debugfs */
47 void (*desc)(struct slow_work *work, struct seq_file *m);
48#endif
37}; 49};
38 50
39/* 51/*
@@ -42,13 +54,24 @@ struct slow_work_ops {
42 * queued 54 * queued
43 */ 55 */
44struct slow_work { 56struct slow_work {
57 struct module *owner; /* the owning module */
45 unsigned long flags; 58 unsigned long flags;
46#define SLOW_WORK_PENDING 0 /* item pending (further) execution */ 59#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
47#define SLOW_WORK_EXECUTING 1 /* item currently executing */ 60#define SLOW_WORK_EXECUTING 1 /* item currently executing */
48#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ 61#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
49#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ 62#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
63#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */
64#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */
50 const struct slow_work_ops *ops; /* operations table for this item */ 65 const struct slow_work_ops *ops; /* operations table for this item */
51 struct list_head link; /* link in queue */ 66 struct list_head link; /* link in queue */
67#ifdef CONFIG_SLOW_WORK_DEBUG
68 struct timespec mark; /* jiffies at which queued or exec begun */
69#endif
70};
71
72struct delayed_slow_work {
73 struct slow_work work;
74 struct timer_list timer;
52}; 75};
53 76
54/** 77/**
@@ -67,6 +90,20 @@ static inline void slow_work_init(struct slow_work *work,
67} 90}
68 91
69/** 92/**
 93 * delayed_slow_work_init - Initialise a delayed slow work item
94 * @work: The work item to initialise
95 * @ops: The operations to use to handle the slow work item
96 *
97 * Initialise a delayed slow work item.
98 */
99static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
100 const struct slow_work_ops *ops)
101{
102 init_timer(&dwork->timer);
103 slow_work_init(&dwork->work, ops);
104}
105
106/**
70 * vslow_work_init - Initialise a very slow work item 107 * vslow_work_init - Initialise a very slow work item
71 * @work: The work item to initialise 108 * @work: The work item to initialise
72 * @ops: The operations to use to handle the slow work item 109 * @ops: The operations to use to handle the slow work item
@@ -83,9 +120,40 @@ static inline void vslow_work_init(struct slow_work *work,
83 INIT_LIST_HEAD(&work->link); 120 INIT_LIST_HEAD(&work->link);
84} 121}
85 122
123/**
124 * slow_work_is_queued - Determine if a slow work item is on the work queue
 125 * @work: The work item to test
126 *
127 * Determine if the specified slow-work item is on the work queue. This
128 * returns true if it is actually on the queue.
129 *
130 * If the item is executing and has been marked for requeue when execution
131 * finishes, then false will be returned.
132 *
133 * Anyone wishing to wait for completion of execution can wait on the
134 * SLOW_WORK_EXECUTING bit.
135 */
136static inline bool slow_work_is_queued(struct slow_work *work)
137{
138 unsigned long flags = work->flags;
139 return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING);
140}
141
86extern int slow_work_enqueue(struct slow_work *work); 142extern int slow_work_enqueue(struct slow_work *work);
87extern int slow_work_register_user(void); 143extern void slow_work_cancel(struct slow_work *work);
88extern void slow_work_unregister_user(void); 144extern int slow_work_register_user(struct module *owner);
145extern void slow_work_unregister_user(struct module *owner);
146
147extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
148 unsigned long delay);
149
150static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
151{
152 slow_work_cancel(&dwork->work);
153}
154
155extern bool slow_work_sleep_till_thread_needed(struct slow_work *work,
156 signed long *_timeout);
89 157
90#ifdef CONFIG_SYSCTL 158#ifdef CONFIG_SYSCTL
91extern ctl_table slow_work_sysctls[]; 159extern ctl_table slow_work_sysctls[];
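With delayed slow work, an item can be queued to run after a timeout, and the new owner argument to slow_work_register_user() pins the providing module while its items may still execute. A hedged usage sketch (module and callback names are invented):

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/slow-work.h>

static void example_execute(struct slow_work *work)
{
	/* long-running, sleepable work goes here */
}

static int example_get_ref(struct slow_work *work) { return 0; }
static void example_put_ref(struct slow_work *work) { }

static const struct slow_work_ops example_ops = {
	.owner   = THIS_MODULE,
	.get_ref = example_get_ref,
	.put_ref = example_put_ref,
	.execute = example_execute,
};

static struct delayed_slow_work example_dwork;

static int __init example_init(void)
{
	int ret = slow_work_register_user(THIS_MODULE);

	if (ret < 0)
		return ret;
	delayed_slow_work_init(&example_dwork, &example_ops);
	/* run roughly five seconds from now */
	return delayed_slow_work_enqueue(&example_dwork, 5 * HZ);
}
module_init(example_init);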
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 39c64bae776d..7a0570e6a596 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -76,6 +76,9 @@ void smp_call_function_many(const struct cpumask *mask,
76void __smp_call_function_single(int cpuid, struct call_single_data *data, 76void __smp_call_function_single(int cpuid, struct call_single_data *data,
77 int wait); 77 int wait);
78 78
79int smp_call_function_any(const struct cpumask *mask,
80 void (*func)(void *info), void *info, int wait);
81
79/* 82/*
80 * Generic and arch helpers 83 * Generic and arch helpers
81 */ 84 */
@@ -137,9 +140,15 @@ static inline void smp_send_reschedule(int cpu) { }
137#define smp_prepare_boot_cpu() do {} while (0) 140#define smp_prepare_boot_cpu() do {} while (0)
138#define smp_call_function_many(mask, func, info, wait) \ 141#define smp_call_function_many(mask, func, info, wait) \
139 (up_smp_call_function(func, info)) 142 (up_smp_call_function(func, info))
140static inline void init_call_single_data(void) 143static inline void init_call_single_data(void) { }
144
145static inline int
146smp_call_function_any(const struct cpumask *mask, void (*func)(void *info),
147 void *info, int wait)
141{ 148{
149 return smp_call_function_single(0, func, info, wait);
142} 150}
151
143#endif /* !SMP */ 152#endif /* !SMP */
144 153
145/* 154/*
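smp_call_function_any() runs the function on any convenient CPU from the mask (preferring the local one), and the new UP stub simply runs it on CPU 0, so callers need no #ifdef. A hedged sketch (the callback is invented):

#include <linux/smp.h>
#include <linux/cpumask.h>

static void example_probe_cpu(void *info)
{
	/* runs on some CPU in the requested mask */
	*(int *)info = smp_processor_id();
}

static int example_query_one_of(const struct cpumask *mask)
{
	int cpu_used = -1;

	/* wait=1: returns only after example_probe_cpu() has run */
	smp_call_function_any(mask, example_probe_cpu, &cpu_used, 1);
	return cpu_used;
}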
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index f0ca7a7a1757..71dccfeb0d88 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -79,8 +79,6 @@
79 */ 79 */
80#include <linux/spinlock_types.h> 80#include <linux/spinlock_types.h>
81 81
82extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
83
84/* 82/*
85 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): 83 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
86 */ 84 */
@@ -102,7 +100,7 @@ do { \
102 100
103#else 101#else
104# define spin_lock_init(lock) \ 102# define spin_lock_init(lock) \
105 do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0) 103 do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
106#endif 104#endif
107 105
108#ifdef CONFIG_DEBUG_SPINLOCK 106#ifdef CONFIG_DEBUG_SPINLOCK
@@ -116,7 +114,7 @@ do { \
116} while (0) 114} while (0)
117#else 115#else
118# define rwlock_init(lock) \ 116# define rwlock_init(lock) \
119 do { *(lock) = RW_LOCK_UNLOCKED; } while (0) 117 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
120#endif 118#endif
121 119
122#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) 120#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 7a7e18fc2415..8264a7f459bc 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,137 +60,118 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
61 __releases(lock); 61 __releases(lock);
62 62
63/* 63#ifdef CONFIG_INLINE_SPIN_LOCK
64 * We inline the unlock functions in the nondebug case:
65 */
66#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
67#define __always_inline__spin_unlock
68#define __always_inline__read_unlock
69#define __always_inline__write_unlock
70#define __always_inline__spin_unlock_irq
71#define __always_inline__read_unlock_irq
72#define __always_inline__write_unlock_irq
73#endif
74
75#ifndef CONFIG_DEBUG_SPINLOCK
76#ifndef CONFIG_GENERIC_LOCKBREAK
77
78#ifdef __always_inline__spin_lock
79#define _spin_lock(lock) __spin_lock(lock) 64#define _spin_lock(lock) __spin_lock(lock)
80#endif 65#endif
81 66
82#ifdef __always_inline__read_lock 67#ifdef CONFIG_INLINE_READ_LOCK
83#define _read_lock(lock) __read_lock(lock) 68#define _read_lock(lock) __read_lock(lock)
84#endif 69#endif
85 70
86#ifdef __always_inline__write_lock 71#ifdef CONFIG_INLINE_WRITE_LOCK
87#define _write_lock(lock) __write_lock(lock) 72#define _write_lock(lock) __write_lock(lock)
88#endif 73#endif
89 74
90#ifdef __always_inline__spin_lock_bh 75#ifdef CONFIG_INLINE_SPIN_LOCK_BH
91#define _spin_lock_bh(lock) __spin_lock_bh(lock) 76#define _spin_lock_bh(lock) __spin_lock_bh(lock)
92#endif 77#endif
93 78
94#ifdef __always_inline__read_lock_bh 79#ifdef CONFIG_INLINE_READ_LOCK_BH
95#define _read_lock_bh(lock) __read_lock_bh(lock) 80#define _read_lock_bh(lock) __read_lock_bh(lock)
96#endif 81#endif
97 82
98#ifdef __always_inline__write_lock_bh 83#ifdef CONFIG_INLINE_WRITE_LOCK_BH
99#define _write_lock_bh(lock) __write_lock_bh(lock) 84#define _write_lock_bh(lock) __write_lock_bh(lock)
100#endif 85#endif
101 86
102#ifdef __always_inline__spin_lock_irq 87#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
103#define _spin_lock_irq(lock) __spin_lock_irq(lock) 88#define _spin_lock_irq(lock) __spin_lock_irq(lock)
104#endif 89#endif
105 90
106#ifdef __always_inline__read_lock_irq 91#ifdef CONFIG_INLINE_READ_LOCK_IRQ
107#define _read_lock_irq(lock) __read_lock_irq(lock) 92#define _read_lock_irq(lock) __read_lock_irq(lock)
108#endif 93#endif
109 94
110#ifdef __always_inline__write_lock_irq 95#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
111#define _write_lock_irq(lock) __write_lock_irq(lock) 96#define _write_lock_irq(lock) __write_lock_irq(lock)
112#endif 97#endif
113 98
114#ifdef __always_inline__spin_lock_irqsave 99#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
115#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) 100#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
116#endif 101#endif
117 102
118#ifdef __always_inline__read_lock_irqsave 103#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
119#define _read_lock_irqsave(lock) __read_lock_irqsave(lock) 104#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
120#endif 105#endif
121 106
122#ifdef __always_inline__write_lock_irqsave 107#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
123#define _write_lock_irqsave(lock) __write_lock_irqsave(lock) 108#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
124#endif 109#endif
125 110
126#endif /* !CONFIG_GENERIC_LOCKBREAK */ 111#ifdef CONFIG_INLINE_SPIN_TRYLOCK
127
128#ifdef __always_inline__spin_trylock
129#define _spin_trylock(lock) __spin_trylock(lock) 112#define _spin_trylock(lock) __spin_trylock(lock)
130#endif 113#endif
131 114
132#ifdef __always_inline__read_trylock 115#ifdef CONFIG_INLINE_READ_TRYLOCK
133#define _read_trylock(lock) __read_trylock(lock) 116#define _read_trylock(lock) __read_trylock(lock)
134#endif 117#endif
135 118
136#ifdef __always_inline__write_trylock 119#ifdef CONFIG_INLINE_WRITE_TRYLOCK
137#define _write_trylock(lock) __write_trylock(lock) 120#define _write_trylock(lock) __write_trylock(lock)
138#endif 121#endif
139 122
140#ifdef __always_inline__spin_trylock_bh 123#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
141#define _spin_trylock_bh(lock) __spin_trylock_bh(lock) 124#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
142#endif 125#endif
143 126
144#ifdef __always_inline__spin_unlock 127#ifdef CONFIG_INLINE_SPIN_UNLOCK
145#define _spin_unlock(lock) __spin_unlock(lock) 128#define _spin_unlock(lock) __spin_unlock(lock)
146#endif 129#endif
147 130
148#ifdef __always_inline__read_unlock 131#ifdef CONFIG_INLINE_READ_UNLOCK
149#define _read_unlock(lock) __read_unlock(lock) 132#define _read_unlock(lock) __read_unlock(lock)
150#endif 133#endif
151 134
152#ifdef __always_inline__write_unlock 135#ifdef CONFIG_INLINE_WRITE_UNLOCK
153#define _write_unlock(lock) __write_unlock(lock) 136#define _write_unlock(lock) __write_unlock(lock)
154#endif 137#endif
155 138
156#ifdef __always_inline__spin_unlock_bh 139#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
157#define _spin_unlock_bh(lock) __spin_unlock_bh(lock) 140#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
158#endif 141#endif
159 142
160#ifdef __always_inline__read_unlock_bh 143#ifdef CONFIG_INLINE_READ_UNLOCK_BH
161#define _read_unlock_bh(lock) __read_unlock_bh(lock) 144#define _read_unlock_bh(lock) __read_unlock_bh(lock)
162#endif 145#endif
163 146
164#ifdef __always_inline__write_unlock_bh 147#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
165#define _write_unlock_bh(lock) __write_unlock_bh(lock) 148#define _write_unlock_bh(lock) __write_unlock_bh(lock)
166#endif 149#endif
167 150
168#ifdef __always_inline__spin_unlock_irq 151#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
169#define _spin_unlock_irq(lock) __spin_unlock_irq(lock) 152#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
170#endif 153#endif
171 154
172#ifdef __always_inline__read_unlock_irq 155#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
173#define _read_unlock_irq(lock) __read_unlock_irq(lock) 156#define _read_unlock_irq(lock) __read_unlock_irq(lock)
174#endif 157#endif
175 158
176#ifdef __always_inline__write_unlock_irq 159#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
177#define _write_unlock_irq(lock) __write_unlock_irq(lock) 160#define _write_unlock_irq(lock) __write_unlock_irq(lock)
178#endif 161#endif
179 162
180#ifdef __always_inline__spin_unlock_irqrestore 163#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
181#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) 164#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
182#endif 165#endif
183 166
184#ifdef __always_inline__read_unlock_irqrestore 167#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
185#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) 168#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
186#endif 169#endif
187 170
188#ifdef __always_inline__write_unlock_irqrestore 171#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
189#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) 172#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
190#endif 173#endif
191 174
192#endif /* CONFIG_DEBUG_SPINLOCK */
193
194static inline int __spin_trylock(spinlock_t *lock) 175static inline int __spin_trylock(spinlock_t *lock)
195{ 176{
196 preempt_disable(); 177 preempt_disable();
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index aca0eee53930..4765d97dcafb 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -48,6 +48,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp);
48int srcu_read_lock(struct srcu_struct *sp) __acquires(sp); 48int srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
49void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); 49void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
50void synchronize_srcu(struct srcu_struct *sp); 50void synchronize_srcu(struct srcu_struct *sp);
51void synchronize_srcu_expedited(struct srcu_struct *sp);
51long srcu_batches_completed(struct srcu_struct *sp); 52long srcu_batches_completed(struct srcu_struct *sp);
52 53
53#endif 54#endif
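synchronize_srcu_expedited() has the same semantics as synchronize_srcu() but trades extra CPU work for a much shorter wait, which helps update paths that hold other resources. A minimal sketch of an updater choosing the expedited variant (names are illustrative; the srcu_struct is assumed to have been set up with init_srcu_struct()):

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct srcu_struct example_srcu;
static struct example_cfg { int value; } *example_cfg_ptr;

static void example_replace_cfg(struct example_cfg *newcfg)
{
	struct example_cfg *old = example_cfg_ptr;

	rcu_assign_pointer(example_cfg_ptr, newcfg);
	/* expedited: don't make callers wait for a full SRCU grace period */
	synchronize_srcu_expedited(&example_srcu);
	kfree(old);
}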
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index cd15df6c63cd..5e781d824e6d 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -301,6 +301,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
301#define pm_notifier(fn, pri) do { (void)(fn); } while (0) 301#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
302#endif /* !CONFIG_PM_SLEEP */ 302#endif /* !CONFIG_PM_SLEEP */
303 303
304extern struct mutex pm_mutex;
305
304#ifndef CONFIG_HIBERNATION 306#ifndef CONFIG_HIBERNATION
305static inline void register_nosave_region(unsigned long b, unsigned long e) 307static inline void register_nosave_region(unsigned long b, unsigned long e)
306{ 308{
@@ -308,8 +310,23 @@ static inline void register_nosave_region(unsigned long b, unsigned long e)
308static inline void register_nosave_region_late(unsigned long b, unsigned long e) 310static inline void register_nosave_region_late(unsigned long b, unsigned long e)
309{ 311{
310} 312}
311#endif
312 313
313extern struct mutex pm_mutex; 314static inline void lock_system_sleep(void) {}
315static inline void unlock_system_sleep(void) {}
316
317#else
318
319/* Let some subsystems like memory hotadd exclude hibernation */
320
321static inline void lock_system_sleep(void)
322{
323 mutex_lock(&pm_mutex);
324}
325
326static inline void unlock_system_sleep(void)
327{
328 mutex_unlock(&pm_mutex);
329}
330#endif
314 331
315#endif /* _LINUX_SUSPEND_H */ 332#endif /* _LINUX_SUSPEND_H */
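lock_system_sleep()/unlock_system_sleep() let subsystems such as memory hot-add serialize against hibernation by taking pm_mutex, and compile to no-ops when CONFIG_HIBERNATION is off. A hedged sketch:

#include <linux/suspend.h>

static int example_memory_hotadd(void)
{
	int ret;

	lock_system_sleep();	/* exclude hibernation while we resize */
	ret = 0;		/* ... the actual hot-add work would go here ... */
	unlock_system_sleep();
	return ret;
}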
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 73b1f1cec423..febedcf67c7e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -7,6 +7,8 @@ struct device;
7struct dma_attrs; 7struct dma_attrs;
8struct scatterlist; 8struct scatterlist;
9 9
10extern int swiotlb_force;
11
10/* 12/*
11 * Maximum allowable number of contiguous slabs to map, 13 * Maximum allowable number of contiguous slabs to map,
12 * must be a power of 2. What is the appropriate value ? 14 * must be a power of 2. What is the appropriate value ?
@@ -20,8 +22,7 @@ struct scatterlist;
20 */ 22 */
21#define IO_TLB_SHIFT 11 23#define IO_TLB_SHIFT 11
22 24
23extern void 25extern void swiotlb_init(int verbose);
24swiotlb_init(void);
25 26
26extern void 27extern void
27*swiotlb_alloc_coherent(struct device *hwdev, size_t size, 28*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -88,4 +89,11 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
88extern int 89extern int
89swiotlb_dma_supported(struct device *hwdev, u64 mask); 90swiotlb_dma_supported(struct device *hwdev, u64 mask);
90 91
92#ifdef CONFIG_SWIOTLB
93extern void __init swiotlb_free(void);
94#else
95static inline void swiotlb_free(void) { }
96#endif
97
98extern void swiotlb_print_info(void);
91#endif /* __LINUX_SWIOTLB_H */ 99#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 3338b3f5c21a..ac5d1c1285d9 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -27,9 +27,16 @@
27 */ 27 */
28#define TPM_ANY_NUM 0xFFFF 28#define TPM_ANY_NUM 0xFFFF
29 29
30#if defined(CONFIG_TCG_TPM) 30#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
31 31
32extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); 32extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
33extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); 33extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
34#else
35static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
36 return -ENODEV;
37}
38static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) {
39 return -ENODEV;
40}
34#endif 41#endif
35#endif 42#endif
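With the new stubs, code that optionally extends a PCR no longer needs its own CONFIG_TCG_TPM conditionals; when no TPM support is built in, the calls simply return -ENODEV. A hedged sketch of a measurement path (the PCR index and function name are illustrative):

#include <linux/tpm.h>
#include <linux/kernel.h>

#define EXAMPLE_PCR	10	/* illustrative; e.g. IMA measures into PCR 10 */

static void example_extend_measurement(const u8 *sha1_digest)
{
	int rc = tpm_pcr_extend(TPM_ANY_NUM, EXAMPLE_PCR, sha1_digest);

	if (rc && rc != -ENODEV)
		pr_err("example: PCR extend failed: %d\n", rc);
	/* -ENODEV just means no TPM is present or configured */
}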
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 7afca0d72139..7ffa11f06232 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -70,8 +70,8 @@ struct vt_event {
70#define VT_EVENT_UNBLANK 0x0004 /* Screen unblank */ 70#define VT_EVENT_UNBLANK 0x0004 /* Screen unblank */
71#define VT_EVENT_RESIZE 0x0008 /* Resize display */ 71#define VT_EVENT_RESIZE 0x0008 /* Resize display */
72#define VT_MAX_EVENT 0x000F 72#define VT_MAX_EVENT 0x000F
73 unsigned int old; /* Old console */ 73 unsigned int oldev; /* Old console */
74 unsigned int new; /* New console (if changing) */ 74 unsigned int newev; /* New console (if changing) */
75 unsigned int pad[4]; /* Padding for expansion */ 75 unsigned int pad[4]; /* Padding for expansion */
76}; 76};
77 77
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index c75b960c8ac8..998c30fc8981 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1283,6 +1283,12 @@ enum ieee80211_filter_flags {
1283 * 1283 *
1284 * These flags are used with the ampdu_action() callback in 1284 * These flags are used with the ampdu_action() callback in
1285 * &struct ieee80211_ops to indicate which action is needed. 1285 * &struct ieee80211_ops to indicate which action is needed.
1286 *
1287 * Note that drivers MUST be able to deal with a TX aggregation
1288 * session being stopped even before they OK'ed starting it by
1289 * calling ieee80211_start_tx_ba_cb(_irqsafe), because the peer
1290 * might receive the addBA frame and send a delBA right away!
1291 *
1286 * @IEEE80211_AMPDU_RX_START: start Rx aggregation 1292 * @IEEE80211_AMPDU_RX_START: start Rx aggregation
1287 * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation 1293 * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation
1288 * @IEEE80211_AMPDU_TX_START: start Tx aggregation 1294 * @IEEE80211_AMPDU_TX_START: start Tx aggregation
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 6e5f0e0c7967..0a474568b003 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -893,7 +893,6 @@ struct sctp_transport {
893 */ 893 */
894 /* RTO : The current retransmission timeout value. */ 894 /* RTO : The current retransmission timeout value. */
895 unsigned long rto; 895 unsigned long rto;
896 unsigned long last_rto;
897 896
898 __u32 rtt; /* This is the most recent RTT. */ 897 __u32 rtt; /* This is the most recent RTT. */
899 898
@@ -1980,7 +1979,7 @@ void sctp_assoc_set_primary(struct sctp_association *,
1980void sctp_assoc_del_nonprimary_peers(struct sctp_association *, 1979void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
1981 struct sctp_transport *); 1980 struct sctp_transport *);
1982int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, 1981int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *,
1983 gfp_t); 1982 sctp_scope_t, gfp_t);
1984int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, 1983int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
1985 struct sctp_cookie*, 1984 struct sctp_cookie*,
1986 gfp_t gfp); 1985 gfp_t gfp);
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h
index 904468a191ef..afc2bfb9e917 100644
--- a/include/pcmcia/cs.h
+++ b/include/pcmcia/cs.h
@@ -15,6 +15,10 @@
15#ifndef _LINUX_CS_H 15#ifndef _LINUX_CS_H
16#define _LINUX_CS_H 16#define _LINUX_CS_H
17 17
18#ifdef __KERNEL__
19#include <linux/interrupt.h>
20#endif
21
18/* For AccessConfigurationRegister */ 22/* For AccessConfigurationRegister */
19typedef struct conf_reg_t { 23typedef struct conf_reg_t {
20 u_char Function; 24 u_char Function;
@@ -111,11 +115,9 @@ typedef struct io_req_t {
111 115
112/* For RequestIRQ and ReleaseIRQ */ 116/* For RequestIRQ and ReleaseIRQ */
113typedef struct irq_req_t { 117typedef struct irq_req_t {
114 u_int Attributes; 118 u_int Attributes;
115 u_int AssignedIRQ; 119 u_int AssignedIRQ;
116 u_int IRQInfo1, IRQInfo2; /* IRQInfo2 is ignored */ 120 irq_handler_t Handler;
117 void *Handler;
118 void *Instance;
119} irq_req_t; 121} irq_req_t;
120 122
121/* Attributes for RequestIRQ and ReleaseIRQ */ 123/* Attributes for RequestIRQ and ReleaseIRQ */
@@ -125,7 +127,7 @@ typedef struct irq_req_t {
125#define IRQ_TYPE_DYNAMIC_SHARING 0x02 127#define IRQ_TYPE_DYNAMIC_SHARING 0x02
126#define IRQ_FORCED_PULSE 0x04 128#define IRQ_FORCED_PULSE 0x04
127#define IRQ_FIRST_SHARED 0x08 129#define IRQ_FIRST_SHARED 0x08
128#define IRQ_HANDLE_PRESENT 0x10 130//#define IRQ_HANDLE_PRESENT 0x10
129#define IRQ_PULSE_ALLOCATED 0x100 131#define IRQ_PULSE_ALLOCATED 0x100
130 132
131/* Bits in IRQInfo1 field */ 133/* Bits in IRQInfo1 field */
diff --git a/include/pcmcia/cs_types.h b/include/pcmcia/cs_types.h
index 315965a37930..f5e3b8386c8f 100644
--- a/include/pcmcia/cs_types.h
+++ b/include/pcmcia/cs_types.h
@@ -26,8 +26,7 @@ typedef u_int event_t;
26typedef u_char cisdata_t; 26typedef u_char cisdata_t;
27typedef u_short page_t; 27typedef u_short page_t;
28 28
29struct window_t; 29typedef unsigned long window_handle_t;
30typedef struct window_t *window_handle_t;
31 30
32struct region_t; 31struct region_t;
33typedef struct region_t *memory_handle_t; 32typedef struct region_t *memory_handle_t;
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index a2be80b9a095..d403c12f7978 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -34,6 +34,7 @@
34struct pcmcia_socket; 34struct pcmcia_socket;
35struct pcmcia_device; 35struct pcmcia_device;
36struct config_t; 36struct config_t;
37struct net_device;
37 38
38/* dynamic device IDs for PCMCIA device drivers. See 39/* dynamic device IDs for PCMCIA device drivers. See
39 * Documentation/pcmcia/driver.txt for details. 40 * Documentation/pcmcia/driver.txt for details.
@@ -137,65 +138,39 @@ struct pcmcia_device {
137#define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev) 138#define to_pcmcia_dev(n) container_of(n, struct pcmcia_device, dev)
138#define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv) 139#define to_pcmcia_drv(n) container_of(n, struct pcmcia_driver, drv)
139 140
140/* deprecated -- don't use! */
141#define handle_to_dev(handle) (handle->dev)
142 141
143 142/*
144/* (deprecated) error reporting by PCMCIA devices. Use dev_printk() 143 * CIS access.
145 * or dev_dbg() directly in the driver, without referring to pcmcia_error_func() 144 *
146 * and/or pcmcia_error_ret() for those functions will go away soon. 145 * Please use the following functions to access CIS tuples:
147 */ 146 * - pcmcia_get_tuple()
148enum service { 147 * - pcmcia_loop_tuple()
149 AccessConfigurationRegister, AddSocketServices, 148 * - pcmcia_get_mac_from_cis()
150 AdjustResourceInfo, CheckEraseQueue, CloseMemory, CopyMemory, 149 *
151 DeregisterClient, DeregisterEraseQueue, GetCardServicesInfo, 150 * To parse a tuple_t, pcmcia_parse_tuple() exists. Its interface
152 GetClientInfo, GetConfigurationInfo, GetEventMask, 151 * might change in future.
153 GetFirstClient, GetFirstPartion, GetFirstRegion, GetFirstTuple,
154 GetNextClient, GetNextPartition, GetNextRegion, GetNextTuple,
155 GetStatus, GetTupleData, MapLogSocket, MapLogWindow, MapMemPage,
156 MapPhySocket, MapPhyWindow, ModifyConfiguration, ModifyWindow,
157 OpenMemory, ParseTuple, ReadMemory, RegisterClient,
158 RegisterEraseQueue, RegisterMTD, RegisterTimer,
159 ReleaseConfiguration, ReleaseExclusive, ReleaseIO, ReleaseIRQ,
160 ReleaseSocketMask, ReleaseWindow, ReplaceSocketServices,
161 RequestConfiguration, RequestExclusive, RequestIO, RequestIRQ,
162 RequestSocketMask, RequestWindow, ResetCard, ReturnSSEntry,
163 SetEventMask, SetRegion, ValidateCIS, VendorSpecific,
164 WriteMemory, BindDevice, BindMTD, ReportError,
165 SuspendCard, ResumeCard, EjectCard, InsertCard, ReplaceCIS,
166 GetFirstWindow, GetNextWindow, GetMemPage
167};
168const char *pcmcia_error_func(int func);
169const char *pcmcia_error_ret(int ret);
170
171#define cs_error(p_dev, func, ret) \
172 { \
173 dev_printk(KERN_NOTICE, &p_dev->dev, \
174 "%s : %s\n", \
175 pcmcia_error_func(func), \
176 pcmcia_error_ret(ret)); \
177 }
178
179/* CIS access.
180 * Use the pcmcia_* versions in PCMCIA drivers
181 */ 152 */
182int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse);
183 153
184int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, 154/* get the very first CIS entry of type @code. Note that buf is pointer
185 tuple_t *tuple); 155 * to u8 *buf; and that you need to kfree(buf) afterwards. */
186#define pcmcia_get_first_tuple(p_dev, tuple) \ 156size_t pcmcia_get_tuple(struct pcmcia_device *p_dev, cisdata_t code,
187 pccard_get_first_tuple(p_dev->socket, p_dev->func, tuple) 157 u8 **buf);
188 158
189int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, 159/* loop over CIS entries */
190 tuple_t *tuple); 160int pcmcia_loop_tuple(struct pcmcia_device *p_dev, cisdata_t code,
191#define pcmcia_get_next_tuple(p_dev, tuple) \ 161 int (*loop_tuple) (struct pcmcia_device *p_dev,
192 pccard_get_next_tuple(p_dev->socket, p_dev->func, tuple) 162 tuple_t *tuple,
163 void *priv_data),
164 void *priv_data);
193 165
194int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple); 166/* get the MAC address from CISTPL_FUNCE */
195#define pcmcia_get_tuple_data(p_dev, tuple) \ 167int pcmcia_get_mac_from_cis(struct pcmcia_device *p_dev,
196 pccard_get_tuple_data(p_dev->socket, tuple) 168 struct net_device *dev);
197 169
198 170
171/* parse a tuple_t */
172int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse);
173
199/* loop CIS entries for valid configuration */ 174/* loop CIS entries for valid configuration */
200int pcmcia_loop_config(struct pcmcia_device *p_dev, 175int pcmcia_loop_config(struct pcmcia_device *p_dev,
201 int (*conf_check) (struct pcmcia_device *p_dev, 176 int (*conf_check) (struct pcmcia_device *p_dev,
@@ -221,12 +196,11 @@ int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req);
221int pcmcia_request_configuration(struct pcmcia_device *p_dev, 196int pcmcia_request_configuration(struct pcmcia_device *p_dev,
222 config_req_t *req); 197 config_req_t *req);
223 198
224int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, 199int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req,
225 window_handle_t *wh); 200 window_handle_t *wh);
226int pcmcia_release_window(window_handle_t win); 201int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t win);
227 202int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t win,
228int pcmcia_get_mem_page(window_handle_t win, memreq_t *req); 203 memreq_t *req);
229int pcmcia_map_mem_page(window_handle_t win, memreq_t *req);
230 204
231int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod); 205int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod);
232void pcmcia_disable_device(struct pcmcia_device *p_dev); 206void pcmcia_disable_device(struct pcmcia_device *p_dev);
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index e0f6feb8588c..7c23be706f12 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -107,15 +107,6 @@ typedef struct io_window_t {
107 struct resource *res; 107 struct resource *res;
108} io_window_t; 108} io_window_t;
109 109
110#define WINDOW_MAGIC 0xB35C
111typedef struct window_t {
112 u_short magic;
113 u_short index;
114 struct pcmcia_device *handle;
115 struct pcmcia_socket *sock;
116 pccard_mem_map ctl;
117} window_t;
118
119/* Maximum number of IO windows per socket */ 110/* Maximum number of IO windows per socket */
120#define MAX_IO_WIN 2 111#define MAX_IO_WIN 2
121 112
@@ -155,7 +146,7 @@ struct pcmcia_socket {
155 u_int Config; 146 u_int Config;
156 } irq; 147 } irq;
157 io_window_t io[MAX_IO_WIN]; 148 io_window_t io[MAX_IO_WIN];
158 window_t win[MAX_WIN]; 149 pccard_mem_map win[MAX_WIN];
159 struct list_head cis_cache; 150 struct list_head cis_cache;
160 size_t fake_cis_len; 151 size_t fake_cis_len;
161 u8 *fake_cis; 152 u8 *fake_cis;
@@ -172,7 +163,7 @@ struct pcmcia_socket {
172 u_int irq_mask; 163 u_int irq_mask;
173 u_int map_size; 164 u_int map_size;
174 u_int io_offset; 165 u_int io_offset;
175 u_char pci_irq; 166 u_int pci_irq;
176 struct pci_dev * cb_dev; 167 struct pci_dev * cb_dev;
177 168
178 169
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 9af48cbf0036..f097ae340bc1 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -145,6 +145,7 @@ struct scsi_device {
145 unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ 145 unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */
146 unsigned last_sector_bug:1; /* do not use multisector accesses on 146 unsigned last_sector_bug:1; /* do not use multisector accesses on
147 SD_LAST_BUGGY_SECTORS */ 147 SD_LAST_BUGGY_SECTORS */
148 unsigned is_visible:1; /* is the device visible in sysfs */
148 149
149 DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ 150 DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
150 struct list_head event_list; /* asserted events */ 151 struct list_head event_list; /* asserted events */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 6e728b176904..47941fc5aba7 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -797,30 +797,23 @@ static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
797 797
798static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type) 798static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
799{ 799{
800 switch (target_type) { 800 static unsigned char cap[] = { 0,
801 case 1: 801 SHOST_DIF_TYPE1_PROTECTION,
802 if (shost->prot_capabilities & SHOST_DIF_TYPE1_PROTECTION) 802 SHOST_DIF_TYPE2_PROTECTION,
803 return target_type; 803 SHOST_DIF_TYPE3_PROTECTION };
804 case 2:
805 if (shost->prot_capabilities & SHOST_DIF_TYPE2_PROTECTION)
806 return target_type;
807 case 3:
808 if (shost->prot_capabilities & SHOST_DIF_TYPE3_PROTECTION)
809 return target_type;
810 }
811 804
812 return 0; 805 return shost->prot_capabilities & cap[target_type] ? target_type : 0;
813} 806}
814 807
815static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type) 808static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
816{ 809{
817#if defined(CONFIG_BLK_DEV_INTEGRITY) 810#if defined(CONFIG_BLK_DEV_INTEGRITY)
818 switch (target_type) { 811 static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
819 case 0: return shost->prot_capabilities & SHOST_DIX_TYPE0_PROTECTION; 812 SHOST_DIX_TYPE1_PROTECTION,
820 case 1: return shost->prot_capabilities & SHOST_DIX_TYPE1_PROTECTION; 813 SHOST_DIX_TYPE2_PROTECTION,
821 case 2: return shost->prot_capabilities & SHOST_DIX_TYPE2_PROTECTION; 814 SHOST_DIX_TYPE3_PROTECTION };
822 case 3: return shost->prot_capabilities & SHOST_DIX_TYPE3_PROTECTION; 815
823 } 816 return shost->prot_capabilities & cap[target_type];
824#endif 817#endif
825 return 0; 818 return 0;
826} 819}
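The rewritten scsi_host_dif_capable() replaces the switch with a small lookup table indexed by DIF type, so only the exact capability bit for the requested type grants it. The same pattern can be exercised in isolation with a user-space model (the CAP_T* constants are stand-ins, not the real SHOST_* values):

#include <assert.h>

enum { CAP_T1 = 1 << 0, CAP_T2 = 1 << 1, CAP_T3 = 1 << 2 };

/* Model of the table-based capability check: index 0 means "no DIF". */
static unsigned int dif_capable(unsigned int host_caps, unsigned int type)
{
	static const unsigned char cap[] = { 0, CAP_T1, CAP_T2, CAP_T3 };

	return (host_caps & cap[type]) ? type : 0;
}

int main(void)
{
	assert(dif_capable(CAP_T1 | CAP_T3, 1) == 1);
	assert(dif_capable(CAP_T1 | CAP_T3, 2) == 0);	/* type 2 unsupported */
	assert(dif_capable(CAP_T1 | CAP_T3, 3) == 3);
	return 0;
}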
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 397dff2dbd5a..fb726ac7caee 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -1,5 +1,6 @@
1#undef TRACE_SYSTEM 1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM syscalls 2#define TRACE_SYSTEM raw_syscalls
3#define TRACE_INCLUDE_FILE syscalls
3 4
4#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ) 5#if !defined(_TRACE_EVENTS_SYSCALLS_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_EVENTS_SYSCALLS_H 6#define _TRACE_EVENTS_SYSCALLS_H
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 2c9c073e45ad..d1b3de9c1a71 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -195,7 +195,7 @@
195#undef __get_str 195#undef __get_str
196 196
197#undef TP_printk 197#undef TP_printk
198#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) 198#define TP_printk(fmt, args...) "\"%s\", %s\n", fmt, __stringify(args)
199 199
200#undef TP_fast_assign 200#undef TP_fast_assign
201#define TP_fast_assign(args...) args 201#define TP_fast_assign(args...) args
diff --git a/include/trace/power.h b/include/trace/power.h
deleted file mode 100644
index ef204666e983..000000000000
--- a/include/trace/power.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef _TRACE_POWER_H
2#define _TRACE_POWER_H
3
4#include <linux/ktime.h>
5#include <linux/tracepoint.h>
6
7enum {
8 POWER_NONE = 0,
9 POWER_CSTATE = 1,
10 POWER_PSTATE = 2,
11};
12
13struct power_trace {
14 ktime_t stamp;
15 ktime_t end;
16 int type;
17 int state;
18};
19
20DECLARE_TRACE(power_start,
21 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
22 TP_ARGS(it, type, state));
23
24DECLARE_TRACE(power_mark,
25 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
26 TP_ARGS(it, type, state));
27
28DECLARE_TRACE(power_end,
29 TP_PROTO(struct power_trace *it),
30 TP_ARGS(it));
31
32#endif /* _TRACE_POWER_H */
diff --git a/init/Kconfig b/init/Kconfig
index 9e03ef8b311e..9ee778294756 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -334,6 +334,15 @@ config TREE_PREEMPT_RCU
334 is also required. It also scales down nicely to 334 is also required. It also scales down nicely to
335 smaller systems. 335 smaller systems.
336 336
337config TINY_RCU
338 bool "UP-only small-memory-footprint RCU"
339 depends on !SMP
340 help
341 This option selects the RCU implementation that is
342 designed for UP systems from which real-time response
343 is not required. This option greatly reduces the
344 memory footprint of RCU.
345
337endchoice 346endchoice
338 347
339config RCU_TRACE 348config RCU_TRACE
@@ -606,7 +615,7 @@ config SYSFS_DEPRECATED
606 bool 615 bool
607 616
608config SYSFS_DEPRECATED_V2 617config SYSFS_DEPRECATED_V2
609 bool "remove sysfs features which may confuse old userspace tools" 618 bool "enable deprecated sysfs features which may confuse old userspace tools"
610 depends on SYSFS 619 depends on SYSFS
611 default n 620 default n
612 select SYSFS_DEPRECATED 621 select SYSFS_DEPRECATED
@@ -1098,6 +1107,16 @@ config SLOW_WORK
1098 1107
1099 See Documentation/slow-work.txt. 1108 See Documentation/slow-work.txt.
1100 1109
1110config SLOW_WORK_DEBUG
1111 bool "Slow work debugging through debugfs"
1112 default n
1113 depends on SLOW_WORK && DEBUG_FS
1114 help
1115 Display the contents of the slow work run queue through debugfs,
1116 including items currently executing.
1117
1118 See Documentation/slow-work.txt.
1119
1101endmenu # General setup 1120endmenu # General setup
1102 1121
1103config HAVE_GENERIC_DMA_COHERENT 1122config HAVE_GENERIC_DMA_COHERENT
@@ -1210,3 +1229,4 @@ source "block/Kconfig"
1210config PREEMPT_NOTIFIERS 1229config PREEMPT_NOTIFIERS
1211 bool 1230 bool
1212 1231
1232source "kernel/Kconfig.locks"
diff --git a/init/main.c b/init/main.c
index 5988debfc505..4051d75dd2d6 100644
--- a/init/main.c
+++ b/init/main.c
@@ -251,7 +251,7 @@ early_param("loglevel", loglevel);
251 251
252/* 252/*
253 * Unknown boot options get handed to init, unless they look like 253 * Unknown boot options get handed to init, unless they look like
254 * failed parameters 254 * unused parameters (modprobe will find them in /proc/cmdline).
255 */ 255 */
256static int __init unknown_bootoption(char *param, char *val) 256static int __init unknown_bootoption(char *param, char *val)
257{ 257{
@@ -272,14 +272,9 @@ static int __init unknown_bootoption(char *param, char *val)
272 if (obsolete_checksetup(param)) 272 if (obsolete_checksetup(param))
273 return 0; 273 return 0;
274 274
275 /* 275 /* Unused module parameter. */
276 * Preemptive maintenance for "why didn't my misspelled command 276 if (strchr(param, '.') && (!val || strchr(param, '.') < val))
277 * line work?"
278 */
279 if (strchr(param, '.') && (!val || strchr(param, '.') < val)) {
280 printk(KERN_ERR "Unknown boot option `%s': ignoring\n", param);
281 return 0; 277 return 0;
282 }
283 278
284 if (panic_later) 279 if (panic_later)
285 return 0; 280 return 0;
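
The removed branch printed an "Unknown boot option" error before ignoring a dotted parameter; after this change such parameters are accepted silently as options for modules that are not (yet) loaded. The test treats a parameter as a module option when a '.' appears in the name part, i.e. before the '=' sign. A rough user-space sketch of that predicate (illustrative only; in the kernel, param and val point into the same parsed buffer, which is what makes the pointer comparison meaningful):

    #include <stdio.h>
    #include <string.h>

    /* Mirror of the check above: the '.' must sit before the value part. */
    static int looks_like_module_param(const char *param, const char *val)
    {
            const char *dot = strchr(param, '.');

            return dot && (!val || dot < val);
    }

    int main(void)
    {
            char buf[] = "usbcore.autosuspend=5";   /* e.g. a module option */
            char *val = strchr(buf, '=') + 1;

            printf("%d\n", looks_like_module_param(buf, val));  /* prints 1 */
            return 0;
    }
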
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
new file mode 100644
index 000000000000..88c92fb44618
--- /dev/null
+++ b/kernel/Kconfig.locks
@@ -0,0 +1,202 @@
1#
2# The ARCH_INLINE foo is necessary because select ignores "depends on"
3#
4config ARCH_INLINE_SPIN_TRYLOCK
5 bool
6
7config ARCH_INLINE_SPIN_TRYLOCK_BH
8 bool
9
10config ARCH_INLINE_SPIN_LOCK
11 bool
12
13config ARCH_INLINE_SPIN_LOCK_BH
14 bool
15
16config ARCH_INLINE_SPIN_LOCK_IRQ
17 bool
18
19config ARCH_INLINE_SPIN_LOCK_IRQSAVE
20 bool
21
22config ARCH_INLINE_SPIN_UNLOCK
23 bool
24
25config ARCH_INLINE_SPIN_UNLOCK_BH
26 bool
27
28config ARCH_INLINE_SPIN_UNLOCK_IRQ
29 bool
30
31config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
32 bool
33
34
35config ARCH_INLINE_READ_TRYLOCK
36 bool
37
38config ARCH_INLINE_READ_LOCK
39 bool
40
41config ARCH_INLINE_READ_LOCK_BH
42 bool
43
44config ARCH_INLINE_READ_LOCK_IRQ
45 bool
46
47config ARCH_INLINE_READ_LOCK_IRQSAVE
48 bool
49
50config ARCH_INLINE_READ_UNLOCK
51 bool
52
53config ARCH_INLINE_READ_UNLOCK_BH
54 bool
55
56config ARCH_INLINE_READ_UNLOCK_IRQ
57 bool
58
59config ARCH_INLINE_READ_UNLOCK_IRQRESTORE
60 bool
61
62
63config ARCH_INLINE_WRITE_TRYLOCK
64 bool
65
66config ARCH_INLINE_WRITE_LOCK
67 bool
68
69config ARCH_INLINE_WRITE_LOCK_BH
70 bool
71
72config ARCH_INLINE_WRITE_LOCK_IRQ
73 bool
74
75config ARCH_INLINE_WRITE_LOCK_IRQSAVE
76 bool
77
78config ARCH_INLINE_WRITE_UNLOCK
79 bool
80
81config ARCH_INLINE_WRITE_UNLOCK_BH
82 bool
83
84config ARCH_INLINE_WRITE_UNLOCK_IRQ
85 bool
86
87config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
88 bool
89
90#
91# lock_* functions are inlined when:
92# - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
93#
94# trylock_* functions are inlined when:
95# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
96#
97# unlock and unlock_irq functions are inlined when:
98# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
99# or
100# - DEBUG_SPINLOCK=n and PREEMPT=n
101#
102# unlock_bh and unlock_irqrestore functions are inlined when:
103# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
104#
105
106config INLINE_SPIN_TRYLOCK
107 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK
108
109config INLINE_SPIN_TRYLOCK_BH
110 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH
111
112config INLINE_SPIN_LOCK
113 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
114
115config INLINE_SPIN_LOCK_BH
116 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
117 ARCH_INLINE_SPIN_LOCK_BH
118
119config INLINE_SPIN_LOCK_IRQ
120 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
121 ARCH_INLINE_SPIN_LOCK_IRQ
122
123config INLINE_SPIN_LOCK_IRQSAVE
124 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
125 ARCH_INLINE_SPIN_LOCK_IRQSAVE
126
127config INLINE_SPIN_UNLOCK
128 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
129
130config INLINE_SPIN_UNLOCK_BH
131 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
132
133config INLINE_SPIN_UNLOCK_IRQ
134 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH)
135
136config INLINE_SPIN_UNLOCK_IRQRESTORE
137 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
138
139
140config INLINE_READ_TRYLOCK
141 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK
142
143config INLINE_READ_LOCK
144 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
145
146config INLINE_READ_LOCK_BH
147 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
148 ARCH_INLINE_READ_LOCK_BH
149
150config INLINE_READ_LOCK_IRQ
151 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
152 ARCH_INLINE_READ_LOCK_IRQ
153
154config INLINE_READ_LOCK_IRQSAVE
155 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
156 ARCH_INLINE_READ_LOCK_IRQSAVE
157
158config INLINE_READ_UNLOCK
159 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK)
160
161config INLINE_READ_UNLOCK_BH
162 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH
163
164config INLINE_READ_UNLOCK_IRQ
165 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_BH)
166
167config INLINE_READ_UNLOCK_IRQRESTORE
168 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE
169
170
171config INLINE_WRITE_TRYLOCK
172 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK
173
174config INLINE_WRITE_LOCK
175 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
176
177config INLINE_WRITE_LOCK_BH
178 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
179 ARCH_INLINE_WRITE_LOCK_BH
180
181config INLINE_WRITE_LOCK_IRQ
182 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
183 ARCH_INLINE_WRITE_LOCK_IRQ
184
185config INLINE_WRITE_LOCK_IRQSAVE
186 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
187 ARCH_INLINE_WRITE_LOCK_IRQSAVE
188
189config INLINE_WRITE_UNLOCK
190 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK)
191
192config INLINE_WRITE_UNLOCK_BH
193 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH
194
195config INLINE_WRITE_UNLOCK_IRQ
196 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH)
197
198config INLINE_WRITE_UNLOCK_IRQRESTORE
199 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
200
201config MUTEX_SPIN_ON_OWNER
202 def_bool SMP && !DEBUG_MUTEXES && !HAVE_DEFAULT_NO_SPIN_MUTEXES
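
The comment block near the top of this new file gives the rules for when each generated CONFIG_INLINE_* symbol ends up set; how such a symbol is consumed is outside this file. As a hedged sketch only (the identifiers below are placeholders, not the kernel's actual helper names), a lock primitive would typically be defined inline when its symbol is enabled and declared out-of-line otherwise:

    /* Sketch of a consumer of CONFIG_INLINE_SPIN_UNLOCK; names are illustrative. */
    #ifdef CONFIG_INLINE_SPIN_UNLOCK
    static inline void example_spin_unlock(example_spinlock_t *lock)
    {
            example_arch_spin_unlock(lock);        /* expands at every call site */
    }
    #else
    void example_spin_unlock(example_spinlock_t *lock);   /* one out-of-line copy */
    #endif

Per the rules in the header comment, DEBUG_SPINLOCK=y always forces the out-of-line form, GENERIC_LOCKBREAK=y does the same for the lock-side helpers, and plain unlock/unlock_irq are additionally inlined whenever PREEMPT=n even without an ARCH_INLINE_* opt-in.
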
diff --git a/kernel/Makefile b/kernel/Makefile
index 6b7ce8173dfd..982c50e2ce53 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
83obj-$(CONFIG_TREE_RCU) += rcutree.o 83obj-$(CONFIG_TREE_RCU) += rcutree.o
84obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o 84obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
85obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o 85obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
86obj-$(CONFIG_TINY_RCU) += rcutiny.o
86obj-$(CONFIG_RELAY) += relay.o 87obj-$(CONFIG_RELAY) += relay.o
87obj-$(CONFIG_SYSCTL) += utsname_sysctl.o 88obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
88obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o 89obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
@@ -95,6 +96,7 @@ obj-$(CONFIG_X86_DS) += trace/
95obj-$(CONFIG_RING_BUFFER) += trace/ 96obj-$(CONFIG_RING_BUFFER) += trace/
96obj-$(CONFIG_SMP) += sched_cpupri.o 97obj-$(CONFIG_SMP) += sched_cpupri.o
97obj-$(CONFIG_SLOW_WORK) += slow-work.o 98obj-$(CONFIG_SLOW_WORK) += slow-work.o
99obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
98obj-$(CONFIG_PERF_EVENTS) += perf_event.o 100obj-$(CONFIG_PERF_EVENTS) += perf_event.o
99obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o 101obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
100 102
diff --git a/kernel/capability.c b/kernel/capability.c
index 4e17041963f5..7f876e60521f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -29,7 +29,6 @@ EXPORT_SYMBOL(__cap_empty_set);
29EXPORT_SYMBOL(__cap_full_set); 29EXPORT_SYMBOL(__cap_full_set);
30EXPORT_SYMBOL(__cap_init_eff_set); 30EXPORT_SYMBOL(__cap_init_eff_set);
31 31
32#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
33int file_caps_enabled = 1; 32int file_caps_enabled = 1;
34 33
35static int __init file_caps_disable(char *str) 34static int __init file_caps_disable(char *str)
@@ -38,7 +37,6 @@ static int __init file_caps_disable(char *str)
38 return 1; 37 return 1;
39} 38}
40__setup("no_file_caps", file_caps_disable); 39__setup("no_file_caps", file_caps_disable);
41#endif
42 40
43/* 41/*
44 * More recent versions of libcap are available from: 42 * More recent versions of libcap are available from:
@@ -169,8 +167,8 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
169 kernel_cap_t pE, pI, pP; 167 kernel_cap_t pE, pI, pP;
170 168
171 ret = cap_validate_magic(header, &tocopy); 169 ret = cap_validate_magic(header, &tocopy);
172 if (ret != 0) 170 if ((dataptr == NULL) || (ret != 0))
173 return ret; 171 return ((dataptr == NULL) && (ret == -EINVAL)) ? 0 : ret;
174 172
175 if (get_user(pid, &header->pid)) 173 if (get_user(pid, &header->pid))
176 return -EFAULT; 174 return -EFAULT;
@@ -238,7 +236,7 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
238SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data) 236SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
239{ 237{
240 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; 238 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
241 unsigned i, tocopy; 239 unsigned i, tocopy, copybytes;
242 kernel_cap_t inheritable, permitted, effective; 240 kernel_cap_t inheritable, permitted, effective;
243 struct cred *new; 241 struct cred *new;
244 int ret; 242 int ret;
@@ -255,8 +253,11 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
255 if (pid != 0 && pid != task_pid_vnr(current)) 253 if (pid != 0 && pid != task_pid_vnr(current))
256 return -EPERM; 254 return -EPERM;
257 255
258 if (copy_from_user(&kdata, data, 256 copybytes = tocopy * sizeof(struct __user_cap_data_struct);
259 tocopy * sizeof(struct __user_cap_data_struct))) 257 if (copybytes > sizeof(kdata))
258 return -EFAULT;
259
260 if (copy_from_user(&kdata, data, copybytes))
260 return -EFAULT; 261 return -EFAULT;
261 262
262 for (i = 0; i < tocopy; i++) { 263 for (i = 0; i < tocopy; i++) {
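
With the capget() change above, calling the syscall with a NULL data pointer and an unrecognized header version now returns 0 rather than -EINVAL, which turns the old libcap version-probing trick into an officially successful call; the preferred version is written back into the header by cap_validate_magic(), which is not part of this hunk, so that write-back is an assumption here. A user-space sketch using the raw syscall:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/capability.h>

    int main(void)
    {
            struct __user_cap_header_struct hdr = { .version = 0, .pid = 0 };

            /* NULL datap: only probing which capability version the kernel wants */
            if (syscall(SYS_capget, &hdr, NULL) == 0)
                    printf("preferred capability version: 0x%x\n", hdr.version);
            return 0;
    }
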
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index d4e841747400..0c642d51aac2 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -144,7 +144,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
144 144
145 rcu_read_lock(); 145 rcu_read_lock();
146 do_each_thread(g, t) { 146 do_each_thread(g, t) {
147 if (!--max_count) 147 if (!max_count--)
148 goto unlock; 148 goto unlock;
149 if (!--batch_count) { 149 if (!--batch_count) {
150 batch_count = HUNG_TASK_BATCHING; 150 batch_count = HUNG_TASK_BATCHING;
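
The hunk above replaces a pre-decrement with a post-decrement in the bail-out test. The two forms differ when the remaining budget is already 0: !--max_count first decrements (going negative or wrapping, depending on the counter's type, which this hunk does not show) and the test fails, whereas !max_count-- tests the old value and bails immediately. A standalone illustration of that difference:

    #include <stdio.h>

    int main(void)
    {
            int a = 0, b = 0;       /* an already-exhausted budget */

            if (!--a)               /* a becomes -1, test is false: no bail-out */
                    puts("pre-decrement bailed");
            if (!b--)               /* old value 0 is tested: bails immediately */
                    puts("post-decrement bailed");
            return 0;               /* only "post-decrement bailed" is printed */
    }
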
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c1660194d115..ba566c261adc 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -166,11 +166,11 @@ int set_irq_data(unsigned int irq, void *data)
166EXPORT_SYMBOL(set_irq_data); 166EXPORT_SYMBOL(set_irq_data);
167 167
168/** 168/**
169 * set_irq_data - set irq type data for an irq 169 * set_irq_msi - set MSI descriptor data for an irq
170 * @irq: Interrupt number 170 * @irq: Interrupt number
171 * @entry: Pointer to MSI descriptor data 171 * @entry: Pointer to MSI descriptor data
172 * 172 *
173 * Set the hardware irq controller data for an irq 173 * Set the MSI descriptor entry for an irq
174 */ 174 */
175int set_irq_msi(unsigned int irq, struct msi_desc *entry) 175int set_irq_msi(unsigned int irq, struct msi_desc *entry)
176{ 176{
@@ -590,7 +590,7 @@ out_unlock:
590} 590}
591 591
592/** 592/**
593 * handle_percpu_IRQ - Per CPU local irq handler 593 * handle_percpu_irq - Per CPU local irq handler
594 * @irq: the interrupt number 594 * @irq: the interrupt number
595 * @desc: the interrupt description structure for this irq 595 * @desc: the interrupt description structure for this irq
596 * 596 *
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 692363dd591f..0832145fea97 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -136,7 +136,7 @@ out:
136 136
137static int default_affinity_open(struct inode *inode, struct file *file) 137static int default_affinity_open(struct inode *inode, struct file *file)
138{ 138{
139 return single_open(file, default_affinity_show, NULL); 139 return single_open(file, default_affinity_show, PDE(inode)->data);
140} 140}
141 141
142static const struct file_operations default_affinity_proc_fops = { 142static const struct file_operations default_affinity_proc_fops = {
@@ -148,18 +148,28 @@ static const struct file_operations default_affinity_proc_fops = {
148}; 148};
149#endif 149#endif
150 150
151static int irq_spurious_read(char *page, char **start, off_t off, 151static int irq_spurious_proc_show(struct seq_file *m, void *v)
152 int count, int *eof, void *data)
153{ 152{
154 struct irq_desc *desc = irq_to_desc((long) data); 153 struct irq_desc *desc = irq_to_desc((long) m->private);
155 return sprintf(page, "count %u\n" 154
156 "unhandled %u\n" 155 seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
157 "last_unhandled %u ms\n", 156 desc->irq_count, desc->irqs_unhandled,
158 desc->irq_count, 157 jiffies_to_msecs(desc->last_unhandled));
159 desc->irqs_unhandled, 158 return 0;
160 jiffies_to_msecs(desc->last_unhandled)); 159}
160
161static int irq_spurious_proc_open(struct inode *inode, struct file *file)
162{
163 return single_open(file, irq_spurious_proc_show, NULL);
161} 164}
162 165
166static const struct file_operations irq_spurious_proc_fops = {
167 .open = irq_spurious_proc_open,
168 .read = seq_read,
169 .llseek = seq_lseek,
170 .release = single_release,
171};
172
163#define MAX_NAMELEN 128 173#define MAX_NAMELEN 128
164 174
165static int name_unique(unsigned int irq, struct irqaction *new_action) 175static int name_unique(unsigned int irq, struct irqaction *new_action)
@@ -204,7 +214,6 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
204void register_irq_proc(unsigned int irq, struct irq_desc *desc) 214void register_irq_proc(unsigned int irq, struct irq_desc *desc)
205{ 215{
206 char name [MAX_NAMELEN]; 216 char name [MAX_NAMELEN];
207 struct proc_dir_entry *entry;
208 217
209 if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) 218 if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
210 return; 219 return;
@@ -214,6 +223,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
214 223
215 /* create /proc/irq/1234 */ 224 /* create /proc/irq/1234 */
216 desc->dir = proc_mkdir(name, root_irq_dir); 225 desc->dir = proc_mkdir(name, root_irq_dir);
226 if (!desc->dir)
227 return;
217 228
218#ifdef CONFIG_SMP 229#ifdef CONFIG_SMP
219 /* create /proc/irq/<irq>/smp_affinity */ 230 /* create /proc/irq/<irq>/smp_affinity */
@@ -221,11 +232,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
221 &irq_affinity_proc_fops, (void *)(long)irq); 232 &irq_affinity_proc_fops, (void *)(long)irq);
222#endif 233#endif
223 234
224 entry = create_proc_entry("spurious", 0444, desc->dir); 235 proc_create_data("spurious", 0444, desc->dir,
225 if (entry) { 236 &irq_spurious_proc_fops, (void *)(long)irq);
226 entry->data = (void *)(long)irq;
227 entry->read_proc = irq_spurious_read;
228 }
229} 237}
230 238
231#undef MAX_NAMELEN 239#undef MAX_NAMELEN
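
The conversion above is the stock seq_file "single_open" pattern for small one-shot /proc files. A generic sketch of that pattern with hypothetical names (example_*, parent_dir, some_int); single_open(), seq_read(), seq_lseek(), single_release(), PDE() and proc_create_data() are the real interfaces being exercised:

    static int example_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "value %d\n", *(int *)m->private);
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, PDE(inode)->data);
    }

    static const struct file_operations example_proc_fops = {
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* registration:
     *      proc_create_data("example", 0444, parent_dir,
     *                       &example_proc_fops, &some_int);
     */

The data pointer handed to proc_create_data() is what PDE(inode)->data (and, via single_open(), m->private) later hands back to the show routine.
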
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index bd7273e6282e..22b0a6eedf24 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
104 return ok; 104 return ok;
105} 105}
106 106
107static void poll_all_shared_irqs(void) 107static void poll_spurious_irqs(unsigned long dummy)
108{ 108{
109 struct irq_desc *desc; 109 struct irq_desc *desc;
110 int i; 110 int i;
@@ -125,23 +125,11 @@ static void poll_all_shared_irqs(void)
125 try_one_irq(i, desc); 125 try_one_irq(i, desc);
126 local_irq_enable(); 126 local_irq_enable();
127 } 127 }
128}
129
130static void poll_spurious_irqs(unsigned long dummy)
131{
132 poll_all_shared_irqs();
133 128
134 mod_timer(&poll_spurious_irq_timer, 129 mod_timer(&poll_spurious_irq_timer,
135 jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 130 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
136} 131}
137 132
138#ifdef CONFIG_DEBUG_SHIRQ
139void debug_poll_all_shared_irqs(void)
140{
141 poll_all_shared_irqs();
142}
143#endif
144
145/* 133/*
146 * If 99,900 of the previous 100,000 interrupts have not been handled 134 * If 99,900 of the previous 100,000 interrupts have not been handled
147 * then assume that the IRQ is stuck in some manner. Drop a diagnostic 135 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
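
The comment ending this hunk refers to the stuck-IRQ heuristic: once roughly 100,000 interrupts have been counted on a line and at least 99,900 of them (99.9%) went unhandled, the line is treated as stuck. A hedged restatement of that threshold as code; the parameter names and the exact placement of the check (in note_interrupt()) are assumptions, since that function is not part of this hunk:

    /* Illustrative only; mirrors the 99,900-out-of-100,000 rule above. */
    static int example_irq_looks_stuck(unsigned int irq_count,
                                       unsigned int irqs_unhandled)
    {
            /* 99900 / 100000 = 99.9% of the sampled window unhandled */
            return irq_count >= 100000 && irqs_unhandled > 99900;
    }
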
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 9fcb53a11f87..25b103190364 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -80,16 +80,16 @@ int __request_module(bool wait, const char *fmt, ...)
80#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ 80#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
81 static int kmod_loop_msg; 81 static int kmod_loop_msg;
82 82
83 ret = security_kernel_module_request();
84 if (ret)
85 return ret;
86
87 va_start(args, fmt); 83 va_start(args, fmt);
88 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); 84 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89 va_end(args); 85 va_end(args);
90 if (ret >= MODULE_NAME_LEN) 86 if (ret >= MODULE_NAME_LEN)
91 return -ENAMETOOLONG; 87 return -ENAMETOOLONG;
92 88
89 ret = security_kernel_module_request(module_name);
90 if (ret)
91 return ret;
92
93 /* If modprobe needs a service that is in a module, we get a recursive 93 /* If modprobe needs a service that is in a module, we get a recursive
94 * loop. Limit the number of running kmod threads to max_threads/2 or 94 * loop. Limit the number of running kmod threads to max_threads/2 or
95 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method 95 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
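
The reordering above means security_kernel_module_request() is now consulted after the name has been formatted, so (per the new call) the LSM sees the concrete module name rather than being asked before vsnprintf() runs. For orientation, a typical caller of this path, assuming the usual request_module() convenience macro that expands to __request_module(true, ...); fstype and the warning text are invented for the example:

    /* e.g. on a mount path; "fs-%s" yields names like "fs-ext4" */
    if (request_module("fs-%s", fstype) < 0)
            printk(KERN_WARNING "no module available for filesystem %s\n",
                   fstype);
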
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 84495958e703..e5342a344c43 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1035,9 +1035,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
1035 /* Pre-allocate memory for max kretprobe instances */ 1035 /* Pre-allocate memory for max kretprobe instances */
1036 if (rp->maxactive <= 0) { 1036 if (rp->maxactive <= 0) {
1037#ifdef CONFIG_PREEMPT 1037#ifdef CONFIG_PREEMPT
1038 rp->maxactive = max(10, 2 * NR_CPUS); 1038 rp->maxactive = max(10, 2 * num_possible_cpus());
1039#else 1039#else
1040 rp->maxactive = NR_CPUS; 1040 rp->maxactive = num_possible_cpus();
1041#endif 1041#endif
1042 } 1042 }
1043 spin_lock_init(&rp->lock); 1043 spin_lock_init(&rp->lock);
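
The kprobes change above only affects default sizing: a caller that leaves maxactive at zero now gets a value derived from num_possible_cpus() instead of the compile-time NR_CPUS, which matters on kernels built with a large NR_CPUS but booted on small machines. A hedged sketch of such a caller; the probed symbol and handler are invented for illustration, while struct kretprobe and register_kretprobe()/unregister_kretprobe() are the real API:

    static int example_ret_handler(struct kretprobe_instance *ri,
                                   struct pt_regs *regs)
    {
            /* runs when the probed function returns */
            return 0;
    }

    static struct kretprobe example_rp = {
            .handler        = example_ret_handler,
            .kp.symbol_name = "do_fork",
            .maxactive      = 0,   /* 0: let register_kretprobe() pick a default */
    };

    /* register_kretprobe(&example_rp); ... unregister_kretprobe(&example_rp); */
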
diff --git a/kernel/module.c b/kernel/module.c
index 8b7d8805819d..5842a71cf052 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1187,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
1187 1187
1188 /* Count loaded sections and allocate structures */ 1188 /* Count loaded sections and allocate structures */
1189 for (i = 0; i < nsect; i++) 1189 for (i = 0; i < nsect; i++)
1190 if (sechdrs[i].sh_flags & SHF_ALLOC) 1190 if (sechdrs[i].sh_flags & SHF_ALLOC
1191 && sechdrs[i].sh_size)
1191 nloaded++; 1192 nloaded++;
1192 size[0] = ALIGN(sizeof(*sect_attrs) 1193 size[0] = ALIGN(sizeof(*sect_attrs)
1193 + nloaded * sizeof(sect_attrs->attrs[0]), 1194 + nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1207,6 +1208,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
1207 for (i = 0; i < nsect; i++) { 1208 for (i = 0; i < nsect; i++) {
1208 if (! (sechdrs[i].sh_flags & SHF_ALLOC)) 1209 if (! (sechdrs[i].sh_flags & SHF_ALLOC))
1209 continue; 1210 continue;
1211 if (!sechdrs[i].sh_size)
1212 continue;
1210 sattr->address = sechdrs[i].sh_addr; 1213 sattr->address = sechdrs[i].sh_addr;
1211 sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, 1214 sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
1212 GFP_KERNEL); 1215 GFP_KERNEL);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 947b3ad551f8..632f04c57d82 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -148,8 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
148 148
149 preempt_disable(); 149 preempt_disable();
150 mutex_acquire(&lock->dep_map, subclass, 0, ip); 150 mutex_acquire(&lock->dep_map, subclass, 0, ip);
151#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \ 151
152 !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES) 152#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
153 /* 153 /*
154 * Optimistic spinning. 154 * Optimistic spinning.
155 * 155 *
diff --git a/kernel/printk.c b/kernel/printk.c
index f38b07f78a4e..b5ac4d99c667 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -33,6 +33,7 @@
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/syscalls.h> 34#include <linux/syscalls.h>
35#include <linux/kexec.h> 35#include <linux/kexec.h>
36#include <linux/ratelimit.h>
36 37
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38 39
@@ -1376,11 +1377,11 @@ late_initcall(disable_boot_consoles);
1376 */ 1377 */
1377DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); 1378DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
1378 1379
1379int printk_ratelimit(void) 1380int __printk_ratelimit(const char *func)
1380{ 1381{
1381 return __ratelimit(&printk_ratelimit_state); 1382 return ___ratelimit(&printk_ratelimit_state, func);
1382} 1383}
1383EXPORT_SYMBOL(printk_ratelimit); 1384EXPORT_SYMBOL(__printk_ratelimit);
1384 1385
1385/** 1386/**
1386 * printk_timed_ratelimit - caller-controlled printk ratelimiting 1387 * printk_timed_ratelimit - caller-controlled printk ratelimiting
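
Callers are not expected to change for the rename above: printk_ratelimit() presumably becomes a wrapper that passes the calling function's name to __printk_ratelimit() (the wrapper lives in a header outside this hunk, so its exact form is an assumption here). Typical usage stays as before, gated by the 10-messages-per-5*HZ budget set up by the DEFINE_RATELIMIT_STATE() line above:

    if (printk_ratelimit())
            printk(KERN_WARNING "example: dropping malformed request\n");
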
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 400183346ad2..9b7fd4723878 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,7 +44,6 @@
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/kernel_stat.h>
48 47
49#ifdef CONFIG_DEBUG_LOCK_ALLOC 48#ifdef CONFIG_DEBUG_LOCK_ALLOC
50static struct lock_class_key rcu_lock_key; 49static struct lock_class_key rcu_lock_key;
@@ -53,8 +52,6 @@ struct lockdep_map rcu_lock_map =
53EXPORT_SYMBOL_GPL(rcu_lock_map); 52EXPORT_SYMBOL_GPL(rcu_lock_map);
54#endif 53#endif
55 54
56int rcu_scheduler_active __read_mostly;
57
58/* 55/*
59 * Awaken the corresponding synchronize_rcu() instance now that a 56 * Awaken the corresponding synchronize_rcu() instance now that a
60 * grace period has elapsed. 57 * grace period has elapsed.
@@ -66,122 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
66 rcu = container_of(head, struct rcu_synchronize, head); 63 rcu = container_of(head, struct rcu_synchronize, head);
67 complete(&rcu->completion); 64 complete(&rcu->completion);
68} 65}
69
70#ifdef CONFIG_TREE_PREEMPT_RCU
71
72/**
73 * synchronize_rcu - wait until a grace period has elapsed.
74 *
75 * Control will return to the caller some time after a full grace
76 * period has elapsed, in other words after all currently executing RCU
77 * read-side critical sections have completed. RCU read-side critical
78 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
79 * and may be nested.
80 */
81void synchronize_rcu(void)
82{
83 struct rcu_synchronize rcu;
84
85 if (!rcu_scheduler_active)
86 return;
87
88 init_completion(&rcu.completion);
89 /* Will wake me after RCU finished. */
90 call_rcu(&rcu.head, wakeme_after_rcu);
91 /* Wait for it. */
92 wait_for_completion(&rcu.completion);
93}
94EXPORT_SYMBOL_GPL(synchronize_rcu);
95
96#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
97
98/**
99 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
100 *
101 * Control will return to the caller some time after a full rcu-sched
102 * grace period has elapsed, in other words after all currently executing
103 * rcu-sched read-side critical sections have completed. These read-side
104 * critical sections are delimited by rcu_read_lock_sched() and
105 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
106 * local_irq_disable(), and so on may be used in place of
107 * rcu_read_lock_sched().
108 *
109 * This means that all preempt_disable code sequences, including NMI and
110 * hardware-interrupt handlers, in progress on entry will have completed
111 * before this primitive returns. However, this does not guarantee that
112 * softirq handlers will have completed, since in some kernels, these
113 * handlers can run in process context, and can block.
114 *
115 * This primitive provides the guarantees made by the (now removed)
116 * synchronize_kernel() API. In contrast, synchronize_rcu() only
117 * guarantees that rcu_read_lock() sections will have completed.
118 * In "classic RCU", these two guarantees happen to be one and
119 * the same, but can differ in realtime RCU implementations.
120 */
121void synchronize_sched(void)
122{
123 struct rcu_synchronize rcu;
124
125 if (rcu_blocking_is_gp())
126 return;
127
128 init_completion(&rcu.completion);
129 /* Will wake me after RCU finished. */
130 call_rcu_sched(&rcu.head, wakeme_after_rcu);
131 /* Wait for it. */
132 wait_for_completion(&rcu.completion);
133}
134EXPORT_SYMBOL_GPL(synchronize_sched);
135
136/**
137 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
138 *
139 * Control will return to the caller some time after a full rcu_bh grace
140 * period has elapsed, in other words after all currently executing rcu_bh
141 * read-side critical sections have completed. RCU read-side critical
142 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
143 * and may be nested.
144 */
145void synchronize_rcu_bh(void)
146{
147 struct rcu_synchronize rcu;
148
149 if (rcu_blocking_is_gp())
150 return;
151
152 init_completion(&rcu.completion);
153 /* Will wake me after RCU finished. */
154 call_rcu_bh(&rcu.head, wakeme_after_rcu);
155 /* Wait for it. */
156 wait_for_completion(&rcu.completion);
157}
158EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
159
160static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
161 unsigned long action, void *hcpu)
162{
163 return rcu_cpu_notify(self, action, hcpu);
164}
165
166void __init rcu_init(void)
167{
168 int i;
169
170 __rcu_init();
171 cpu_notifier(rcu_barrier_cpu_hotplug, 0);
172
173 /*
174 * We don't need protection against CPU-hotplug here because
175 * this is called early in boot, before either interrupts
176 * or the scheduler are operational.
177 */
178 for_each_online_cpu(i)
179 rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
180}
181
182void rcu_scheduler_starting(void)
183{
184 WARN_ON(num_online_cpus() != 1);
185 WARN_ON(nr_context_switches() > 0);
186 rcu_scheduler_active = 1;
187}
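
The kerneldoc removed from this file describes the update side of the standard RCU pattern; the implementations now live in the flavor-specific files (rcutree.c, or rcutiny.c below). For orientation, the pattern those comments describe, with a hypothetical RCU-protected pointer gp and struct foo (update-side locking elided):

    /* updater */
    struct foo *new_p, *old_p;

    new_p = kmalloc(sizeof(*new_p), GFP_KERNEL);
    /* ... initialize *new_p ... */
    old_p = gp;
    rcu_assign_pointer(gp, new_p);   /* publish the new version */
    synchronize_rcu();               /* wait out readers that may still see old_p */
    kfree(old_p);

    /* reader */
    struct foo *p;

    rcu_read_lock();
    p = rcu_dereference(gp);
    if (p)
            do_something_with(p);    /* hypothetical */
    rcu_read_unlock();
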
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
new file mode 100644
index 000000000000..9f6d9ff2572c
--- /dev/null
+++ b/kernel/rcutiny.c
@@ -0,0 +1,282 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
21 *
22 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU
24 */
25#include <linux/moduleparam.h>
26#include <linux/completion.h>
27#include <linux/interrupt.h>
28#include <linux/notifier.h>
29#include <linux/rcupdate.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/mutex.h>
33#include <linux/sched.h>
34#include <linux/types.h>
35#include <linux/init.h>
36#include <linux/time.h>
37#include <linux/cpu.h>
38
39/* Global control variables for rcupdate callback mechanism. */
40struct rcu_ctrlblk {
41 struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
42 struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
43 struct rcu_head **curtail; /* ->next pointer of last CB. */
44};
45
46/* Definition for rcupdate control block. */
47static struct rcu_ctrlblk rcu_ctrlblk = {
48 .donetail = &rcu_ctrlblk.rcucblist,
49 .curtail = &rcu_ctrlblk.rcucblist,
50};
51
52static struct rcu_ctrlblk rcu_bh_ctrlblk = {
53 .donetail = &rcu_bh_ctrlblk.rcucblist,
54 .curtail = &rcu_bh_ctrlblk.rcucblist,
55};
56
57#ifdef CONFIG_NO_HZ
58
59static long rcu_dynticks_nesting = 1;
60
61/*
62 * Enter dynticks-idle mode, which is an extended quiescent state
63 * if we have fully entered that mode (i.e., if the new value of
64 * dynticks_nesting is zero).
65 */
66void rcu_enter_nohz(void)
67{
68 if (--rcu_dynticks_nesting == 0)
69		rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
70}
71
72/*
73 * Exit dynticks-idle mode, so that we are no longer in an extended
74 * quiescent state.
75 */
76void rcu_exit_nohz(void)
77{
78 rcu_dynticks_nesting++;
79}
80
81#endif /* #ifdef CONFIG_NO_HZ */
82
83/*
84 * Helper function for rcu_sched_qs() and rcu_bh_qs().
85 * Also disable irqs to avoid confusion due to interrupt handlers
86 * invoking call_rcu().
87 */
88static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
89{
90 unsigned long flags;
91
92 local_irq_save(flags);
93 if (rcp->rcucblist != NULL &&
94 rcp->donetail != rcp->curtail) {
95 rcp->donetail = rcp->curtail;
96 local_irq_restore(flags);
97 return 1;
98 }
99 local_irq_restore(flags);
100
101 return 0;
102}
103
104/*
105 * Record an rcu quiescent state. And an rcu_bh quiescent state while we
106 * are at it, given that any rcu quiescent state is also an rcu_bh
107 * quiescent state. Use "+" instead of "||" to defeat short circuiting.
108 */
109void rcu_sched_qs(int cpu)
110{
111 if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk))
112 raise_softirq(RCU_SOFTIRQ);
113}
114
115/*
116 * Record an rcu_bh quiescent state.
117 */
118void rcu_bh_qs(int cpu)
119{
120 if (rcu_qsctr_help(&rcu_bh_ctrlblk))
121 raise_softirq(RCU_SOFTIRQ);
122}
123
124/*
125 * Check to see if the scheduling-clock interrupt came from an extended
126 * quiescent state, and, if so, tell RCU about it.
127 */
128void rcu_check_callbacks(int cpu, int user)
129{
130 if (user ||
131 (idle_cpu(cpu) &&
132 !in_softirq() &&
133 hardirq_count() <= (1 << HARDIRQ_SHIFT)))
134 rcu_sched_qs(cpu);
135 else if (!in_softirq())
136 rcu_bh_qs(cpu);
137}
138
139/*
140 * Helper function for rcu_process_callbacks() that operates on the
141 * specified rcu_ctrlblk structure.
142 */
143static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
144{
145 struct rcu_head *next, *list;
146 unsigned long flags;
147
148 /* If no RCU callbacks ready to invoke, just return. */
149 if (&rcp->rcucblist == rcp->donetail)
150 return;
151
152 /* Move the ready-to-invoke callbacks to a local list. */
153 local_irq_save(flags);
154 list = rcp->rcucblist;
155 rcp->rcucblist = *rcp->donetail;
156 *rcp->donetail = NULL;
157 if (rcp->curtail == rcp->donetail)
158 rcp->curtail = &rcp->rcucblist;
159 rcp->donetail = &rcp->rcucblist;
160 local_irq_restore(flags);
161
162 /* Invoke the callbacks on the local list. */
163 while (list) {
164 next = list->next;
165 prefetch(next);
166 list->func(list);
167 list = next;
168 }
169}
170
171/*
172 * Invoke any callbacks whose grace period has completed.
173 */
174static void rcu_process_callbacks(struct softirq_action *unused)
175{
176 __rcu_process_callbacks(&rcu_ctrlblk);
177 __rcu_process_callbacks(&rcu_bh_ctrlblk);
178}
179
180/*
181 * Wait for a grace period to elapse. But it is illegal to invoke
182 * synchronize_sched() from within an RCU read-side critical section.
183 * Therefore, any legal call to synchronize_sched() is a quiescent
184 * state, and so on a UP system, synchronize_sched() need do nothing.
185 * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the
186 * benefits of doing might_sleep() to reduce latency.)
187 *
188 * Cool, huh? (Due to Josh Triplett.)
189 *
190 * But we want to make this a static inline later.
191 */
192void synchronize_sched(void)
193{
194 cond_resched();
195}
196EXPORT_SYMBOL_GPL(synchronize_sched);
197
198void synchronize_rcu_bh(void)
199{
200 synchronize_sched();
201}
202EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
203
204/*
205 * Helper function for call_rcu() and call_rcu_bh().
206 */
207static void __call_rcu(struct rcu_head *head,
208 void (*func)(struct rcu_head *rcu),
209 struct rcu_ctrlblk *rcp)
210{
211 unsigned long flags;
212
213 head->func = func;
214 head->next = NULL;
215
216 local_irq_save(flags);
217 *rcp->curtail = head;
218 rcp->curtail = &head->next;
219 local_irq_restore(flags);
220}
221
222/*
223 * Post an RCU callback to be invoked after the end of an RCU grace
224 * period. But since we have but one CPU, that would be after any
225 * quiescent state.
226 */
227void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
228{
229 __call_rcu(head, func, &rcu_ctrlblk);
230}
231EXPORT_SYMBOL_GPL(call_rcu);
232
233/*
234 * Post an RCU bottom-half callback to be invoked after any subsequent
235 * quiescent state.
236 */
237void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
238{
239 __call_rcu(head, func, &rcu_bh_ctrlblk);
240}
241EXPORT_SYMBOL_GPL(call_rcu_bh);
242
243void rcu_barrier(void)
244{
245 struct rcu_synchronize rcu;
246
247 init_completion(&rcu.completion);
248 /* Will wake me after RCU finished. */
249 call_rcu(&rcu.head, wakeme_after_rcu);
250 /* Wait for it. */
251 wait_for_completion(&rcu.completion);
252}
253EXPORT_SYMBOL_GPL(rcu_barrier);
254
255void rcu_barrier_bh(void)
256{
257 struct rcu_synchronize rcu;
258
259 init_completion(&rcu.completion);
260 /* Will wake me after RCU finished. */
261 call_rcu_bh(&rcu.head, wakeme_after_rcu);
262 /* Wait for it. */
263 wait_for_completion(&rcu.completion);
264}
265EXPORT_SYMBOL_GPL(rcu_barrier_bh);
266
267void rcu_barrier_sched(void)
268{
269 struct rcu_synchronize rcu;
270
271 init_completion(&rcu.completion);
272 /* Will wake me after RCU finished. */
273 call_rcu_sched(&rcu.head, wakeme_after_rcu);
274 /* Wait for it. */
275 wait_for_completion(&rcu.completion);
276}
277EXPORT_SYMBOL_GPL(rcu_barrier_sched);
278
279void __init rcu_init(void)
280{
281 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
282}
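
Nearly all of Tiny RCU's state is the two-tail list in struct rcu_ctrlblk above: ->curtail always points at the ->next field of the last queued callback, and a quiescent state simply advances ->donetail up to ->curtail, marking everything queued before that point as ready to invoke. A stripped-down user-space model of that bookkeeping (the names mirror the structure above, but nothing here is kernel API):

    #include <stddef.h>

    struct cb { struct cb *next; };

    struct ctrlblk {
            struct cb  *list;        /* all pending callbacks */
            struct cb **donetail;    /* ->next of last callback whose GP has ended */
            struct cb **curtail;     /* ->next of the last callback, full stop */
    };

    static struct ctrlblk blk = {
            .donetail = &blk.list,
            .curtail  = &blk.list,
    };

    static void enqueue(struct cb *new)      /* cf. __call_rcu() */
    {
            new->next = NULL;
            *blk.curtail = new;
            blk.curtail = &new->next;
    }

    static void quiescent_state(void)        /* cf. rcu_qsctr_help() */
    {
            blk.donetail = blk.curtail;
    }

Invocation (cf. __rcu_process_callbacks()) then splices everything up to *donetail off the front of the list and runs it, pointing the tails back at the list head when they pass the spliced segment.
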
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 697c0a0229d4..a621a67ef4e3 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -327,6 +327,11 @@ rcu_torture_cb(struct rcu_head *p)
327 cur_ops->deferred_free(rp); 327 cur_ops->deferred_free(rp);
328} 328}
329 329
330static int rcu_no_completed(void)
331{
332 return 0;
333}
334
330static void rcu_torture_deferred_free(struct rcu_torture *p) 335static void rcu_torture_deferred_free(struct rcu_torture *p)
331{ 336{
332 call_rcu(&p->rtort_rcu, rcu_torture_cb); 337 call_rcu(&p->rtort_rcu, rcu_torture_cb);
@@ -388,6 +393,21 @@ static struct rcu_torture_ops rcu_sync_ops = {
388 .name = "rcu_sync" 393 .name = "rcu_sync"
389}; 394};
390 395
396static struct rcu_torture_ops rcu_expedited_ops = {
397 .init = rcu_sync_torture_init,
398 .cleanup = NULL,
399 .readlock = rcu_torture_read_lock,
400 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
401 .readunlock = rcu_torture_read_unlock,
402 .completed = rcu_no_completed,
403 .deferred_free = rcu_sync_torture_deferred_free,
404 .sync = synchronize_rcu_expedited,
405 .cb_barrier = NULL,
406 .stats = NULL,
407 .irq_capable = 1,
408 .name = "rcu_expedited"
409};
410
391/* 411/*
392 * Definitions for rcu_bh torture testing. 412 * Definitions for rcu_bh torture testing.
393 */ 413 */
@@ -547,6 +567,25 @@ static struct rcu_torture_ops srcu_ops = {
547 .name = "srcu" 567 .name = "srcu"
548}; 568};
549 569
570static void srcu_torture_synchronize_expedited(void)
571{
572 synchronize_srcu_expedited(&srcu_ctl);
573}
574
575static struct rcu_torture_ops srcu_expedited_ops = {
576 .init = srcu_torture_init,
577 .cleanup = srcu_torture_cleanup,
578 .readlock = srcu_torture_read_lock,
579 .read_delay = srcu_read_delay,
580 .readunlock = srcu_torture_read_unlock,
581 .completed = srcu_torture_completed,
582 .deferred_free = rcu_sync_torture_deferred_free,
583 .sync = srcu_torture_synchronize_expedited,
584 .cb_barrier = NULL,
585 .stats = srcu_torture_stats,
586 .name = "srcu_expedited"
587};
588
550/* 589/*
551 * Definitions for sched torture testing. 590 * Definitions for sched torture testing.
552 */ 591 */
@@ -562,11 +601,6 @@ static void sched_torture_read_unlock(int idx)
562 preempt_enable(); 601 preempt_enable();
563} 602}
564 603
565static int sched_torture_completed(void)
566{
567 return 0;
568}
569
570static void rcu_sched_torture_deferred_free(struct rcu_torture *p) 604static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
571{ 605{
572 call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); 606 call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
@@ -583,7 +617,7 @@ static struct rcu_torture_ops sched_ops = {
583 .readlock = sched_torture_read_lock, 617 .readlock = sched_torture_read_lock,
584 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 618 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
585 .readunlock = sched_torture_read_unlock, 619 .readunlock = sched_torture_read_unlock,
586 .completed = sched_torture_completed, 620 .completed = rcu_no_completed,
587 .deferred_free = rcu_sched_torture_deferred_free, 621 .deferred_free = rcu_sched_torture_deferred_free,
588 .sync = sched_torture_synchronize, 622 .sync = sched_torture_synchronize,
589 .cb_barrier = rcu_barrier_sched, 623 .cb_barrier = rcu_barrier_sched,
@@ -592,13 +626,13 @@ static struct rcu_torture_ops sched_ops = {
592 .name = "sched" 626 .name = "sched"
593}; 627};
594 628
595static struct rcu_torture_ops sched_ops_sync = { 629static struct rcu_torture_ops sched_sync_ops = {
596 .init = rcu_sync_torture_init, 630 .init = rcu_sync_torture_init,
597 .cleanup = NULL, 631 .cleanup = NULL,
598 .readlock = sched_torture_read_lock, 632 .readlock = sched_torture_read_lock,
599 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 633 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
600 .readunlock = sched_torture_read_unlock, 634 .readunlock = sched_torture_read_unlock,
601 .completed = sched_torture_completed, 635 .completed = rcu_no_completed,
602 .deferred_free = rcu_sync_torture_deferred_free, 636 .deferred_free = rcu_sync_torture_deferred_free,
603 .sync = sched_torture_synchronize, 637 .sync = sched_torture_synchronize,
604 .cb_barrier = NULL, 638 .cb_barrier = NULL,
@@ -612,7 +646,7 @@ static struct rcu_torture_ops sched_expedited_ops = {
612 .readlock = sched_torture_read_lock, 646 .readlock = sched_torture_read_lock,
613 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 647 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
614 .readunlock = sched_torture_read_unlock, 648 .readunlock = sched_torture_read_unlock,
615 .completed = sched_torture_completed, 649 .completed = rcu_no_completed,
616 .deferred_free = rcu_sync_torture_deferred_free, 650 .deferred_free = rcu_sync_torture_deferred_free,
617 .sync = synchronize_sched_expedited, 651 .sync = synchronize_sched_expedited,
618 .cb_barrier = NULL, 652 .cb_barrier = NULL,
@@ -1097,9 +1131,10 @@ rcu_torture_init(void)
1097 int cpu; 1131 int cpu;
1098 int firsterr = 0; 1132 int firsterr = 0;
1099 static struct rcu_torture_ops *torture_ops[] = 1133 static struct rcu_torture_ops *torture_ops[] =
1100 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, 1134 { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
1101 &sched_expedited_ops, 1135 &rcu_bh_ops, &rcu_bh_sync_ops,
1102 &srcu_ops, &sched_ops, &sched_ops_sync, }; 1136 &srcu_ops, &srcu_expedited_ops,
1137 &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
1103 1138
1104 mutex_lock(&fullstop_mutex); 1139 mutex_lock(&fullstop_mutex);
1105 1140
@@ -1110,8 +1145,12 @@ rcu_torture_init(void)
1110 break; 1145 break;
1111 } 1146 }
1112 if (i == ARRAY_SIZE(torture_ops)) { 1147 if (i == ARRAY_SIZE(torture_ops)) {
1113 printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", 1148 printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
1114 torture_type); 1149 torture_type);
1150 printk(KERN_ALERT "rcu-torture types:");
1151 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
1152 printk(KERN_ALERT " %s", torture_ops[i]->name);
1153 printk(KERN_ALERT "\n");
1115 mutex_unlock(&fullstop_mutex); 1154 mutex_unlock(&fullstop_mutex);
1116 return -EINVAL; 1155 return -EINVAL;
1117 } 1156 }
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f3077c0ab181..53ae9598f798 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,18 +46,22 @@
46#include <linux/cpu.h> 46#include <linux/cpu.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/time.h> 48#include <linux/time.h>
49#include <linux/kernel_stat.h>
49 50
50#include "rcutree.h" 51#include "rcutree.h"
51 52
52/* Data structures. */ 53/* Data structures. */
53 54
55static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
56
54#define RCU_STATE_INITIALIZER(name) { \ 57#define RCU_STATE_INITIALIZER(name) { \
55 .level = { &name.node[0] }, \ 58 .level = { &name.node[0] }, \
56 .levelcnt = { \ 59 .levelcnt = { \
57 NUM_RCU_LVL_0, /* root of hierarchy. */ \ 60 NUM_RCU_LVL_0, /* root of hierarchy. */ \
58 NUM_RCU_LVL_1, \ 61 NUM_RCU_LVL_1, \
59 NUM_RCU_LVL_2, \ 62 NUM_RCU_LVL_2, \
60 NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ 63 NUM_RCU_LVL_3, \
64 NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
61 }, \ 65 }, \
62 .signaled = RCU_GP_IDLE, \ 66 .signaled = RCU_GP_IDLE, \
63 .gpnum = -300, \ 67 .gpnum = -300, \
@@ -77,6 +81,8 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
77struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); 81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
78DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); 82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
79 83
84static int rcu_scheduler_active __read_mostly;
85
80 86
81/* 87/*
82 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s 88 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
@@ -98,7 +104,7 @@ void rcu_sched_qs(int cpu)
98 struct rcu_data *rdp; 104 struct rcu_data *rdp;
99 105
100 rdp = &per_cpu(rcu_sched_data, cpu); 106 rdp = &per_cpu(rcu_sched_data, cpu);
101 rdp->passed_quiesc_completed = rdp->completed; 107 rdp->passed_quiesc_completed = rdp->gpnum - 1;
102 barrier(); 108 barrier();
103 rdp->passed_quiesc = 1; 109 rdp->passed_quiesc = 1;
104 rcu_preempt_note_context_switch(cpu); 110 rcu_preempt_note_context_switch(cpu);
@@ -109,7 +115,7 @@ void rcu_bh_qs(int cpu)
109 struct rcu_data *rdp; 115 struct rcu_data *rdp;
110 116
111 rdp = &per_cpu(rcu_bh_data, cpu); 117 rdp = &per_cpu(rcu_bh_data, cpu);
112 rdp->passed_quiesc_completed = rdp->completed; 118 rdp->passed_quiesc_completed = rdp->gpnum - 1;
113 barrier(); 119 barrier();
114 rdp->passed_quiesc = 1; 120 rdp->passed_quiesc = 1;
115} 121}
@@ -335,28 +341,9 @@ void rcu_irq_exit(void)
335 set_need_resched(); 341 set_need_resched();
336} 342}
337 343
338/*
339 * Record the specified "completed" value, which is later used to validate
340 * dynticks counter manipulations. Specify "rsp->completed - 1" to
341 * unconditionally invalidate any future dynticks manipulations (which is
342 * useful at the beginning of a grace period).
343 */
344static void dyntick_record_completed(struct rcu_state *rsp, long comp)
345{
346 rsp->dynticks_completed = comp;
347}
348
349#ifdef CONFIG_SMP 344#ifdef CONFIG_SMP
350 345
351/* 346/*
352 * Recall the previously recorded value of the completion for dynticks.
353 */
354static long dyntick_recall_completed(struct rcu_state *rsp)
355{
356 return rsp->dynticks_completed;
357}
358
359/*
360 * Snapshot the specified CPU's dynticks counter so that we can later 347 * Snapshot the specified CPU's dynticks counter so that we can later
361 * credit them with an implicit quiescent state. Return 1 if this CPU 348 * credit them with an implicit quiescent state. Return 1 if this CPU
362 * is in dynticks idle mode, which is an extended quiescent state. 349 * is in dynticks idle mode, which is an extended quiescent state.
@@ -419,24 +406,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
419 406
420#else /* #ifdef CONFIG_NO_HZ */ 407#else /* #ifdef CONFIG_NO_HZ */
421 408
422static void dyntick_record_completed(struct rcu_state *rsp, long comp)
423{
424}
425
426#ifdef CONFIG_SMP 409#ifdef CONFIG_SMP
427 410
428/*
429 * If there are no dynticks, then the only way that a CPU can passively
430 * be in a quiescent state is to be offline. Unlike dynticks idle, which
431 * is a point in time during the prior (already finished) grace period,
432 * an offline CPU is always in a quiescent state, and thus can be
433 * unconditionally applied. So just return the current value of completed.
434 */
435static long dyntick_recall_completed(struct rcu_state *rsp)
436{
437 return rsp->completed;
438}
439
440static int dyntick_save_progress_counter(struct rcu_data *rdp) 411static int dyntick_save_progress_counter(struct rcu_data *rdp)
441{ 412{
442 return 0; 413 return 0;
@@ -553,13 +524,33 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
553/* 524/*
554 * Update CPU-local rcu_data state to record the newly noticed grace period. 525 * Update CPU-local rcu_data state to record the newly noticed grace period.
555 * This is used both when we started the grace period and when we notice 526 * This is used both when we started the grace period and when we notice
556 * that someone else started the grace period. 527 * that someone else started the grace period. The caller must hold the
528 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
529 * and must have irqs disabled.
557 */ 530 */
531static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
532{
533 if (rdp->gpnum != rnp->gpnum) {
534 rdp->qs_pending = 1;
535 rdp->passed_quiesc = 0;
536 rdp->gpnum = rnp->gpnum;
537 }
538}
539
558static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) 540static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
559{ 541{
560 rdp->qs_pending = 1; 542 unsigned long flags;
561 rdp->passed_quiesc = 0; 543 struct rcu_node *rnp;
562 rdp->gpnum = rsp->gpnum; 544
545 local_irq_save(flags);
546 rnp = rdp->mynode;
547 if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
548 !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
549 local_irq_restore(flags);
550 return;
551 }
552 __note_new_gpnum(rsp, rnp, rdp);
553 spin_unlock_irqrestore(&rnp->lock, flags);
563} 554}
564 555
565/* 556/*
@@ -583,6 +574,79 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
583} 574}
584 575
585/* 576/*
577 * Advance this CPU's callbacks, but only if the current grace period
578 * has ended. This may be called only from the CPU to whom the rdp
579 * belongs. In addition, the corresponding leaf rcu_node structure's
580 * ->lock must be held by the caller, with irqs disabled.
581 */
582static void
583__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
584{
585 /* Did another grace period end? */
586 if (rdp->completed != rnp->completed) {
587
588 /* Advance callbacks. No harm if list empty. */
589 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
590 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
591 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
592
593 /* Remember that we saw this grace-period completion. */
594 rdp->completed = rnp->completed;
595 }
596}
597
598/*
599 * Advance this CPU's callbacks, but only if the current grace period
600 * has ended. This may be called only from the CPU to whom the rdp
601 * belongs.
602 */
603static void
604rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
605{
606 unsigned long flags;
607 struct rcu_node *rnp;
608
609 local_irq_save(flags);
610 rnp = rdp->mynode;
611 if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
612 !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
613 local_irq_restore(flags);
614 return;
615 }
616 __rcu_process_gp_end(rsp, rnp, rdp);
617 spin_unlock_irqrestore(&rnp->lock, flags);
618}
619
620/*
621 * Do per-CPU grace-period initialization for running CPU. The caller
622 * must hold the lock of the leaf rcu_node structure corresponding to
623 * this CPU.
624 */
625static void
626rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
627{
628 /* Prior grace period ended, so advance callbacks for current CPU. */
629 __rcu_process_gp_end(rsp, rnp, rdp);
630
631 /*
632 * Because this CPU just now started the new grace period, we know
633 * that all of its callbacks will be covered by this upcoming grace
634 * period, even the ones that were registered arbitrarily recently.
635 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
636 *
637 * Other CPUs cannot be sure exactly when the grace period started.
638 * Therefore, their recently registered callbacks must pass through
639 * an additional RCU_NEXT_READY stage, so that they will be handled
640 * by the next RCU grace period.
641 */
642 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
643 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
644
645 /* Set state so that this CPU will detect the next quiescent state. */
646 __note_new_gpnum(rsp, rnp, rdp);
647}
648
649/*
586 * Start a new RCU grace period if warranted, re-initializing the hierarchy 650 * Start a new RCU grace period if warranted, re-initializing the hierarchy
587 * in preparation for detecting the next grace period. The caller must hold 651 * in preparation for detecting the next grace period. The caller must hold
588 * the root node's ->lock, which is released before return. Hard irqs must 652 * the root node's ->lock, which is released before return. Hard irqs must
@@ -596,7 +660,23 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
596 struct rcu_node *rnp = rcu_get_root(rsp); 660 struct rcu_node *rnp = rcu_get_root(rsp);
597 661
598 if (!cpu_needs_another_gp(rsp, rdp)) { 662 if (!cpu_needs_another_gp(rsp, rdp)) {
599 spin_unlock_irqrestore(&rnp->lock, flags); 663 if (rnp->completed == rsp->completed) {
664 spin_unlock_irqrestore(&rnp->lock, flags);
665 return;
666 }
667 spin_unlock(&rnp->lock); /* irqs remain disabled. */
668
669 /*
670 * Propagate new ->completed value to rcu_node structures
671 * so that other CPUs don't have to wait until the start
672 * of the next grace period to process their callbacks.
673 */
674 rcu_for_each_node_breadth_first(rsp, rnp) {
675 spin_lock(&rnp->lock); /* irqs already disabled. */
676 rnp->completed = rsp->completed;
677 spin_unlock(&rnp->lock); /* irqs remain disabled. */
678 }
679 local_irq_restore(flags);
600 return; 680 return;
601 } 681 }
602 682
@@ -606,29 +686,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
606 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ 686 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
607 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 687 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
608 record_gp_stall_check_time(rsp); 688 record_gp_stall_check_time(rsp);
609 dyntick_record_completed(rsp, rsp->completed - 1);
610 note_new_gpnum(rsp, rdp);
611
612 /*
613 * Because this CPU just now started the new grace period, we know
614 * that all of its callbacks will be covered by this upcoming grace
615 * period, even the ones that were registered arbitrarily recently.
616 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
617 *
618 * Other CPUs cannot be sure exactly when the grace period started.
619 * Therefore, their recently registered callbacks must pass through
620 * an additional RCU_NEXT_READY stage, so that they will be handled
621 * by the next RCU grace period.
622 */
623 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
624 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
625 689
626 /* Special-case the common single-level case. */ 690 /* Special-case the common single-level case. */
627 if (NUM_RCU_NODES == 1) { 691 if (NUM_RCU_NODES == 1) {
628 rcu_preempt_check_blocked_tasks(rnp); 692 rcu_preempt_check_blocked_tasks(rnp);
629 rnp->qsmask = rnp->qsmaskinit; 693 rnp->qsmask = rnp->qsmaskinit;
630 rnp->gpnum = rsp->gpnum; 694 rnp->gpnum = rsp->gpnum;
695 rnp->completed = rsp->completed;
631 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ 696 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
697 rcu_start_gp_per_cpu(rsp, rnp, rdp);
632 spin_unlock_irqrestore(&rnp->lock, flags); 698 spin_unlock_irqrestore(&rnp->lock, flags);
633 return; 699 return;
634 } 700 }
@@ -661,6 +727,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
661 rcu_preempt_check_blocked_tasks(rnp); 727 rcu_preempt_check_blocked_tasks(rnp);
662 rnp->qsmask = rnp->qsmaskinit; 728 rnp->qsmask = rnp->qsmaskinit;
663 rnp->gpnum = rsp->gpnum; 729 rnp->gpnum = rsp->gpnum;
730 rnp->completed = rsp->completed;
731 if (rnp == rdp->mynode)
732 rcu_start_gp_per_cpu(rsp, rnp, rdp);
664 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 733 spin_unlock(&rnp->lock); /* irqs remain disabled. */
665 } 734 }
666 735
@@ -672,58 +741,32 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
672} 741}
673 742
674/* 743/*
675 * Advance this CPU's callbacks, but only if the current grace period 744 * Report a full set of quiescent states to the specified rcu_state
676 * has ended. This may be called only from the CPU to whom the rdp 745 * data structure. This involves cleaning up after the prior grace
677 * belongs. 746 * period and letting rcu_start_gp() start up the next grace period
747 * if one is needed. Note that the caller must hold rnp->lock, as
748 * required by rcu_start_gp(), which will release it.
678 */ 749 */
679static void 750static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
680rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
681{
682 long completed_snap;
683 unsigned long flags;
684
685 local_irq_save(flags);
686 completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */
687
688 /* Did another grace period end? */
689 if (rdp->completed != completed_snap) {
690
691 /* Advance callbacks. No harm if list empty. */
692 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
693 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
694 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
695
696 /* Remember that we saw this grace-period completion. */
697 rdp->completed = completed_snap;
698 }
699 local_irq_restore(flags);
700}
701
702/*
703 * Clean up after the prior grace period and let rcu_start_gp() start up
704 * the next grace period if one is needed. Note that the caller must
705 * hold rnp->lock, as required by rcu_start_gp(), which will release it.
706 */
707static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
708 __releases(rcu_get_root(rsp)->lock) 751 __releases(rcu_get_root(rsp)->lock)
709{ 752{
710 WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); 753 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
711 rsp->completed = rsp->gpnum; 754 rsp->completed = rsp->gpnum;
712 rsp->signaled = RCU_GP_IDLE; 755 rsp->signaled = RCU_GP_IDLE;
713 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
714 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ 756 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
715} 757}
716 758
717/* 759/*
718 * Similar to cpu_quiet(), for which it is a helper function. Allows 760 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
719 * a group of CPUs to be quieted at one go, though all the CPUs in the 761 * Allows quiescent states for a group of CPUs to be reported at one go
720 * group must be represented by the same leaf rcu_node structure. 762 * to the specified rcu_node structure, though all the CPUs in the group
721 * That structure's lock must be held upon entry, and it is released 763 * must be represented by the same rcu_node structure (which need not be
722 * before return. 764 * a leaf rcu_node structure, though it often will be). That structure's
765 * lock must be held upon entry, and it is released before return.
723 */ 766 */
724static void 767static void
725cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, 768rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
726 unsigned long flags) 769 struct rcu_node *rnp, unsigned long flags)
727 __releases(rnp->lock) 770 __releases(rnp->lock)
728{ 771{
729 struct rcu_node *rnp_c; 772 struct rcu_node *rnp_c;
@@ -759,21 +802,23 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
759 802
760 /* 803 /*
761 * Get here if we are the last CPU to pass through a quiescent 804 * Get here if we are the last CPU to pass through a quiescent
762 * state for this grace period. Invoke cpu_quiet_msk_finish() 805 * state for this grace period. Invoke rcu_report_qs_rsp()
763 * to clean up and start the next grace period if one is needed. 806 * to clean up and start the next grace period if one is needed.
764 */ 807 */
765 cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */ 808 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
766} 809}
767 810
768/* 811/*
769 * Record a quiescent state for the specified CPU, which must either be 812 * Record a quiescent state for the specified CPU to that CPU's rcu_data
770 * the current CPU. The lastcomp argument is used to make sure we are 813 * structure. This must be either called from the specified CPU, or
771 * still in the grace period of interest. We don't want to end the current 814 * called when the specified CPU is known to be offline (and when it is
772 * grace period based on quiescent states detected in an earlier grace 815 * also known that no other CPU is concurrently trying to help the offline
773 * period! 816 * CPU). The lastcomp argument is used to make sure we are still in the
817 * grace period of interest. We don't want to end the current grace period
818 * based on quiescent states detected in an earlier grace period!
774 */ 819 */
775static void 820static void
776cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) 821rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
777{ 822{
778 unsigned long flags; 823 unsigned long flags;
779 unsigned long mask; 824 unsigned long mask;
@@ -781,15 +826,15 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
781 826
782 rnp = rdp->mynode; 827 rnp = rdp->mynode;
783 spin_lock_irqsave(&rnp->lock, flags); 828 spin_lock_irqsave(&rnp->lock, flags);
784 if (lastcomp != ACCESS_ONCE(rsp->completed)) { 829 if (lastcomp != rnp->completed) {
785 830
786 /* 831 /*
787 * Someone beat us to it for this grace period, so leave. 832 * Someone beat us to it for this grace period, so leave.
788 * The race with GP start is resolved by the fact that we 833 * The race with GP start is resolved by the fact that we
789 * hold the leaf rcu_node lock, so that the per-CPU bits 834 * hold the leaf rcu_node lock, so that the per-CPU bits
790 * cannot yet be initialized -- so we would simply find our 835 * cannot yet be initialized -- so we would simply find our
791 * CPU's bit already cleared in cpu_quiet_msk() if this race 836 * CPU's bit already cleared in rcu_report_qs_rnp() if this
792 * occurred. 837 * race occurred.
793 */ 838 */
794 rdp->passed_quiesc = 0; /* try again later! */ 839 rdp->passed_quiesc = 0; /* try again later! */
795 spin_unlock_irqrestore(&rnp->lock, flags); 840 spin_unlock_irqrestore(&rnp->lock, flags);
@@ -807,7 +852,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
807 */ 852 */
808 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 853 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
809 854
810 cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ 855 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
811 } 856 }
812} 857}
813 858
@@ -838,8 +883,11 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
838 if (!rdp->passed_quiesc) 883 if (!rdp->passed_quiesc)
839 return; 884 return;
840 885
841 /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ 886 /*
842 cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); 887 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
888 * judge of that).
889 */
890 rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
843} 891}
844 892
845#ifdef CONFIG_HOTPLUG_CPU 893#ifdef CONFIG_HOTPLUG_CPU
@@ -899,8 +947,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
899static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) 947static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
900{ 948{
901 unsigned long flags; 949 unsigned long flags;
902 long lastcomp;
903 unsigned long mask; 950 unsigned long mask;
951 int need_report = 0;
904 struct rcu_data *rdp = rsp->rda[cpu]; 952 struct rcu_data *rdp = rsp->rda[cpu];
905 struct rcu_node *rnp; 953 struct rcu_node *rnp;
906 954
@@ -914,30 +962,32 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
914 spin_lock(&rnp->lock); /* irqs already disabled. */ 962 spin_lock(&rnp->lock); /* irqs already disabled. */
915 rnp->qsmaskinit &= ~mask; 963 rnp->qsmaskinit &= ~mask;
916 if (rnp->qsmaskinit != 0) { 964 if (rnp->qsmaskinit != 0) {
917 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 965 if (rnp != rdp->mynode)
966 spin_unlock(&rnp->lock); /* irqs remain disabled. */
918 break; 967 break;
919 } 968 }
920 969 if (rnp == rdp->mynode)
921 /* 970 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
922 * If there was a task blocking the current grace period, 971 else
923 * and if all CPUs have checked in, we need to propagate 972 spin_unlock(&rnp->lock); /* irqs remain disabled. */
924 * the quiescent state up the rcu_node hierarchy. But that
925 * is inconvenient at the moment due to deadlock issues if
926 * this should end the current grace period. So set the
927 * offlined CPU's bit in ->qsmask in order to force the
928 * next force_quiescent_state() invocation to clean up this
929 * mess in a deadlock-free manner.
930 */
931 if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
932 rnp->qsmask |= mask;
933
934 mask = rnp->grpmask; 973 mask = rnp->grpmask;
935 spin_unlock(&rnp->lock); /* irqs remain disabled. */
936 rnp = rnp->parent; 974 rnp = rnp->parent;
937 } while (rnp != NULL); 975 } while (rnp != NULL);
938 lastcomp = rsp->completed;
939 976
940 spin_unlock_irqrestore(&rsp->onofflock, flags); 977 /*
978 * We still hold the leaf rcu_node structure lock here, and
979 * irqs are still disabled. The reason for this subterfuge is
980 * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
981 * held leads to deadlock.
982 */
983 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
984 rnp = rdp->mynode;
985 if (need_report & RCU_OFL_TASKS_NORM_GP)
986 rcu_report_unblock_qs_rnp(rnp, flags);
987 else
988 spin_unlock_irqrestore(&rnp->lock, flags);
989 if (need_report & RCU_OFL_TASKS_EXP_GP)
990 rcu_report_exp_rnp(rsp, rnp);
941 991
942 rcu_adopt_orphan_cbs(rsp); 992 rcu_adopt_orphan_cbs(rsp);
943} 993}
@@ -1109,7 +1159,7 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1109 rcu_for_each_leaf_node(rsp, rnp) { 1159 rcu_for_each_leaf_node(rsp, rnp) {
1110 mask = 0; 1160 mask = 0;
1111 spin_lock_irqsave(&rnp->lock, flags); 1161 spin_lock_irqsave(&rnp->lock, flags);
1112 if (rsp->completed != lastcomp) { 1162 if (rnp->completed != lastcomp) {
1113 spin_unlock_irqrestore(&rnp->lock, flags); 1163 spin_unlock_irqrestore(&rnp->lock, flags);
1114 return 1; 1164 return 1;
1115 } 1165 }
@@ -1123,10 +1173,10 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1123 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) 1173 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
1124 mask |= bit; 1174 mask |= bit;
1125 } 1175 }
1126 if (mask != 0 && rsp->completed == lastcomp) { 1176 if (mask != 0 && rnp->completed == lastcomp) {
1127 1177
1128 /* cpu_quiet_msk() releases rnp->lock. */ 1178 /* rcu_report_qs_rnp() releases rnp->lock. */
1129 cpu_quiet_msk(mask, rsp, rnp, flags); 1179 rcu_report_qs_rnp(mask, rsp, rnp, flags);
1130 continue; 1180 continue;
1131 } 1181 }
1132 spin_unlock_irqrestore(&rnp->lock, flags); 1182 spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1144,6 +1194,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1144 long lastcomp; 1194 long lastcomp;
1145 struct rcu_node *rnp = rcu_get_root(rsp); 1195 struct rcu_node *rnp = rcu_get_root(rsp);
1146 u8 signaled; 1196 u8 signaled;
1197 u8 forcenow;
1147 1198
1148 if (!rcu_gp_in_progress(rsp)) 1199 if (!rcu_gp_in_progress(rsp))
1149 return; /* No grace period in progress, nothing to force. */ 1200 return; /* No grace period in progress, nothing to force. */
@@ -1156,10 +1207,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1156 goto unlock_ret; /* no emergency and done recently. */ 1207 goto unlock_ret; /* no emergency and done recently. */
1157 rsp->n_force_qs++; 1208 rsp->n_force_qs++;
1158 spin_lock(&rnp->lock); 1209 spin_lock(&rnp->lock);
1159 lastcomp = rsp->completed; 1210 lastcomp = rsp->gpnum - 1;
1160 signaled = rsp->signaled; 1211 signaled = rsp->signaled;
1161 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 1212 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
 1162	if (lastcomp == rsp->gpnum) {				 1213	if (!rcu_gp_in_progress(rsp)) {
1163 rsp->n_force_qs_ngp++; 1214 rsp->n_force_qs_ngp++;
1164 spin_unlock(&rnp->lock); 1215 spin_unlock(&rnp->lock);
1165 goto unlock_ret; /* no GP in progress, time updated. */ 1216 goto unlock_ret; /* no GP in progress, time updated. */
@@ -1180,21 +1231,29 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1180 if (rcu_process_dyntick(rsp, lastcomp, 1231 if (rcu_process_dyntick(rsp, lastcomp,
1181 dyntick_save_progress_counter)) 1232 dyntick_save_progress_counter))
1182 goto unlock_ret; 1233 goto unlock_ret;
1234 /* fall into next case. */
1235
1236 case RCU_SAVE_COMPLETED:
1183 1237
1184 /* Update state, record completion counter. */ 1238 /* Update state, record completion counter. */
1239 forcenow = 0;
1185 spin_lock(&rnp->lock); 1240 spin_lock(&rnp->lock);
1186 if (lastcomp == rsp->completed && 1241 if (lastcomp + 1 == rsp->gpnum &&
1187 rsp->signaled == RCU_SAVE_DYNTICK) { 1242 lastcomp == rsp->completed &&
1243 rsp->signaled == signaled) {
1188 rsp->signaled = RCU_FORCE_QS; 1244 rsp->signaled = RCU_FORCE_QS;
1189 dyntick_record_completed(rsp, lastcomp); 1245 rsp->completed_fqs = lastcomp;
1246 forcenow = signaled == RCU_SAVE_COMPLETED;
1190 } 1247 }
1191 spin_unlock(&rnp->lock); 1248 spin_unlock(&rnp->lock);
1192 break; 1249 if (!forcenow)
1250 break;
1251 /* fall into next case. */
1193 1252
1194 case RCU_FORCE_QS: 1253 case RCU_FORCE_QS:
1195 1254
1196 /* Check dyntick-idle state, send IPI to laggarts. */ 1255 /* Check dyntick-idle state, send IPI to laggarts. */
1197 if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp), 1256 if (rcu_process_dyntick(rsp, rsp->completed_fqs,
1198 rcu_implicit_dynticks_qs)) 1257 rcu_implicit_dynticks_qs))
1199 goto unlock_ret; 1258 goto unlock_ret;
1200 1259
@@ -1351,6 +1410,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1351} 1410}
1352EXPORT_SYMBOL_GPL(call_rcu_bh); 1411EXPORT_SYMBOL_GPL(call_rcu_bh);
1353 1412
1413/**
1414 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
1415 *
1416 * Control will return to the caller some time after a full rcu-sched
1417 * grace period has elapsed, in other words after all currently executing
1418 * rcu-sched read-side critical sections have completed. These read-side
1419 * critical sections are delimited by rcu_read_lock_sched() and
1420 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
1421 * local_irq_disable(), and so on may be used in place of
1422 * rcu_read_lock_sched().
1423 *
1424 * This means that all preempt_disable code sequences, including NMI and
1425 * hardware-interrupt handlers, in progress on entry will have completed
1426 * before this primitive returns. However, this does not guarantee that
1427 * softirq handlers will have completed, since in some kernels, these
1428 * handlers can run in process context, and can block.
1429 *
1430 * This primitive provides the guarantees made by the (now removed)
1431 * synchronize_kernel() API. In contrast, synchronize_rcu() only
1432 * guarantees that rcu_read_lock() sections will have completed.
1433 * In "classic RCU", these two guarantees happen to be one and
1434 * the same, but can differ in realtime RCU implementations.
1435 */
1436void synchronize_sched(void)
1437{
1438 struct rcu_synchronize rcu;
1439
1440 if (rcu_blocking_is_gp())
1441 return;
1442
1443 init_completion(&rcu.completion);
1444 /* Will wake me after RCU finished. */
1445 call_rcu_sched(&rcu.head, wakeme_after_rcu);
1446 /* Wait for it. */
1447 wait_for_completion(&rcu.completion);
1448}
1449EXPORT_SYMBOL_GPL(synchronize_sched);
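For reference, a minimal writer-side sketch of how synchronize_sched() is typically paired with preempt-disabled readers; this is hypothetical illustration only, not part of this patch, and the names foo, global_foo, and foo_replace are invented:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
	};

	static struct foo *global_foo;

	/* Readers walk global_foo under preempt_disable() or
	 * rcu_read_lock_sched(); the updater (assumed serialized by its own
	 * lock) publishes the new element and waits for every such reader
	 * before freeing the old one. */
	static void foo_replace(struct foo *new_fp)
	{
		struct foo *old_fp = global_foo;

		rcu_assign_pointer(global_foo, new_fp);
		synchronize_sched();	/* wait for all preempt-disabled readers */
		kfree(old_fp);
	}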
1450
1451/**
1452 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
1453 *
1454 * Control will return to the caller some time after a full rcu_bh grace
1455 * period has elapsed, in other words after all currently executing rcu_bh
1456 * read-side critical sections have completed. RCU read-side critical
1457 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
1458 * and may be nested.
1459 */
1460void synchronize_rcu_bh(void)
1461{
1462 struct rcu_synchronize rcu;
1463
1464 if (rcu_blocking_is_gp())
1465 return;
1466
1467 init_completion(&rcu.completion);
1468 /* Will wake me after RCU finished. */
1469 call_rcu_bh(&rcu.head, wakeme_after_rcu);
1470 /* Wait for it. */
1471 wait_for_completion(&rcu.completion);
1472}
1473EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
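A corresponding reader-side sketch for the _bh flavour, assuming an invented list bar_list whose readers run in softirq-disabled sections that synchronize_rcu_bh() waits for; again hypothetical and not part of this patch:

	#include <linux/rculist.h>
	#include <linux/rcupdate.h>

	struct bar {
		struct list_head list;
		int key;
		int data;
	};

	static LIST_HEAD(bar_list);

	static int bar_lookup_data(int key)
	{
		struct bar *b;
		int ret = -1;

		rcu_read_lock_bh();	/* reader that synchronize_rcu_bh() waits for */
		list_for_each_entry_rcu(b, &bar_list, list) {
			if (b->key == key) {
				ret = b->data;
				break;
			}
		}
		rcu_read_unlock_bh();
		return ret;
	}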
1474
1354/* 1475/*
1355 * Check to see if there is any immediate RCU-related work to be done 1476 * Check to see if there is any immediate RCU-related work to be done
1356 * by the current CPU, for the specified type of RCU, returning 1 if so. 1477 * by the current CPU, for the specified type of RCU, returning 1 if so.
@@ -1360,6 +1481,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
1360 */ 1481 */
1361static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) 1482static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1362{ 1483{
1484 struct rcu_node *rnp = rdp->mynode;
1485
1363 rdp->n_rcu_pending++; 1486 rdp->n_rcu_pending++;
1364 1487
1365 /* Check for CPU stalls, if enabled. */ 1488 /* Check for CPU stalls, if enabled. */
@@ -1384,13 +1507,13 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1384 } 1507 }
1385 1508
1386 /* Has another RCU grace period completed? */ 1509 /* Has another RCU grace period completed? */
1387 if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */ 1510 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
1388 rdp->n_rp_gp_completed++; 1511 rdp->n_rp_gp_completed++;
1389 return 1; 1512 return 1;
1390 } 1513 }
1391 1514
1392 /* Has a new RCU grace period started? */ 1515 /* Has a new RCU grace period started? */
1393 if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */ 1516 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
1394 rdp->n_rp_gp_started++; 1517 rdp->n_rp_gp_started++;
1395 return 1; 1518 return 1;
1396 } 1519 }
@@ -1433,6 +1556,21 @@ int rcu_needs_cpu(int cpu)
1433 rcu_preempt_needs_cpu(cpu); 1556 rcu_preempt_needs_cpu(cpu);
1434} 1557}
1435 1558
1559/*
1560 * This function is invoked towards the end of the scheduler's initialization
1561 * process. Before this is called, the idle task might contain
1562 * RCU read-side critical sections (during which time, this idle
1563 * task is booting the system). After this function is called, the
1564 * idle tasks are prohibited from containing RCU read-side critical
1565 * sections.
1566 */
1567void rcu_scheduler_starting(void)
1568{
1569 WARN_ON(num_online_cpus() != 1);
1570 WARN_ON(nr_context_switches() > 0);
1571 rcu_scheduler_active = 1;
1572}
1573
1436static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; 1574static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
1437static atomic_t rcu_barrier_cpu_count; 1575static atomic_t rcu_barrier_cpu_count;
1438static DEFINE_MUTEX(rcu_barrier_mutex); 1576static DEFINE_MUTEX(rcu_barrier_mutex);
@@ -1544,21 +1682,16 @@ static void __cpuinit
1544rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) 1682rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1545{ 1683{
1546 unsigned long flags; 1684 unsigned long flags;
1547 long lastcomp;
1548 unsigned long mask; 1685 unsigned long mask;
1549 struct rcu_data *rdp = rsp->rda[cpu]; 1686 struct rcu_data *rdp = rsp->rda[cpu];
1550 struct rcu_node *rnp = rcu_get_root(rsp); 1687 struct rcu_node *rnp = rcu_get_root(rsp);
1551 1688
1552 /* Set up local state, ensuring consistent view of global state. */ 1689 /* Set up local state, ensuring consistent view of global state. */
1553 spin_lock_irqsave(&rnp->lock, flags); 1690 spin_lock_irqsave(&rnp->lock, flags);
1554 lastcomp = rsp->completed;
1555 rdp->completed = lastcomp;
1556 rdp->gpnum = lastcomp;
1557 rdp->passed_quiesc = 0; /* We could be racing with new GP, */ 1691 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
1558 rdp->qs_pending = 1; /* so set up to respond to current GP. */ 1692 rdp->qs_pending = 1; /* so set up to respond to current GP. */
1559 rdp->beenonline = 1; /* We have now been online. */ 1693 rdp->beenonline = 1; /* We have now been online. */
1560 rdp->preemptable = preemptable; 1694 rdp->preemptable = preemptable;
1561 rdp->passed_quiesc_completed = lastcomp - 1;
1562 rdp->qlen_last_fqs_check = 0; 1695 rdp->qlen_last_fqs_check = 0;
1563 rdp->n_force_qs_snap = rsp->n_force_qs; 1696 rdp->n_force_qs_snap = rsp->n_force_qs;
1564 rdp->blimit = blimit; 1697 rdp->blimit = blimit;
@@ -1580,6 +1713,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1580 spin_lock(&rnp->lock); /* irqs already disabled. */ 1713 spin_lock(&rnp->lock); /* irqs already disabled. */
1581 rnp->qsmaskinit |= mask; 1714 rnp->qsmaskinit |= mask;
1582 mask = rnp->grpmask; 1715 mask = rnp->grpmask;
1716 if (rnp == rdp->mynode) {
1717 rdp->gpnum = rnp->completed; /* if GP in progress... */
1718 rdp->completed = rnp->completed;
1719 rdp->passed_quiesc_completed = rnp->completed - 1;
1720 }
1583 spin_unlock(&rnp->lock); /* irqs already disabled. */ 1721 spin_unlock(&rnp->lock); /* irqs already disabled. */
1584 rnp = rnp->parent; 1722 rnp = rnp->parent;
1585 } while (rnp != NULL && !(rnp->qsmaskinit & mask)); 1723 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
@@ -1597,8 +1735,8 @@ static void __cpuinit rcu_online_cpu(int cpu)
1597/* 1735/*
1598 * Handle CPU online/offline notification events. 1736 * Handle CPU online/offline notification events.
1599 */ 1737 */
1600int __cpuinit rcu_cpu_notify(struct notifier_block *self, 1738static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1601 unsigned long action, void *hcpu) 1739 unsigned long action, void *hcpu)
1602{ 1740{
1603 long cpu = (long)hcpu; 1741 long cpu = (long)hcpu;
1604 1742
@@ -1685,8 +1823,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1685 cpustride *= rsp->levelspread[i]; 1823 cpustride *= rsp->levelspread[i];
1686 rnp = rsp->level[i]; 1824 rnp = rsp->level[i];
1687 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { 1825 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1688 if (rnp != rcu_get_root(rsp)) 1826 spin_lock_init(&rnp->lock);
1689 spin_lock_init(&rnp->lock); 1827 lockdep_set_class(&rnp->lock, &rcu_node_class[i]);
1690 rnp->gpnum = 0; 1828 rnp->gpnum = 0;
1691 rnp->qsmask = 0; 1829 rnp->qsmask = 0;
1692 rnp->qsmaskinit = 0; 1830 rnp->qsmaskinit = 0;
@@ -1707,9 +1845,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1707 rnp->level = i; 1845 rnp->level = i;
1708 INIT_LIST_HEAD(&rnp->blocked_tasks[0]); 1846 INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
1709 INIT_LIST_HEAD(&rnp->blocked_tasks[1]); 1847 INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
1848 INIT_LIST_HEAD(&rnp->blocked_tasks[2]);
1849 INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
1710 } 1850 }
1711 } 1851 }
1712 spin_lock_init(&rcu_get_root(rsp)->lock);
1713} 1852}
1714 1853
1715/* 1854/*
@@ -1735,16 +1874,30 @@ do { \
1735 } \ 1874 } \
1736} while (0) 1875} while (0)
1737 1876
1738void __init __rcu_init(void) 1877void __init rcu_init(void)
1739{ 1878{
1879 int i;
1880
1740 rcu_bootup_announce(); 1881 rcu_bootup_announce();
1741#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 1882#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
1742 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); 1883 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
1743#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 1884#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
1885#if NUM_RCU_LVL_4 != 0
1886 printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n");
1887#endif /* #if NUM_RCU_LVL_4 != 0 */
1744 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); 1888 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
1745 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); 1889 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
1746 __rcu_init_preempt(); 1890 __rcu_init_preempt();
1747 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 1891 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1892
1893 /*
1894 * We don't need protection against CPU-hotplug here because
1895 * this is called early in boot, before either interrupts
1896 * or the scheduler are operational.
1897 */
1898 cpu_notifier(rcu_cpu_notify, 0);
1899 for_each_online_cpu(i)
1900 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i);
1748} 1901}
1749 1902
1750#include "rcutree_plugin.h" 1903#include "rcutree_plugin.h"
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 1899023b0962..d2a0046f63b2 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -34,10 +34,11 @@
34 * In practice, this has not been tested, so there is probably some 34 * In practice, this has not been tested, so there is probably some
35 * bug somewhere. 35 * bug somewhere.
36 */ 36 */
37#define MAX_RCU_LVLS 3 37#define MAX_RCU_LVLS 4
38#define RCU_FANOUT (CONFIG_RCU_FANOUT) 38#define RCU_FANOUT (CONFIG_RCU_FANOUT)
39#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) 39#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
40#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) 40#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
41#define RCU_FANOUT_FOURTH (RCU_FANOUT_CUBE * RCU_FANOUT)
41 42
42#if NR_CPUS <= RCU_FANOUT 43#if NR_CPUS <= RCU_FANOUT
43# define NUM_RCU_LVLS 1 44# define NUM_RCU_LVLS 1
@@ -45,23 +46,33 @@
45# define NUM_RCU_LVL_1 (NR_CPUS) 46# define NUM_RCU_LVL_1 (NR_CPUS)
46# define NUM_RCU_LVL_2 0 47# define NUM_RCU_LVL_2 0
47# define NUM_RCU_LVL_3 0 48# define NUM_RCU_LVL_3 0
49# define NUM_RCU_LVL_4 0
48#elif NR_CPUS <= RCU_FANOUT_SQ 50#elif NR_CPUS <= RCU_FANOUT_SQ
49# define NUM_RCU_LVLS 2 51# define NUM_RCU_LVLS 2
50# define NUM_RCU_LVL_0 1 52# define NUM_RCU_LVL_0 1
51# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) 53# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
52# define NUM_RCU_LVL_2 (NR_CPUS) 54# define NUM_RCU_LVL_2 (NR_CPUS)
53# define NUM_RCU_LVL_3 0 55# define NUM_RCU_LVL_3 0
56# define NUM_RCU_LVL_4 0
54#elif NR_CPUS <= RCU_FANOUT_CUBE 57#elif NR_CPUS <= RCU_FANOUT_CUBE
55# define NUM_RCU_LVLS 3 58# define NUM_RCU_LVLS 3
56# define NUM_RCU_LVL_0 1 59# define NUM_RCU_LVL_0 1
57# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ) 60# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
58# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT) 61# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
59# define NUM_RCU_LVL_3 NR_CPUS 62# define NUM_RCU_LVL_3 NR_CPUS
63# define NUM_RCU_LVL_4 0
64#elif NR_CPUS <= RCU_FANOUT_FOURTH
65# define NUM_RCU_LVLS 4
66# define NUM_RCU_LVL_0 1
67# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE)
68# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
69# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
70# define NUM_RCU_LVL_4 NR_CPUS
60#else 71#else
61# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" 72# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
62#endif /* #if (NR_CPUS) <= RCU_FANOUT */ 73#endif /* #if (NR_CPUS) <= RCU_FANOUT */
63 74
64#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) 75#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
65#define NUM_RCU_NODES (RCU_SUM - NR_CPUS) 76#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
66 77
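For concreteness, a worked example of the new four-level case (hypothetical configuration, not taken from this patch):

	/*
	 * Suppose CONFIG_RCU_FANOUT = 16 and NR_CPUS = 16384.  Then
	 * RCU_FANOUT_CUBE = 4096 < 16384 <= RCU_FANOUT_FOURTH = 65536,
	 * so the four-level case applies:
	 *
	 *   NUM_RCU_LVLS  = 4
	 *   NUM_RCU_LVL_0 = 1
	 *   NUM_RCU_LVL_1 = DIV_ROUND_UP(16384, 4096) =     4
	 *   NUM_RCU_LVL_2 = DIV_ROUND_UP(16384,  256) =    64
	 *   NUM_RCU_LVL_3 = DIV_ROUND_UP(16384,   16) =  1024
	 *   NUM_RCU_LVL_4 = 16384
	 *
	 *   RCU_SUM       = 1 + 4 + 64 + 1024 + 16384 = 17477
	 *   NUM_RCU_NODES = 17477 - 16384 = 1093
	 */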
67/* 78/*
@@ -84,14 +95,21 @@ struct rcu_node {
84 long gpnum; /* Current grace period for this node. */ 95 long gpnum; /* Current grace period for this node. */
85 /* This will either be equal to or one */ 96 /* This will either be equal to or one */
86 /* behind the root rcu_node's gpnum. */ 97 /* behind the root rcu_node's gpnum. */
98 long completed; /* Last grace period completed for this node. */
99 /* This will either be equal to or one */
100 /* behind the root rcu_node's gpnum. */
87 unsigned long qsmask; /* CPUs or groups that need to switch in */ 101 unsigned long qsmask; /* CPUs or groups that need to switch in */
88 /* order for current grace period to proceed.*/ 102 /* order for current grace period to proceed.*/
89 /* In leaf rcu_node, each bit corresponds to */ 103 /* In leaf rcu_node, each bit corresponds to */
90 /* an rcu_data structure, otherwise, each */ 104 /* an rcu_data structure, otherwise, each */
91 /* bit corresponds to a child rcu_node */ 105 /* bit corresponds to a child rcu_node */
92 /* structure. */ 106 /* structure. */
107 unsigned long expmask; /* Groups that have ->blocked_tasks[] */
108 /* elements that need to drain to allow the */
109 /* current expedited grace period to */
110 /* complete (only for TREE_PREEMPT_RCU). */
93 unsigned long qsmaskinit; 111 unsigned long qsmaskinit;
94 /* Per-GP initialization for qsmask. */ 112 /* Per-GP initial value for qsmask & expmask. */
95 unsigned long grpmask; /* Mask to apply to parent qsmask. */ 113 unsigned long grpmask; /* Mask to apply to parent qsmask. */
96 /* Only one bit will be set in this mask. */ 114 /* Only one bit will be set in this mask. */
97 int grplo; /* lowest-numbered CPU or group here. */ 115 int grplo; /* lowest-numbered CPU or group here. */
@@ -99,7 +117,7 @@ struct rcu_node {
99 u8 grpnum; /* CPU/group number for next level up. */ 117 u8 grpnum; /* CPU/group number for next level up. */
100 u8 level; /* root is at level 0. */ 118 u8 level; /* root is at level 0. */
101 struct rcu_node *parent; 119 struct rcu_node *parent;
102 struct list_head blocked_tasks[2]; 120 struct list_head blocked_tasks[4];
103 /* Tasks blocked in RCU read-side critsect. */ 121 /* Tasks blocked in RCU read-side critsect. */
104 /* Grace period number (->gpnum) x blocked */ 122 /* Grace period number (->gpnum) x blocked */
105 /* by tasks on the (x & 0x1) element of the */ 123 /* by tasks on the (x & 0x1) element of the */
@@ -114,6 +132,21 @@ struct rcu_node {
114 for ((rnp) = &(rsp)->node[0]; \ 132 for ((rnp) = &(rsp)->node[0]; \
115 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) 133 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
116 134
135/*
136 * Do a breadth-first scan of the non-leaf rcu_node structures for the
137 * specified rcu_state structure. Note that if there is a singleton
138 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
139 */
140#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
141 for ((rnp) = &(rsp)->node[0]; \
142 (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)
143
144/*
145 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
146 * structure. Note that if there is a singleton rcu_node tree with but
147 * one rcu_node structure, this loop -will- visit the rcu_node structure.
148 * It is still a leaf node, even if it is also the root node.
149 */
117#define rcu_for_each_leaf_node(rsp, rnp) \ 150#define rcu_for_each_leaf_node(rsp, rnp) \
118 for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \ 151 for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
119 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++) 152 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
@@ -204,11 +237,12 @@ struct rcu_data {
204#define RCU_GP_IDLE 0 /* No grace period in progress. */ 237#define RCU_GP_IDLE 0 /* No grace period in progress. */
205#define RCU_GP_INIT 1 /* Grace period being initialized. */ 238#define RCU_GP_INIT 1 /* Grace period being initialized. */
206#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */ 239#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
207#define RCU_FORCE_QS 3 /* Need to force quiescent state. */ 240#define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */
241#define RCU_FORCE_QS 4 /* Need to force quiescent state. */
208#ifdef CONFIG_NO_HZ 242#ifdef CONFIG_NO_HZ
209#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK 243#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
210#else /* #ifdef CONFIG_NO_HZ */ 244#else /* #ifdef CONFIG_NO_HZ */
211#define RCU_SIGNAL_INIT RCU_FORCE_QS 245#define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED
212#endif /* #else #ifdef CONFIG_NO_HZ */ 246#endif /* #else #ifdef CONFIG_NO_HZ */
213 247
214#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ 248#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
@@ -246,7 +280,7 @@ struct rcu_state {
246 long gpnum; /* Current gp number. */ 280 long gpnum; /* Current gp number. */
247 long completed; /* # of last completed gp. */ 281 long completed; /* # of last completed gp. */
248 282
249 /* End of fields guarded by root rcu_node's lock. */ 283 /* End of fields guarded by root rcu_node's lock. */
250 284
251 spinlock_t onofflock; /* exclude on/offline and */ 285 spinlock_t onofflock; /* exclude on/offline and */
252 /* starting new GP. Also */ 286 /* starting new GP. Also */
@@ -260,6 +294,8 @@ struct rcu_state {
260 long orphan_qlen; /* Number of orphaned cbs. */ 294 long orphan_qlen; /* Number of orphaned cbs. */
261 spinlock_t fqslock; /* Only one task forcing */ 295 spinlock_t fqslock; /* Only one task forcing */
262 /* quiescent states. */ 296 /* quiescent states. */
297 long completed_fqs; /* Value of completed @ snap. */
298 /* Protected by fqslock. */
263 unsigned long jiffies_force_qs; /* Time at which to invoke */ 299 unsigned long jiffies_force_qs; /* Time at which to invoke */
264 /* force_quiescent_state(). */ 300 /* force_quiescent_state(). */
265 unsigned long n_force_qs; /* Number of calls to */ 301 unsigned long n_force_qs; /* Number of calls to */
@@ -274,11 +310,15 @@ struct rcu_state {
274 unsigned long jiffies_stall; /* Time at which to check */ 310 unsigned long jiffies_stall; /* Time at which to check */
275 /* for CPU stalls. */ 311 /* for CPU stalls. */
276#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 312#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
277#ifdef CONFIG_NO_HZ
278 long dynticks_completed; /* Value of completed @ snap. */
279#endif /* #ifdef CONFIG_NO_HZ */
280}; 313};
281 314
315/* Return values for rcu_preempt_offline_tasks(). */
316
317#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
318 /* GP were moved to root. */
319#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */
320 /* GP were moved to root. */
321
282#ifdef RCU_TREE_NONCORE 322#ifdef RCU_TREE_NONCORE
283 323
284/* 324/*
@@ -298,10 +338,14 @@ DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
298#else /* #ifdef RCU_TREE_NONCORE */ 338#else /* #ifdef RCU_TREE_NONCORE */
299 339
300/* Forward declarations for rcutree_plugin.h */ 340/* Forward declarations for rcutree_plugin.h */
301static inline void rcu_bootup_announce(void); 341static void rcu_bootup_announce(void);
302long rcu_batches_completed(void); 342long rcu_batches_completed(void);
303static void rcu_preempt_note_context_switch(int cpu); 343static void rcu_preempt_note_context_switch(int cpu);
304static int rcu_preempted_readers(struct rcu_node *rnp); 344static int rcu_preempted_readers(struct rcu_node *rnp);
345#ifdef CONFIG_HOTPLUG_CPU
346static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
347 unsigned long flags);
348#endif /* #ifdef CONFIG_HOTPLUG_CPU */
305#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 349#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
306static void rcu_print_task_stall(struct rcu_node *rnp); 350static void rcu_print_task_stall(struct rcu_node *rnp);
307#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 351#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
@@ -315,6 +359,9 @@ static void rcu_preempt_offline_cpu(int cpu);
315static void rcu_preempt_check_callbacks(int cpu); 359static void rcu_preempt_check_callbacks(int cpu);
316static void rcu_preempt_process_callbacks(void); 360static void rcu_preempt_process_callbacks(void);
317void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); 361void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
362#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
363static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
364#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
318static int rcu_preempt_pending(int cpu); 365static int rcu_preempt_pending(int cpu);
319static int rcu_preempt_needs_cpu(int cpu); 366static int rcu_preempt_needs_cpu(int cpu);
320static void __cpuinit rcu_preempt_init_percpu_data(int cpu); 367static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ef2a58c2b9d5..37fbccdf41d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -24,16 +24,19 @@
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> 24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25 */ 25 */
26 26
27#include <linux/delay.h>
27 28
28#ifdef CONFIG_TREE_PREEMPT_RCU 29#ifdef CONFIG_TREE_PREEMPT_RCU
29 30
30struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); 31struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
31DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); 32DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
32 33
34static int rcu_preempted_readers_exp(struct rcu_node *rnp);
35
33/* 36/*
34 * Tell them what RCU they are running. 37 * Tell them what RCU they are running.
35 */ 38 */
36static inline void rcu_bootup_announce(void) 39static void __init rcu_bootup_announce(void)
37{ 40{
38 printk(KERN_INFO 41 printk(KERN_INFO
39 "Experimental preemptable hierarchical RCU implementation.\n"); 42 "Experimental preemptable hierarchical RCU implementation.\n");
@@ -67,7 +70,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
67static void rcu_preempt_qs(int cpu) 70static void rcu_preempt_qs(int cpu)
68{ 71{
69 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); 72 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
70 rdp->passed_quiesc_completed = rdp->completed; 73 rdp->passed_quiesc_completed = rdp->gpnum - 1;
71 barrier(); 74 barrier();
72 rdp->passed_quiesc = 1; 75 rdp->passed_quiesc = 1;
73} 76}
@@ -157,14 +160,58 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock);
157 */ 160 */
158static int rcu_preempted_readers(struct rcu_node *rnp) 161static int rcu_preempted_readers(struct rcu_node *rnp)
159{ 162{
160 return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); 163 int phase = rnp->gpnum & 0x1;
164
165 return !list_empty(&rnp->blocked_tasks[phase]) ||
166 !list_empty(&rnp->blocked_tasks[phase + 2]);
167}
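A summary of the ->blocked_tasks[] indexing that the rcu_preempted_readers() change above relies on, inferred from this patch and stated only for illustration:

	/*
	 * With phase = rnp->gpnum & 0x1:
	 *   blocked_tasks[phase]       readers blocking the current normal GP
	 *   blocked_tasks[!phase]      readers queued against the next normal GP
	 *   blocked_tasks[phase + 2]   as above, but also blocking the current
	 *   blocked_tasks[!phase + 2]  expedited GP; sync_rcu_preempt_exp_init(),
	 *                              added later in this patch, splices [0] and
	 *                              [1] onto [2] and [3] when an expedited
	 *                              grace period begins.
	 */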
168
169/*
170 * Record a quiescent state for all tasks that were previously queued
171 * on the specified rcu_node structure and that were blocking the current
172 * RCU grace period. The caller must hold the specified rnp->lock with
173 * irqs disabled, and this lock is released upon return, but irqs remain
174 * disabled.
175 */
176static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
177 __releases(rnp->lock)
178{
179 unsigned long mask;
180 struct rcu_node *rnp_p;
181
182 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
183 spin_unlock_irqrestore(&rnp->lock, flags);
184 return; /* Still need more quiescent states! */
185 }
186
187 rnp_p = rnp->parent;
188 if (rnp_p == NULL) {
189 /*
190 * Either there is only one rcu_node in the tree,
191 * or tasks were kicked up to root rcu_node due to
192 * CPUs going offline.
193 */
194 rcu_report_qs_rsp(&rcu_preempt_state, flags);
195 return;
196 }
197
198 /* Report up the rest of the hierarchy. */
199 mask = rnp->grpmask;
200 spin_unlock(&rnp->lock); /* irqs remain disabled. */
201 spin_lock(&rnp_p->lock); /* irqs already disabled. */
202 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
161} 203}
162 204
205/*
206 * Handle special cases during rcu_read_unlock(), such as needing to
207 * notify RCU core processing or task having blocked during the RCU
208 * read-side critical section.
209 */
163static void rcu_read_unlock_special(struct task_struct *t) 210static void rcu_read_unlock_special(struct task_struct *t)
164{ 211{
165 int empty; 212 int empty;
213 int empty_exp;
166 unsigned long flags; 214 unsigned long flags;
167 unsigned long mask;
168 struct rcu_node *rnp; 215 struct rcu_node *rnp;
169 int special; 216 int special;
170 217
@@ -207,36 +254,30 @@ static void rcu_read_unlock_special(struct task_struct *t)
207 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 254 spin_unlock(&rnp->lock); /* irqs remain disabled. */
208 } 255 }
209 empty = !rcu_preempted_readers(rnp); 256 empty = !rcu_preempted_readers(rnp);
257 empty_exp = !rcu_preempted_readers_exp(rnp);
258 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
210 list_del_init(&t->rcu_node_entry); 259 list_del_init(&t->rcu_node_entry);
211 t->rcu_blocked_node = NULL; 260 t->rcu_blocked_node = NULL;
212 261
213 /* 262 /*
214 * If this was the last task on the current list, and if 263 * If this was the last task on the current list, and if
215 * we aren't waiting on any CPUs, report the quiescent state. 264 * we aren't waiting on any CPUs, report the quiescent state.
216 * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk() 265 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
217 * drop rnp->lock and restore irq.
218 */ 266 */
219 if (!empty && rnp->qsmask == 0 && 267 if (empty)
220 !rcu_preempted_readers(rnp)) {
221 struct rcu_node *rnp_p;
222
223 if (rnp->parent == NULL) {
224 /* Only one rcu_node in the tree. */
225 cpu_quiet_msk_finish(&rcu_preempt_state, flags);
226 return;
227 }
228 /* Report up the rest of the hierarchy. */
229 mask = rnp->grpmask;
230 spin_unlock_irqrestore(&rnp->lock, flags); 268 spin_unlock_irqrestore(&rnp->lock, flags);
231 rnp_p = rnp->parent; 269 else
232 spin_lock_irqsave(&rnp_p->lock, flags); 270 rcu_report_unblock_qs_rnp(rnp, flags);
233 WARN_ON_ONCE(rnp->qsmask); 271
234 cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags); 272 /*
235 return; 273 * If this was the last task on the expedited lists,
236 } 274 * then we need to report up the rcu_node hierarchy.
237 spin_unlock(&rnp->lock); 275 */
276 if (!empty_exp && !rcu_preempted_readers_exp(rnp))
277 rcu_report_exp_rnp(&rcu_preempt_state, rnp);
278 } else {
279 local_irq_restore(flags);
238 } 280 }
239 local_irq_restore(flags);
240} 281}
241 282
242/* 283/*
@@ -303,6 +344,8 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
303 * rcu_node. The reason for not just moving them to the immediate 344 * rcu_node. The reason for not just moving them to the immediate
304 * parent is to remove the need for rcu_read_unlock_special() to 345 * parent is to remove the need for rcu_read_unlock_special() to
305 * make more than two attempts to acquire the target rcu_node's lock. 346 * make more than two attempts to acquire the target rcu_node's lock.
347 * Returns true if there were tasks blocking the current RCU grace
348 * period.
306 * 349 *
307 * Returns 1 if there was previously a task blocking the current grace 350 * Returns 1 if there was previously a task blocking the current grace
308 * period on the specified rcu_node structure. 351 * period on the specified rcu_node structure.
@@ -316,7 +359,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
316 int i; 359 int i;
317 struct list_head *lp; 360 struct list_head *lp;
318 struct list_head *lp_root; 361 struct list_head *lp_root;
319 int retval = rcu_preempted_readers(rnp); 362 int retval = 0;
320 struct rcu_node *rnp_root = rcu_get_root(rsp); 363 struct rcu_node *rnp_root = rcu_get_root(rsp);
321 struct task_struct *tp; 364 struct task_struct *tp;
322 365
@@ -326,7 +369,9 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
326 } 369 }
327 WARN_ON_ONCE(rnp != rdp->mynode && 370 WARN_ON_ONCE(rnp != rdp->mynode &&
328 (!list_empty(&rnp->blocked_tasks[0]) || 371 (!list_empty(&rnp->blocked_tasks[0]) ||
329 !list_empty(&rnp->blocked_tasks[1]))); 372 !list_empty(&rnp->blocked_tasks[1]) ||
373 !list_empty(&rnp->blocked_tasks[2]) ||
374 !list_empty(&rnp->blocked_tasks[3])));
330 375
331 /* 376 /*
332 * Move tasks up to root rcu_node. Rely on the fact that the 377 * Move tasks up to root rcu_node. Rely on the fact that the
@@ -334,7 +379,11 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
334 * rcu_nodes in terms of gp_num value. This fact allows us to 379 * rcu_nodes in terms of gp_num value. This fact allows us to
335 * move the blocked_tasks[] array directly, element by element. 380 * move the blocked_tasks[] array directly, element by element.
336 */ 381 */
337 for (i = 0; i < 2; i++) { 382 if (rcu_preempted_readers(rnp))
383 retval |= RCU_OFL_TASKS_NORM_GP;
384 if (rcu_preempted_readers_exp(rnp))
385 retval |= RCU_OFL_TASKS_EXP_GP;
386 for (i = 0; i < 4; i++) {
338 lp = &rnp->blocked_tasks[i]; 387 lp = &rnp->blocked_tasks[i];
339 lp_root = &rnp_root->blocked_tasks[i]; 388 lp_root = &rnp_root->blocked_tasks[i];
340 while (!list_empty(lp)) { 389 while (!list_empty(lp)) {
@@ -346,7 +395,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
346 spin_unlock(&rnp_root->lock); /* irqs remain disabled */ 395 spin_unlock(&rnp_root->lock); /* irqs remain disabled */
347 } 396 }
348 } 397 }
349
350 return retval; 398 return retval;
351} 399}
352 400
@@ -398,14 +446,183 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
398} 446}
399EXPORT_SYMBOL_GPL(call_rcu); 447EXPORT_SYMBOL_GPL(call_rcu);
400 448
449/**
450 * synchronize_rcu - wait until a grace period has elapsed.
451 *
452 * Control will return to the caller some time after a full grace
453 * period has elapsed, in other words after all currently executing RCU
454 * read-side critical sections have completed. RCU read-side critical
455 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
456 * and may be nested.
457 */
458void synchronize_rcu(void)
459{
460 struct rcu_synchronize rcu;
461
462 if (!rcu_scheduler_active)
463 return;
464
465 init_completion(&rcu.completion);
466 /* Will wake me after RCU finished. */
467 call_rcu(&rcu.head, wakeme_after_rcu);
468 /* Wait for it. */
469 wait_for_completion(&rcu.completion);
470}
471EXPORT_SYMBOL_GPL(synchronize_rcu);
472
473static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
474static long sync_rcu_preempt_exp_count;
475static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
476
401/* 477/*
402 * Wait for an rcu-preempt grace period. We are supposed to expedite the 478 * Return non-zero if there are any tasks in RCU read-side critical
403 * grace period, but this is the crude slow compatability hack, so just 479 * sections blocking the current preemptible-RCU expedited grace period.
404 * invoke synchronize_rcu(). 480 * If there is no preemptible-RCU expedited grace period currently in
481 * progress, returns zero unconditionally.
482 */
483static int rcu_preempted_readers_exp(struct rcu_node *rnp)
484{
485 return !list_empty(&rnp->blocked_tasks[2]) ||
486 !list_empty(&rnp->blocked_tasks[3]);
487}
488
489/*
490 * return non-zero if there is no RCU expedited grace period in progress
491 * for the specified rcu_node structure, in other words, if all CPUs and
492 * tasks covered by the specified rcu_node structure have done their bit
493 * for the current expedited grace period. Works only for preemptible
494 * RCU -- other RCU implementation use other means.
495 *
496 * Caller must hold sync_rcu_preempt_exp_mutex.
497 */
498static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
499{
500 return !rcu_preempted_readers_exp(rnp) &&
501 ACCESS_ONCE(rnp->expmask) == 0;
502}
503
504/*
505 * Report the exit from RCU read-side critical section for the last task
506 * that queued itself during or before the current expedited preemptible-RCU
507 * grace period. This event is reported either to the rcu_node structure on
508 * which the task was queued or to one of that rcu_node structure's ancestors,
509 * recursively up the tree. (Calm down, calm down, we do the recursion
510 * iteratively!)
511 *
512 * Caller must hold sync_rcu_preempt_exp_mutex.
513 */
514static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
515{
516 unsigned long flags;
517 unsigned long mask;
518
519 spin_lock_irqsave(&rnp->lock, flags);
520 for (;;) {
521 if (!sync_rcu_preempt_exp_done(rnp))
522 break;
523 if (rnp->parent == NULL) {
524 wake_up(&sync_rcu_preempt_exp_wq);
525 break;
526 }
527 mask = rnp->grpmask;
528 spin_unlock(&rnp->lock); /* irqs remain disabled */
529 rnp = rnp->parent;
530 spin_lock(&rnp->lock); /* irqs already disabled */
531 rnp->expmask &= ~mask;
532 }
533 spin_unlock_irqrestore(&rnp->lock, flags);
534}
535
536/*
537 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
538 * grace period for the specified rcu_node structure. If there are no such
539 * tasks, report it up the rcu_node hierarchy.
540 *
541 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
542 */
543static void
544sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
545{
546 int must_wait;
547
548 spin_lock(&rnp->lock); /* irqs already disabled */
549 list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
550 list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
551 must_wait = rcu_preempted_readers_exp(rnp);
552 spin_unlock(&rnp->lock); /* irqs remain disabled */
553 if (!must_wait)
554 rcu_report_exp_rnp(rsp, rnp);
555}
556
557/*
558 * Wait for an rcu-preempt grace period, but expedite it. The basic idea
559 * is to invoke synchronize_sched_expedited() to push all the tasks to
560 * the ->blocked_tasks[] lists, move all entries from the first set of
561 * ->blocked_tasks[] lists to the second set, and finally wait for this
562 * second set to drain.
405 */ 563 */
406void synchronize_rcu_expedited(void) 564void synchronize_rcu_expedited(void)
407{ 565{
408 synchronize_rcu(); 566 unsigned long flags;
567 struct rcu_node *rnp;
568 struct rcu_state *rsp = &rcu_preempt_state;
569 long snap;
570 int trycount = 0;
571
572 smp_mb(); /* Caller's modifications seen first by other CPUs. */
573 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
574 smp_mb(); /* Above access cannot bleed into critical section. */
575
576 /*
577 * Acquire lock, falling back to synchronize_rcu() if too many
578 * lock-acquisition failures. Of course, if someone does the
579 * expedited grace period for us, just leave.
580 */
581 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
582 if (trycount++ < 10)
583 udelay(trycount * num_online_cpus());
584 else {
585 synchronize_rcu();
586 return;
587 }
588 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
589 goto mb_ret; /* Others did our work for us. */
590 }
591 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
592 goto unlock_mb_ret; /* Others did our work for us. */
593
594 /* force all RCU readers onto blocked_tasks[]. */
595 synchronize_sched_expedited();
596
597 spin_lock_irqsave(&rsp->onofflock, flags);
598
599 /* Initialize ->expmask for all non-leaf rcu_node structures. */
600 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
601 spin_lock(&rnp->lock); /* irqs already disabled. */
602 rnp->expmask = rnp->qsmaskinit;
603 spin_unlock(&rnp->lock); /* irqs remain disabled. */
604 }
605
606 /* Snapshot current state of ->blocked_tasks[] lists. */
607 rcu_for_each_leaf_node(rsp, rnp)
608 sync_rcu_preempt_exp_init(rsp, rnp);
609 if (NUM_RCU_NODES > 1)
610 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
611
612 spin_unlock_irqrestore(&rsp->onofflock, flags);
613
614 /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
615 rnp = rcu_get_root(rsp);
616 wait_event(sync_rcu_preempt_exp_wq,
617 sync_rcu_preempt_exp_done(rnp));
618
619 /* Clean up and exit. */
620 smp_mb(); /* ensure expedited GP seen before counter increment. */
621 ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
622unlock_mb_ret:
623 mutex_unlock(&sync_rcu_preempt_exp_mutex);
624mb_ret:
625 smp_mb(); /* ensure subsequent action seen after grace period. */
409} 626}
410EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 627EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
411 628
@@ -481,7 +698,7 @@ void exit_rcu(void)
481/* 698/*
482 * Tell them what RCU they are running. 699 * Tell them what RCU they are running.
483 */ 700 */
484static inline void rcu_bootup_announce(void) 701static void __init rcu_bootup_announce(void)
485{ 702{
486 printk(KERN_INFO "Hierarchical RCU implementation.\n"); 703 printk(KERN_INFO "Hierarchical RCU implementation.\n");
487} 704}
@@ -512,6 +729,16 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
512 return 0; 729 return 0;
513} 730}
514 731
732#ifdef CONFIG_HOTPLUG_CPU
733
734/* Because preemptible RCU does not exist, no quieting of tasks. */
735static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
736{
737 spin_unlock_irqrestore(&rnp->lock, flags);
738}
739
740#endif /* #ifdef CONFIG_HOTPLUG_CPU */
741
515#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 742#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
516 743
517/* 744/*
@@ -594,6 +821,20 @@ void synchronize_rcu_expedited(void)
594} 821}
595EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 822EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
596 823
824#ifdef CONFIG_HOTPLUG_CPU
825
826/*
827 * Because preemptable RCU does not exist, there is never any need to
828 * report on tasks preempted in RCU read-side critical sections during
829 * expedited RCU grace periods.
830 */
831static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
832{
833 return;
834}
835
836#endif /* #ifdef CONFIG_HOTPLUG_CPU */
837
597/* 838/*
598 * Because preemptable RCU does not exist, it never has any work to do. 839 * Because preemptable RCU does not exist, it never has any work to do.
599 */ 840 */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 4b31c779e62e..9d2c88423b31 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -155,12 +155,15 @@ static const struct file_operations rcudata_csv_fops = {
155 155
156static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) 156static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
157{ 157{
158 long gpnum;
158 int level = 0; 159 int level = 0;
160 int phase;
159 struct rcu_node *rnp; 161 struct rcu_node *rnp;
160 162
163 gpnum = rsp->gpnum;
161 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " 164 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
162 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n", 165 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n",
163 rsp->completed, rsp->gpnum, rsp->signaled, 166 rsp->completed, gpnum, rsp->signaled,
164 (long)(rsp->jiffies_force_qs - jiffies), 167 (long)(rsp->jiffies_force_qs - jiffies),
165 (int)(jiffies & 0xffff), 168 (int)(jiffies & 0xffff),
166 rsp->n_force_qs, rsp->n_force_qs_ngp, 169 rsp->n_force_qs, rsp->n_force_qs_ngp,
@@ -171,8 +174,13 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
171 seq_puts(m, "\n"); 174 seq_puts(m, "\n");
172 level = rnp->level; 175 level = rnp->level;
173 } 176 }
174 seq_printf(m, "%lx/%lx %d:%d ^%d ", 177 phase = gpnum & 0x1;
178 seq_printf(m, "%lx/%lx %c%c>%c%c %d:%d ^%d ",
175 rnp->qsmask, rnp->qsmaskinit, 179 rnp->qsmask, rnp->qsmaskinit,
180 "T."[list_empty(&rnp->blocked_tasks[phase])],
181 "E."[list_empty(&rnp->blocked_tasks[phase + 2])],
182 "T."[list_empty(&rnp->blocked_tasks[!phase])],
183 "E."[list_empty(&rnp->blocked_tasks[!phase + 2])],
176 rnp->grplo, rnp->grphi, rnp->grpnum); 184 rnp->grplo, rnp->grphi, rnp->grpnum);
177 } 185 }
178 seq_puts(m, "\n"); 186 seq_puts(m, "\n");
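The "T."[expr] and "E."[expr] constructs in the seq_printf() above index into two-character string literals; a standalone illustration of the idiom (hypothetical, not from the patch):

	#include <stdio.h>

	int main(void)
	{
		int empty = 1;			/* pretend list_empty() returned 1 */

		/* "T."[x] yields 'T' when x == 0 and '.' when x == 1, so the
		 * trace prints a letter exactly when the list is non-empty. */
		printf("%c\n", "T."[empty]);	/* prints '.' */
		return 0;
	}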
diff --git a/kernel/sched.c b/kernel/sched.c
index 3c11ae0a948d..6ae2739b8f19 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5481,7 +5481,7 @@ need_resched_nonpreemptible:
5481} 5481}
5482EXPORT_SYMBOL(schedule); 5482EXPORT_SYMBOL(schedule);
5483 5483
5484#ifdef CONFIG_SMP 5484#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
5485/* 5485/*
5486 * Look out! "owner" is an entirely speculative pointer 5486 * Look out! "owner" is an entirely speculative pointer
5487 * access and not reliable. 5487 * access and not reliable.
@@ -10901,6 +10901,7 @@ void synchronize_sched_expedited(void)
10901 spin_unlock_irqrestore(&rq->lock, flags); 10901 spin_unlock_irqrestore(&rq->lock, flags);
10902 } 10902 }
10903 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; 10903 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
10904 synchronize_sched_expedited_count++;
10904 mutex_unlock(&rcu_sched_expedited_mutex); 10905 mutex_unlock(&rcu_sched_expedited_mutex);
10905 put_online_cpus(); 10906 put_online_cpus();
10906 if (need_full_sync) 10907 if (need_full_sync)
diff --git a/kernel/signal.c b/kernel/signal.c
index 93e72e5feae6..6b982f2cf524 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,6 +22,7 @@
22#include <linux/ptrace.h> 22#include <linux/ptrace.h>
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/signalfd.h> 24#include <linux/signalfd.h>
25#include <linux/ratelimit.h>
25#include <linux/tracehook.h> 26#include <linux/tracehook.h>
26#include <linux/capability.h> 27#include <linux/capability.h>
27#include <linux/freezer.h> 28#include <linux/freezer.h>
@@ -42,6 +43,8 @@
42 43
43static struct kmem_cache *sigqueue_cachep; 44static struct kmem_cache *sigqueue_cachep;
44 45
46int print_fatal_signals __read_mostly;
47
45static void __user *sig_handler(struct task_struct *t, int sig) 48static void __user *sig_handler(struct task_struct *t, int sig)
46{ 49{
47 return t->sighand->action[sig - 1].sa.sa_handler; 50 return t->sighand->action[sig - 1].sa.sa_handler;
@@ -160,7 +163,7 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
160{ 163{
161 unsigned long i, *s, *m, x; 164 unsigned long i, *s, *m, x;
162 int sig = 0; 165 int sig = 0;
163 166
164 s = pending->signal.sig; 167 s = pending->signal.sig;
165 m = mask->sig; 168 m = mask->sig;
166 switch (_NSIG_WORDS) { 169 switch (_NSIG_WORDS) {
@@ -185,17 +188,31 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
185 sig = ffz(~x) + 1; 188 sig = ffz(~x) + 1;
186 break; 189 break;
187 } 190 }
188 191
189 return sig; 192 return sig;
190} 193}
191 194
195static inline void print_dropped_signal(int sig)
196{
197 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
198
199 if (!print_fatal_signals)
200 return;
201
202 if (!__ratelimit(&ratelimit_state))
203 return;
204
205 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
206 current->comm, current->pid, sig);
207}
208
192/* 209/*
193 * allocate a new signal queue record 210 * allocate a new signal queue record
194 * - this may be called without locks if and only if t == current, otherwise an 211 * - this may be called without locks if and only if t == current, otherwise an
195 * appopriate lock must be held to stop the target task from exiting 212 * appopriate lock must be held to stop the target task from exiting
196 */ 213 */
197static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, 214static struct sigqueue *
198 int override_rlimit) 215__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
199{ 216{
200 struct sigqueue *q = NULL; 217 struct sigqueue *q = NULL;
201 struct user_struct *user; 218 struct user_struct *user;
@@ -208,10 +225,15 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
208 */ 225 */
209 user = get_uid(__task_cred(t)->user); 226 user = get_uid(__task_cred(t)->user);
210 atomic_inc(&user->sigpending); 227 atomic_inc(&user->sigpending);
228
211 if (override_rlimit || 229 if (override_rlimit ||
212 atomic_read(&user->sigpending) <= 230 atomic_read(&user->sigpending) <=
213 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) 231 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
214 q = kmem_cache_alloc(sigqueue_cachep, flags); 232 q = kmem_cache_alloc(sigqueue_cachep, flags);
233 } else {
234 print_dropped_signal(sig);
235 }
236
215 if (unlikely(q == NULL)) { 237 if (unlikely(q == NULL)) {
216 atomic_dec(&user->sigpending); 238 atomic_dec(&user->sigpending);
217 free_uid(user); 239 free_uid(user);
@@ -870,7 +892,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
870 else 892 else
871 override_rlimit = 0; 893 override_rlimit = 0;
872 894
873 q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, 895 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
874 override_rlimit); 896 override_rlimit);
875 if (q) { 897 if (q) {
876 list_add_tail(&q->list, &pending->list); 898 list_add_tail(&q->list, &pending->list);
@@ -935,8 +957,6 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
935 return __send_signal(sig, info, t, group, from_ancestor_ns); 957 return __send_signal(sig, info, t, group, from_ancestor_ns);
936} 958}
937 959
938int print_fatal_signals;
939
940static void print_fatal_signal(struct pt_regs *regs, int signr) 960static void print_fatal_signal(struct pt_regs *regs, int signr)
941{ 961{
942 printk("%s/%d: potentially unexpected fatal signal %d.\n", 962 printk("%s/%d: potentially unexpected fatal signal %d.\n",
@@ -1303,19 +1323,19 @@ EXPORT_SYMBOL(kill_pid);
1303 * These functions support sending signals using preallocated sigqueue 1323 * These functions support sending signals using preallocated sigqueue
1304 * structures. This is needed "because realtime applications cannot 1324 * structures. This is needed "because realtime applications cannot
1305 * afford to lose notifications of asynchronous events, like timer 1325 * afford to lose notifications of asynchronous events, like timer
1306 * expirations or I/O completions". In the case of Posix Timers 1326 * expirations or I/O completions". In the case of Posix Timers
1307 * we allocate the sigqueue structure from the timer_create. If this 1327 * we allocate the sigqueue structure from the timer_create. If this
1308 * allocation fails we are able to report the failure to the application 1328 * allocation fails we are able to report the failure to the application
1309 * with an EAGAIN error. 1329 * with an EAGAIN error.
1310 */ 1330 */
1311
1312struct sigqueue *sigqueue_alloc(void) 1331struct sigqueue *sigqueue_alloc(void)
1313{ 1332{
1314 struct sigqueue *q; 1333 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1315 1334
1316 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0))) 1335 if (q)
1317 q->flags |= SIGQUEUE_PREALLOC; 1336 q->flags |= SIGQUEUE_PREALLOC;
1318 return(q); 1337
1338 return q;
1319} 1339}
1320 1340
1321void sigqueue_free(struct sigqueue *q) 1341void sigqueue_free(struct sigqueue *q)
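The signal.c hunks above add a rate-limited diagnostic, print_dropped_signal(), and pass the signal number down into __sigqueue_alloc() so the message can name the signal that was dropped when RLIMIT_SIGPENDING is hit. The guard is the stock <linux/ratelimit.h> pattern; a minimal sketch of that pattern in isolation — warn_dropped_event() and its message are hypothetical, and the 5-second/10-message window simply mirrors the values the patch uses — might look like this:

#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>

/* Hypothetical helper: emit at most 10 messages per 5-second window. */
static void warn_dropped_event(int id)
{
	static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);

	if (!__ratelimit(&rs))
		return;

	printk(KERN_INFO "%s/%d: dropped event %d\n",
	       current->comm, current->pid, id);
}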
diff --git a/kernel/slow-work-debugfs.c b/kernel/slow-work-debugfs.c
new file mode 100644
index 000000000000..e45c43645298
--- /dev/null
+++ b/kernel/slow-work-debugfs.c
@@ -0,0 +1,227 @@
1/* Slow work debugging
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/slow-work.h>
14#include <linux/fs.h>
15#include <linux/time.h>
16#include <linux/seq_file.h>
17#include "slow-work.h"
18
19#define ITERATOR_SHIFT (BITS_PER_LONG - 4)
20#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT)
21#define ITERATOR_COUNTER (~ITERATOR_SELECTOR)
22
23void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
24{
25 seq_puts(m, "Slow-work: New thread");
26}
27
28/*
29 * Render the time mark field on a work item into a 5-char time with units plus
30 * a space
31 */
32static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
33{
34 struct timespec now, diff;
35
36 now = CURRENT_TIME;
37 diff = timespec_sub(now, work->mark);
38
39 if (diff.tv_sec < 0)
40 seq_puts(m, " -ve ");
41 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
42 seq_printf(m, "%3luns ", diff.tv_nsec);
43 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
44 seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
45 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
46 seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
47 else if (diff.tv_sec <= 1)
48 seq_puts(m, " 1s ");
49 else if (diff.tv_sec < 60)
50 seq_printf(m, "%4lus ", diff.tv_sec);
51 else if (diff.tv_sec < 60 * 60)
52 seq_printf(m, "%4lum ", diff.tv_sec / 60);
53 else if (diff.tv_sec < 60 * 60 * 24)
54 seq_printf(m, "%4luh ", diff.tv_sec / 3600);
55 else
56 seq_puts(m, "exces ");
57}
58
59/*
60 * Describe a slow work item for debugfs
61 */
62static int slow_work_runqueue_show(struct seq_file *m, void *v)
63{
64 struct slow_work *work;
65 struct list_head *p = v;
66 unsigned long id;
67
68 switch ((unsigned long) v) {
69 case 1:
70 seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n");
71 return 0;
72 case 2:
73 seq_puts(m, "=== ===== ================ == ===== ==========\n");
74 return 0;
75
76 case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
77 id = (unsigned long) v - 3;
78
79 read_lock(&slow_work_execs_lock);
80 work = slow_work_execs[id];
81 if (work) {
82 smp_read_barrier_depends();
83
84 seq_printf(m, "%3lu %5d %16p %2lx ",
85 id, slow_work_pids[id], work, work->flags);
86 slow_work_print_mark(m, work);
87
88 if (work->ops->desc)
89 work->ops->desc(work, m);
90 seq_putc(m, '\n');
91 }
92 read_unlock(&slow_work_execs_lock);
93 return 0;
94
95 default:
96 work = list_entry(p, struct slow_work, link);
97 seq_printf(m, "%3s - %16p %2lx ",
98 work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
99 work, work->flags);
100 slow_work_print_mark(m, work);
101
102 if (work->ops->desc)
103 work->ops->desc(work, m);
104 seq_putc(m, '\n');
105 return 0;
106 }
107}
108
109/*
110 * map the iterator to a work item
111 */
112static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
113{
114 struct list_head *p;
115 unsigned long count, id;
116
117 switch (*_pos >> ITERATOR_SHIFT) {
118 case 0x0:
119 if (*_pos == 0)
120 *_pos = 1;
121 if (*_pos < 3)
122 return (void *)(unsigned long) *_pos;
123 if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
124 for (id = *_pos - 3;
125 id < SLOW_WORK_THREAD_LIMIT;
126 id++, (*_pos)++)
127 if (slow_work_execs[id])
128 return (void *)(unsigned long) *_pos;
129 *_pos = 0x1UL << ITERATOR_SHIFT;
130
131 case 0x1:
132 count = *_pos & ITERATOR_COUNTER;
133 list_for_each(p, &slow_work_queue) {
134 if (count == 0)
135 return p;
136 count--;
137 }
138 *_pos = 0x2UL << ITERATOR_SHIFT;
139
140 case 0x2:
141 count = *_pos & ITERATOR_COUNTER;
142 list_for_each(p, &vslow_work_queue) {
143 if (count == 0)
144 return p;
145 count--;
146 }
147 *_pos = 0x3UL << ITERATOR_SHIFT;
148
149 default:
150 return NULL;
151 }
152}
153
154/*
155 * set up the iterator to start reading from the first line
156 */
157static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
158{
159 spin_lock_irq(&slow_work_queue_lock);
160 return slow_work_runqueue_index(m, _pos);
161}
162
163/*
164 * move to the next line
165 */
166static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
167{
168 struct list_head *p = v;
169 unsigned long selector = *_pos >> ITERATOR_SHIFT;
170
171 (*_pos)++;
172 switch (selector) {
173 case 0x0:
174 return slow_work_runqueue_index(m, _pos);
175
176 case 0x1:
177 if (*_pos >> ITERATOR_SHIFT == 0x1) {
178 p = p->next;
179 if (p != &slow_work_queue)
180 return p;
181 }
182 *_pos = 0x2UL << ITERATOR_SHIFT;
183 p = &vslow_work_queue;
184
185 case 0x2:
186 if (*_pos >> ITERATOR_SHIFT == 0x2) {
187 p = p->next;
188 if (p != &vslow_work_queue)
189 return p;
190 }
191 *_pos = 0x3UL << ITERATOR_SHIFT;
192
193 default:
194 return NULL;
195 }
196}
197
198/*
199 * clean up after reading
200 */
201static void slow_work_runqueue_stop(struct seq_file *m, void *v)
202{
203 spin_unlock_irq(&slow_work_queue_lock);
204}
205
206static const struct seq_operations slow_work_runqueue_ops = {
207 .start = slow_work_runqueue_start,
208 .stop = slow_work_runqueue_stop,
209 .next = slow_work_runqueue_next,
210 .show = slow_work_runqueue_show,
211};
212
213/*
214 * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents
215 */
216static int slow_work_runqueue_open(struct inode *inode, struct file *file)
217{
218 return seq_open(file, &slow_work_runqueue_ops);
219}
220
221const struct file_operations slow_work_runqueue_fops = {
222 .owner = THIS_MODULE,
223 .open = slow_work_runqueue_open,
224 .read = seq_read,
225 .llseek = seq_lseek,
226 .release = seq_release,
227};
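The new kernel/slow-work-debugfs.c file above is a standard seq_file consumer: an iterator (start/next/stop/show) handed to seq_open(), wrapped in a file_operations and exposed through debugfs. A stripped-down sketch of the same wiring, iterating a hypothetical string array instead of the slow-work run queue (all demo_* names are invented for illustration), looks like:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>

/* Hypothetical data to walk: a fixed array of strings. */
static const char *demo_items[] = { "alpha", "beta", "gamma" };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
	return *pos < ARRAY_SIZE(demo_items) ?
		(void *)&demo_items[*pos] : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return demo_start(m, pos);
}

static void demo_stop(struct seq_file *m, void *v)
{
	/* nothing to clean up in this sketch */
}

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", *(const char **)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start	= demo_start,
	.stop	= demo_stop,
	.next	= demo_next,
	.show	= demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

Registering it would mirror what init_slow_work() does later in this series: debugfs_create_file("demo", S_IFREG | 0400, NULL, NULL, &demo_fops).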
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 0d31135efbf4..00889bd3c590 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,11 +16,8 @@
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/freezer.h> 17#include <linux/freezer.h>
18#include <linux/wait.h> 18#include <linux/wait.h>
19 19#include <linux/debugfs.h>
20#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of 20#include "slow-work.h"
21 * things to do */
22#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
23 * OOM */
24 21
25static void slow_work_cull_timeout(unsigned long); 22static void slow_work_cull_timeout(unsigned long);
26static void slow_work_oom_timeout(unsigned long); 23static void slow_work_oom_timeout(unsigned long);
@@ -46,7 +43,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
46 43
47#ifdef CONFIG_SYSCTL 44#ifdef CONFIG_SYSCTL
48static const int slow_work_min_min_threads = 2; 45static const int slow_work_min_min_threads = 2;
49static int slow_work_max_max_threads = 255; 46static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
50static const int slow_work_min_vslow = 1; 47static const int slow_work_min_vslow = 1;
51static const int slow_work_max_vslow = 99; 48static const int slow_work_max_vslow = 99;
52 49
@@ -98,6 +95,56 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
98static struct slow_work slow_work_new_thread; /* new thread starter */ 95static struct slow_work slow_work_new_thread; /* new thread starter */
99 96
100/* 97/*
98 * slow work ID allocation (use slow_work_queue_lock)
99 */
100static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
101
102/*
103 * Unregistration tracking to prevent put_ref() from disappearing during module
104 * unload
105 */
106#ifdef CONFIG_MODULES
107static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
108static struct module *slow_work_unreg_module;
109static struct slow_work *slow_work_unreg_work_item;
110static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
111static DEFINE_MUTEX(slow_work_unreg_sync_lock);
112
113static void slow_work_set_thread_processing(int id, struct slow_work *work)
114{
115 if (work)
116 slow_work_thread_processing[id] = work->owner;
117}
118static void slow_work_done_thread_processing(int id, struct slow_work *work)
119{
120 struct module *module = slow_work_thread_processing[id];
121
122 slow_work_thread_processing[id] = NULL;
123 smp_mb();
124 if (slow_work_unreg_work_item == work ||
125 slow_work_unreg_module == module)
126 wake_up_all(&slow_work_unreg_wq);
127}
128static void slow_work_clear_thread_processing(int id)
129{
130 slow_work_thread_processing[id] = NULL;
131}
132#else
133static void slow_work_set_thread_processing(int id, struct slow_work *work) {}
134static void slow_work_done_thread_processing(int id, struct slow_work *work) {}
135static void slow_work_clear_thread_processing(int id) {}
136#endif
137
138/*
139 * Data for tracking currently executing items for indication through /proc
140 */
141#ifdef CONFIG_SLOW_WORK_DEBUG
142struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
143pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
144DEFINE_RWLOCK(slow_work_execs_lock);
145#endif
146
147/*
101 * The queues of work items and the lock governing access to them. These are 148 * The queues of work items and the lock governing access to them. These are
102 * shared between all the CPUs. It doesn't make sense to have per-CPU queues 149 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
103 * as the number of threads bears no relation to the number of CPUs. 150 * as the number of threads bears no relation to the number of CPUs.
@@ -105,9 +152,18 @@ static struct slow_work slow_work_new_thread; /* new thread starter */
105 * There are two queues of work items: one for slow work items, and one for 152 * There are two queues of work items: one for slow work items, and one for
106 * very slow work items. 153 * very slow work items.
107 */ 154 */
108static LIST_HEAD(slow_work_queue); 155LIST_HEAD(slow_work_queue);
109static LIST_HEAD(vslow_work_queue); 156LIST_HEAD(vslow_work_queue);
110static DEFINE_SPINLOCK(slow_work_queue_lock); 157DEFINE_SPINLOCK(slow_work_queue_lock);
158
159/*
160 * The following are two wait queues that get pinged when a work item is placed
161 * on an empty queue. These allow work items that are hogging a thread by
162 * sleeping in a way that could be deferred to yield their thread and enqueue
163 * themselves.
164 */
165static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
166static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
111 167
112/* 168/*
113 * The thread controls. A variable used to signal to the threads that they 169 * The thread controls. A variable used to signal to the threads that they
@@ -126,6 +182,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
126static int slow_work_user_count; 182static int slow_work_user_count;
127static DEFINE_MUTEX(slow_work_user_lock); 183static DEFINE_MUTEX(slow_work_user_lock);
128 184
185static inline int slow_work_get_ref(struct slow_work *work)
186{
187 if (work->ops->get_ref)
188 return work->ops->get_ref(work);
189
190 return 0;
191}
192
193static inline void slow_work_put_ref(struct slow_work *work)
194{
195 if (work->ops->put_ref)
196 work->ops->put_ref(work);
197}
198
129/* 199/*
130 * Calculate the maximum number of active threads in the pool that are 200 * Calculate the maximum number of active threads in the pool that are
131 * permitted to process very slow work items. 201 * permitted to process very slow work items.
@@ -149,7 +219,7 @@ static unsigned slow_work_calc_vsmax(void)
149 * Attempt to execute stuff queued on a slow thread. Return true if we managed 219 * Attempt to execute stuff queued on a slow thread. Return true if we managed
150 * it, false if there was nothing to do. 220 * it, false if there was nothing to do.
151 */ 221 */
152static bool slow_work_execute(void) 222static noinline bool slow_work_execute(int id)
153{ 223{
154 struct slow_work *work = NULL; 224 struct slow_work *work = NULL;
155 unsigned vsmax; 225 unsigned vsmax;
@@ -186,6 +256,13 @@ static bool slow_work_execute(void)
186 } else { 256 } else {
187 very_slow = false; /* avoid the compiler warning */ 257 very_slow = false; /* avoid the compiler warning */
188 } 258 }
259
260 slow_work_set_thread_processing(id, work);
261 if (work) {
262 slow_work_mark_time(work);
263 slow_work_begin_exec(id, work);
264 }
265
189 spin_unlock_irq(&slow_work_queue_lock); 266 spin_unlock_irq(&slow_work_queue_lock);
190 267
191 if (!work) 268 if (!work)
@@ -194,12 +271,19 @@ static bool slow_work_execute(void)
194 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) 271 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
195 BUG(); 272 BUG();
196 273
197 work->ops->execute(work); 274 /* don't execute if the work is in the process of being cancelled */
275 if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
276 work->ops->execute(work);
198 277
199 if (very_slow) 278 if (very_slow)
200 atomic_dec(&vslow_work_executing_count); 279 atomic_dec(&vslow_work_executing_count);
201 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); 280 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
202 281
282 /* wake up anyone waiting for this work to be complete */
283 wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
284
285 slow_work_end_exec(id, work);
286
203 /* if someone tried to enqueue the item whilst we were executing it, 287 /* if someone tried to enqueue the item whilst we were executing it,
204 * then it'll be left unenqueued to avoid multiple threads trying to 288 * then it'll be left unenqueued to avoid multiple threads trying to
205 * execute it simultaneously 289 * execute it simultaneously
@@ -219,7 +303,10 @@ static bool slow_work_execute(void)
219 spin_unlock_irq(&slow_work_queue_lock); 303 spin_unlock_irq(&slow_work_queue_lock);
220 } 304 }
221 305
222 work->ops->put_ref(work); 306 /* sort out the race between module unloading and put_ref() */
307 slow_work_put_ref(work);
308 slow_work_done_thread_processing(id, work);
309
223 return true; 310 return true;
224 311
225auto_requeue: 312auto_requeue:
@@ -227,15 +314,61 @@ auto_requeue:
227 * - we transfer our ref on the item back to the appropriate queue 314 * - we transfer our ref on the item back to the appropriate queue
228 * - don't wake another thread up as we're awake already 315 * - don't wake another thread up as we're awake already
229 */ 316 */
317 slow_work_mark_time(work);
230 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 318 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
231 list_add_tail(&work->link, &vslow_work_queue); 319 list_add_tail(&work->link, &vslow_work_queue);
232 else 320 else
233 list_add_tail(&work->link, &slow_work_queue); 321 list_add_tail(&work->link, &slow_work_queue);
234 spin_unlock_irq(&slow_work_queue_lock); 322 spin_unlock_irq(&slow_work_queue_lock);
323 slow_work_clear_thread_processing(id);
235 return true; 324 return true;
236} 325}
237 326
238/** 327/**
328 * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
 329 * @work: The work item under execution that wants to sleep
 330 * @_timeout: Scheduler sleep timeout
331 *
332 * Allow a requeueable work item to sleep on a slow-work processor thread until
333 * that thread is needed to do some other work or the sleep is interrupted by
334 * some other event.
335 *
336 * The caller must set up a wake up event before calling this and must have set
337 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
338 * condition before calling this function as no test is made here.
339 *
340 * False is returned if there is nothing on the queue; true is returned if the
341 * work item should be requeued
342 */
343bool slow_work_sleep_till_thread_needed(struct slow_work *work,
344 signed long *_timeout)
345{
346 wait_queue_head_t *wfo_wq;
347 struct list_head *queue;
348
349 DEFINE_WAIT(wait);
350
351 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
352 wfo_wq = &vslow_work_queue_waits_for_occupation;
353 queue = &vslow_work_queue;
354 } else {
355 wfo_wq = &slow_work_queue_waits_for_occupation;
356 queue = &slow_work_queue;
357 }
358
359 if (!list_empty(queue))
360 return true;
361
362 add_wait_queue_exclusive(wfo_wq, &wait);
363 if (list_empty(queue))
364 *_timeout = schedule_timeout(*_timeout);
365 finish_wait(wfo_wq, &wait);
366
367 return !list_empty(queue);
368}
369EXPORT_SYMBOL(slow_work_sleep_till_thread_needed);
370
371/**
239 * slow_work_enqueue - Schedule a slow work item for processing 372 * slow_work_enqueue - Schedule a slow work item for processing
240 * @work: The work item to queue 373 * @work: The work item to queue
241 * 374 *
@@ -260,16 +393,22 @@ auto_requeue:
260 * allowed to pick items to execute. This ensures that very slow items won't 393 * allowed to pick items to execute. This ensures that very slow items won't
261 * overly block ones that are just ordinarily slow. 394 * overly block ones that are just ordinarily slow.
262 * 395 *
 263 * Returns 0 if successful, -EAGAIN if not. 396 * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if an attempt is
 397 * made to queue cancelled work)
264 */ 398 */
265int slow_work_enqueue(struct slow_work *work) 399int slow_work_enqueue(struct slow_work *work)
266{ 400{
401 wait_queue_head_t *wfo_wq;
402 struct list_head *queue;
267 unsigned long flags; 403 unsigned long flags;
404 int ret;
405
406 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
407 return -ECANCELED;
268 408
269 BUG_ON(slow_work_user_count <= 0); 409 BUG_ON(slow_work_user_count <= 0);
270 BUG_ON(!work); 410 BUG_ON(!work);
271 BUG_ON(!work->ops); 411 BUG_ON(!work->ops);
272 BUG_ON(!work->ops->get_ref);
273 412
274 /* when honouring an enqueue request, we only promise that we will run 413 /* when honouring an enqueue request, we only promise that we will run
275 * the work function in the future; we do not promise to run it once 414 * the work function in the future; we do not promise to run it once
@@ -280,8 +419,19 @@ int slow_work_enqueue(struct slow_work *work)
280 * maintaining our promise 419 * maintaining our promise
281 */ 420 */
282 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { 421 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
422 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
423 wfo_wq = &vslow_work_queue_waits_for_occupation;
424 queue = &vslow_work_queue;
425 } else {
426 wfo_wq = &slow_work_queue_waits_for_occupation;
427 queue = &slow_work_queue;
428 }
429
283 spin_lock_irqsave(&slow_work_queue_lock, flags); 430 spin_lock_irqsave(&slow_work_queue_lock, flags);
284 431
432 if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
433 goto cancelled;
434
285 /* we promise that we will not attempt to execute the work 435 /* we promise that we will not attempt to execute the work
286 * function in more than one thread simultaneously 436 * function in more than one thread simultaneously
287 * 437 *
@@ -299,25 +449,221 @@ int slow_work_enqueue(struct slow_work *work)
299 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { 449 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
300 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); 450 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
301 } else { 451 } else {
302 if (work->ops->get_ref(work) < 0) 452 ret = slow_work_get_ref(work);
303 goto cant_get_ref; 453 if (ret < 0)
304 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 454 goto failed;
305 list_add_tail(&work->link, &vslow_work_queue); 455 slow_work_mark_time(work);
306 else 456 list_add_tail(&work->link, queue);
307 list_add_tail(&work->link, &slow_work_queue);
308 wake_up(&slow_work_thread_wq); 457 wake_up(&slow_work_thread_wq);
458
459 /* if someone who could be requeued is sleeping on a
460 * thread, then ask them to yield their thread */
461 if (work->link.prev == queue)
462 wake_up(wfo_wq);
309 } 463 }
310 464
311 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 465 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
312 } 466 }
313 return 0; 467 return 0;
314 468
315cant_get_ref: 469cancelled:
470 ret = -ECANCELED;
471failed:
316 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 472 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
317 return -EAGAIN; 473 return ret;
318} 474}
319EXPORT_SYMBOL(slow_work_enqueue); 475EXPORT_SYMBOL(slow_work_enqueue);
320 476
477static int slow_work_wait(void *word)
478{
479 schedule();
480 return 0;
481}
482
483/**
484 * slow_work_cancel - Cancel a slow work item
485 * @work: The work item to cancel
486 *
487 * This function will cancel a previously enqueued work item. If we cannot
 488 * cancel the work item, it is guaranteed to have run when this function
489 * returns.
490 */
491void slow_work_cancel(struct slow_work *work)
492{
493 bool wait = true, put = false;
494
495 set_bit(SLOW_WORK_CANCELLING, &work->flags);
496 smp_mb();
497
498 /* if the work item is a delayed work item with an active timer, we
499 * need to wait for the timer to finish _before_ getting the spinlock,
500 * lest we deadlock against the timer routine
501 *
502 * the timer routine will leave DELAYED set if it notices the
503 * CANCELLING flag in time
504 */
505 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
506 struct delayed_slow_work *dwork =
507 container_of(work, struct delayed_slow_work, work);
508 del_timer_sync(&dwork->timer);
509 }
510
511 spin_lock_irq(&slow_work_queue_lock);
512
513 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
514 /* the timer routine aborted or never happened, so we are left
515 * holding the timer's reference on the item and should just
516 * drop the pending flag and wait for any ongoing execution to
517 * finish */
518 struct delayed_slow_work *dwork =
519 container_of(work, struct delayed_slow_work, work);
520
521 BUG_ON(timer_pending(&dwork->timer));
522 BUG_ON(!list_empty(&work->link));
523
524 clear_bit(SLOW_WORK_DELAYED, &work->flags);
525 put = true;
526 clear_bit(SLOW_WORK_PENDING, &work->flags);
527
528 } else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
529 !list_empty(&work->link)) {
530 /* the link in the pending queue holds a reference on the item
531 * that we will need to release */
532 list_del_init(&work->link);
533 wait = false;
534 put = true;
535 clear_bit(SLOW_WORK_PENDING, &work->flags);
536
537 } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
538 /* the executor is holding our only reference on the item, so
539 * we merely need to wait for it to finish executing */
540 clear_bit(SLOW_WORK_PENDING, &work->flags);
541 }
542
543 spin_unlock_irq(&slow_work_queue_lock);
544
545 /* the EXECUTING flag is set by the executor whilst the spinlock is set
546 * and before the item is dequeued - so assuming the above doesn't
547 * actually dequeue it, simply waiting for the EXECUTING flag to be
548 * released here should be sufficient */
549 if (wait)
550 wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
551 TASK_UNINTERRUPTIBLE);
552
553 clear_bit(SLOW_WORK_CANCELLING, &work->flags);
554 if (put)
555 slow_work_put_ref(work);
556}
557EXPORT_SYMBOL(slow_work_cancel);
558
559/*
560 * Handle expiry of the delay timer, indicating that a delayed slow work item
561 * should now be queued if not cancelled
562 */
563static void delayed_slow_work_timer(unsigned long data)
564{
565 wait_queue_head_t *wfo_wq;
566 struct list_head *queue;
567 struct slow_work *work = (struct slow_work *) data;
568 unsigned long flags;
569 bool queued = false, put = false, first = false;
570
571 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
572 wfo_wq = &vslow_work_queue_waits_for_occupation;
573 queue = &vslow_work_queue;
574 } else {
575 wfo_wq = &slow_work_queue_waits_for_occupation;
576 queue = &slow_work_queue;
577 }
578
579 spin_lock_irqsave(&slow_work_queue_lock, flags);
580 if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
581 clear_bit(SLOW_WORK_DELAYED, &work->flags);
582
583 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
584 /* we discard the reference the timer was holding in
585 * favour of the one the executor holds */
586 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
587 put = true;
588 } else {
589 slow_work_mark_time(work);
590 list_add_tail(&work->link, queue);
591 queued = true;
592 if (work->link.prev == queue)
593 first = true;
594 }
595 }
596
597 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
598 if (put)
599 slow_work_put_ref(work);
600 if (first)
601 wake_up(wfo_wq);
602 if (queued)
603 wake_up(&slow_work_thread_wq);
604}
605
606/**
607 * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
608 * @dwork: The delayed work item to queue
609 * @delay: When to start executing the work, in jiffies from now
610 *
611 * This is similar to slow_work_enqueue(), but it adds a delay before the work
612 * is actually queued for processing.
613 *
614 * The item can have delayed processing requested on it whilst it is being
615 * executed. The delay will begin immediately, and if it expires before the
616 * item finishes executing, the item will be placed back on the queue when it
617 * has done executing.
618 */
619int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
620 unsigned long delay)
621{
622 struct slow_work *work = &dwork->work;
623 unsigned long flags;
624 int ret;
625
626 if (delay == 0)
627 return slow_work_enqueue(&dwork->work);
628
629 BUG_ON(slow_work_user_count <= 0);
630 BUG_ON(!work);
631 BUG_ON(!work->ops);
632
633 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
634 return -ECANCELED;
635
636 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
637 spin_lock_irqsave(&slow_work_queue_lock, flags);
638
639 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
640 goto cancelled;
641
642 /* the timer holds a reference whilst it is pending */
643 ret = work->ops->get_ref(work);
644 if (ret < 0)
645 goto cant_get_ref;
646
647 if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
648 BUG();
649 dwork->timer.expires = jiffies + delay;
650 dwork->timer.data = (unsigned long) work;
651 dwork->timer.function = delayed_slow_work_timer;
652 add_timer(&dwork->timer);
653
654 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
655 }
656
657 return 0;
658
659cancelled:
660 ret = -ECANCELED;
661cant_get_ref:
662 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
663 return ret;
664}
665EXPORT_SYMBOL(delayed_slow_work_enqueue);
666
321/* 667/*
322 * Schedule a cull of the thread pool at some time in the near future 668 * Schedule a cull of the thread pool at some time in the near future
323 */ 669 */
@@ -368,13 +714,23 @@ static inline bool slow_work_available(int vsmax)
368 */ 714 */
369static int slow_work_thread(void *_data) 715static int slow_work_thread(void *_data)
370{ 716{
371 int vsmax; 717 int vsmax, id;
372 718
373 DEFINE_WAIT(wait); 719 DEFINE_WAIT(wait);
374 720
375 set_freezable(); 721 set_freezable();
376 set_user_nice(current, -5); 722 set_user_nice(current, -5);
377 723
724 /* allocate ourselves an ID */
725 spin_lock_irq(&slow_work_queue_lock);
726 id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
727 BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
728 __set_bit(id, slow_work_ids);
729 slow_work_set_thread_pid(id, current->pid);
730 spin_unlock_irq(&slow_work_queue_lock);
731
732 sprintf(current->comm, "kslowd%03u", id);
733
378 for (;;) { 734 for (;;) {
379 vsmax = vslow_work_proportion; 735 vsmax = vslow_work_proportion;
380 vsmax *= atomic_read(&slow_work_thread_count); 736 vsmax *= atomic_read(&slow_work_thread_count);
@@ -395,7 +751,7 @@ static int slow_work_thread(void *_data)
395 vsmax *= atomic_read(&slow_work_thread_count); 751 vsmax *= atomic_read(&slow_work_thread_count);
396 vsmax /= 100; 752 vsmax /= 100;
397 753
398 if (slow_work_available(vsmax) && slow_work_execute()) { 754 if (slow_work_available(vsmax) && slow_work_execute(id)) {
399 cond_resched(); 755 cond_resched();
400 if (list_empty(&slow_work_queue) && 756 if (list_empty(&slow_work_queue) &&
401 list_empty(&vslow_work_queue) && 757 list_empty(&vslow_work_queue) &&
@@ -412,6 +768,11 @@ static int slow_work_thread(void *_data)
412 break; 768 break;
413 } 769 }
414 770
771 spin_lock_irq(&slow_work_queue_lock);
772 slow_work_set_thread_pid(id, 0);
773 __clear_bit(id, slow_work_ids);
774 spin_unlock_irq(&slow_work_queue_lock);
775
415 if (atomic_dec_and_test(&slow_work_thread_count)) 776 if (atomic_dec_and_test(&slow_work_thread_count))
416 complete_and_exit(&slow_work_last_thread_exited, 0); 777 complete_and_exit(&slow_work_last_thread_exited, 0);
417 return 0; 778 return 0;
@@ -427,21 +788,6 @@ static void slow_work_cull_timeout(unsigned long data)
427} 788}
428 789
429/* 790/*
430 * Get a reference on slow work thread starter
431 */
432static int slow_work_new_thread_get_ref(struct slow_work *work)
433{
434 return 0;
435}
436
437/*
438 * Drop a reference on slow work thread starter
439 */
440static void slow_work_new_thread_put_ref(struct slow_work *work)
441{
442}
443
444/*
445 * Start a new slow work thread 791 * Start a new slow work thread
446 */ 792 */
447static void slow_work_new_thread_execute(struct slow_work *work) 793static void slow_work_new_thread_execute(struct slow_work *work)
@@ -475,9 +821,11 @@ static void slow_work_new_thread_execute(struct slow_work *work)
475} 821}
476 822
477static const struct slow_work_ops slow_work_new_thread_ops = { 823static const struct slow_work_ops slow_work_new_thread_ops = {
478 .get_ref = slow_work_new_thread_get_ref, 824 .owner = THIS_MODULE,
479 .put_ref = slow_work_new_thread_put_ref,
480 .execute = slow_work_new_thread_execute, 825 .execute = slow_work_new_thread_execute,
826#ifdef CONFIG_SLOW_WORK_DEBUG
827 .desc = slow_work_new_thread_desc,
828#endif
481}; 829};
482 830
483/* 831/*
@@ -546,12 +894,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
546 894
547/** 895/**
548 * slow_work_register_user - Register a user of the facility 896 * slow_work_register_user - Register a user of the facility
897 * @module: The module about to make use of the facility
549 * 898 *
550 * Register a user of the facility, starting up the initial threads if there 899 * Register a user of the facility, starting up the initial threads if there
551 * aren't any other users at this point. This will return 0 if successful, or 900 * aren't any other users at this point. This will return 0 if successful, or
552 * an error if not. 901 * an error if not.
553 */ 902 */
554int slow_work_register_user(void) 903int slow_work_register_user(struct module *module)
555{ 904{
556 struct task_struct *p; 905 struct task_struct *p;
557 int loop; 906 int loop;
@@ -598,14 +947,81 @@ error:
598} 947}
599EXPORT_SYMBOL(slow_work_register_user); 948EXPORT_SYMBOL(slow_work_register_user);
600 949
950/*
951 * wait for all outstanding items from the calling module to complete
952 * - note that more items may be queued whilst we're waiting
953 */
954static void slow_work_wait_for_items(struct module *module)
955{
956#ifdef CONFIG_MODULES
957 DECLARE_WAITQUEUE(myself, current);
958 struct slow_work *work;
959 int loop;
960
961 mutex_lock(&slow_work_unreg_sync_lock);
962 add_wait_queue(&slow_work_unreg_wq, &myself);
963
964 for (;;) {
965 spin_lock_irq(&slow_work_queue_lock);
966
967 /* first of all, we wait for the last queued item in each list
968 * to be processed */
969 list_for_each_entry_reverse(work, &vslow_work_queue, link) {
970 if (work->owner == module) {
971 set_current_state(TASK_UNINTERRUPTIBLE);
972 slow_work_unreg_work_item = work;
973 goto do_wait;
974 }
975 }
976 list_for_each_entry_reverse(work, &slow_work_queue, link) {
977 if (work->owner == module) {
978 set_current_state(TASK_UNINTERRUPTIBLE);
979 slow_work_unreg_work_item = work;
980 goto do_wait;
981 }
982 }
983
984 /* then we wait for the items being processed to finish */
985 slow_work_unreg_module = module;
986 smp_mb();
987 for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
988 if (slow_work_thread_processing[loop] == module)
989 goto do_wait;
990 }
991 spin_unlock_irq(&slow_work_queue_lock);
992 break; /* okay, we're done */
993
994 do_wait:
995 spin_unlock_irq(&slow_work_queue_lock);
996 schedule();
997 slow_work_unreg_work_item = NULL;
998 slow_work_unreg_module = NULL;
999 }
1000
1001 remove_wait_queue(&slow_work_unreg_wq, &myself);
1002 mutex_unlock(&slow_work_unreg_sync_lock);
1003#endif /* CONFIG_MODULES */
1004}
1005
601/** 1006/**
602 * slow_work_unregister_user - Unregister a user of the facility 1007 * slow_work_unregister_user - Unregister a user of the facility
1008 * @module: The module whose items should be cleared
603 * 1009 *
604 * Unregister a user of the facility, killing all the threads if this was the 1010 * Unregister a user of the facility, killing all the threads if this was the
605 * last one. 1011 * last one.
1012 *
1013 * This waits for all the work items belonging to the nominated module to go
1014 * away before proceeding.
606 */ 1015 */
607void slow_work_unregister_user(void) 1016void slow_work_unregister_user(struct module *module)
608{ 1017{
1018 /* first of all, wait for all outstanding items from the calling module
1019 * to complete */
1020 if (module)
1021 slow_work_wait_for_items(module);
1022
1023 /* then we can actually go about shutting down the facility if need
1024 * be */
609 mutex_lock(&slow_work_user_lock); 1025 mutex_lock(&slow_work_user_lock);
610 1026
611 BUG_ON(slow_work_user_count <= 0); 1027 BUG_ON(slow_work_user_count <= 0);
@@ -639,6 +1055,16 @@ static int __init init_slow_work(void)
639 if (slow_work_max_max_threads < nr_cpus * 2) 1055 if (slow_work_max_max_threads < nr_cpus * 2)
640 slow_work_max_max_threads = nr_cpus * 2; 1056 slow_work_max_max_threads = nr_cpus * 2;
641#endif 1057#endif
1058#ifdef CONFIG_SLOW_WORK_DEBUG
1059 {
1060 struct dentry *dbdir;
1061
1062 dbdir = debugfs_create_dir("slow_work", NULL);
1063 if (dbdir && !IS_ERR(dbdir))
1064 debugfs_create_file("runqueue", S_IFREG | 0400, dbdir,
1065 NULL, &slow_work_runqueue_fops);
1066 }
1067#endif
642 return 0; 1068 return 0;
643} 1069}
644 1070
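Taken together, the kernel/slow-work.c changes above give the facility a module-aware API: ops carry an .owner, users register and unregister with their module pointer, get_ref/put_ref become optional, and items can now be cancelled (slow_work_cancel) or queued after a delay (delayed_slow_work_enqueue). A hedged sketch of a client module driving that API — the demo_* names are invented, error handling is minimal, and the slow_work_init()/delayed_slow_work_init()/delayed_slow_work_cancel() helpers are assumed to come from the facility's public header rather than from the hunks shown here — might look like:

#include <linux/module.h>
#include <linux/slow-work.h>

static void demo_execute(struct slow_work *work)
{
	/* the long-running processing would go here */
}

static const struct slow_work_ops demo_ops = {
	.owner   = THIS_MODULE,
	.execute = demo_execute,
	/* get_ref/put_ref are now optional, as with slow_work_new_thread_ops */
};

static struct slow_work demo_item;
static struct delayed_slow_work demo_delayed_item;

static int __init demo_init(void)
{
	int ret;

	ret = slow_work_register_user(THIS_MODULE);
	if (ret < 0)
		return ret;

	slow_work_init(&demo_item, &demo_ops);
	slow_work_enqueue(&demo_item);

	delayed_slow_work_init(&demo_delayed_item, &demo_ops);
	delayed_slow_work_enqueue(&demo_delayed_item, 10 * HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	/* cancel (or wait out) our items before dropping the facility */
	slow_work_cancel(&demo_item);
	delayed_slow_work_cancel(&demo_delayed_item);
	slow_work_unregister_user(THIS_MODULE);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");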
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
new file mode 100644
index 000000000000..321f3c59d732
--- /dev/null
+++ b/kernel/slow-work.h
@@ -0,0 +1,72 @@
1/* Slow work private definitions
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
13 * things to do */
14#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
15 * OOM */
16
17#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */
18
19/*
20 * slow-work.c
21 */
22#ifdef CONFIG_SLOW_WORK_DEBUG
23extern struct slow_work *slow_work_execs[];
24extern pid_t slow_work_pids[];
25extern rwlock_t slow_work_execs_lock;
26#endif
27
28extern struct list_head slow_work_queue;
29extern struct list_head vslow_work_queue;
30extern spinlock_t slow_work_queue_lock;
31
32/*
33 * slow-work-debugfs.c
34 */
35#ifdef CONFIG_SLOW_WORK_DEBUG
36extern const struct file_operations slow_work_runqueue_fops;
37
38extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
39#endif
40
41/*
42 * Helper functions
43 */
44static inline void slow_work_set_thread_pid(int id, pid_t pid)
45{
 46#ifdef CONFIG_SLOW_WORK_DEBUG
47 slow_work_pids[id] = pid;
48#endif
49}
50
51static inline void slow_work_mark_time(struct slow_work *work)
52{
 53#ifdef CONFIG_SLOW_WORK_DEBUG
54 work->mark = CURRENT_TIME;
55#endif
56}
57
58static inline void slow_work_begin_exec(int id, struct slow_work *work)
59{
 60#ifdef CONFIG_SLOW_WORK_DEBUG
61 slow_work_execs[id] = work;
62#endif
63}
64
65static inline void slow_work_end_exec(int id, struct slow_work *work)
66{
 67#ifdef CONFIG_SLOW_WORK_DEBUG
68 write_lock(&slow_work_execs_lock);
69 slow_work_execs[id] = NULL;
70 write_unlock(&slow_work_execs_lock);
71#endif
72}
diff --git a/kernel/smp.c b/kernel/smp.c
index c9d1c7835c2f..a8c76069cf50 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -265,9 +265,7 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data);
265 * @info: An arbitrary pointer to pass to the function. 265 * @info: An arbitrary pointer to pass to the function.
266 * @wait: If true, wait until function has completed on other CPUs. 266 * @wait: If true, wait until function has completed on other CPUs.
267 * 267 *
268 * Returns 0 on success, else a negative status code. Note that @wait 268 * Returns 0 on success, else a negative status code.
269 * will be implicitly turned on in case of allocation failures, since
270 * we fall back to on-stack allocation.
271 */ 269 */
272int smp_call_function_single(int cpu, void (*func) (void *info), void *info, 270int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
273 int wait) 271 int wait)
@@ -321,6 +319,51 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
321} 319}
322EXPORT_SYMBOL(smp_call_function_single); 320EXPORT_SYMBOL(smp_call_function_single);
323 321
322/*
323 * smp_call_function_any - Run a function on any of the given cpus
324 * @mask: The mask of cpus it can run on.
325 * @func: The function to run. This must be fast and non-blocking.
326 * @info: An arbitrary pointer to pass to the function.
327 * @wait: If true, wait until function has completed.
328 *
329 * Returns 0 on success, else a negative status code (if no cpus were online).
330 * Note that @wait will be implicitly turned on in case of allocation failures,
331 * since we fall back to on-stack allocation.
332 *
333 * Selection preference:
334 * 1) current cpu if in @mask
335 * 2) any cpu of current node if in @mask
336 * 3) any other online cpu in @mask
337 */
338int smp_call_function_any(const struct cpumask *mask,
339 void (*func)(void *info), void *info, int wait)
340{
341 unsigned int cpu;
342 const struct cpumask *nodemask;
343 int ret;
344
345 /* Try for same CPU (cheapest) */
346 cpu = get_cpu();
347 if (cpumask_test_cpu(cpu, mask))
348 goto call;
349
350 /* Try for same node. */
351 nodemask = cpumask_of_node(cpu);
352 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
353 cpu = cpumask_next_and(cpu, nodemask, mask)) {
354 if (cpu_online(cpu))
355 goto call;
356 }
357
358 /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
359 cpu = cpumask_any_and(mask, cpu_online_mask);
360call:
361 ret = smp_call_function_single(cpu, func, info, wait);
362 put_cpu();
363 return ret;
364}
365EXPORT_SYMBOL_GPL(smp_call_function_any);
366
324/** 367/**
325 * __smp_call_function_single(): Run a function on another CPU 368 * __smp_call_function_single(): Run a function on another CPU
326 * @cpu: The CPU to run on. 369 * @cpu: The CPU to run on.
@@ -355,9 +398,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
355 * @wait: If true, wait (atomically) until function has completed 398 * @wait: If true, wait (atomically) until function has completed
356 * on other CPUs. 399 * on other CPUs.
357 * 400 *
358 * If @wait is true, then returns once @func has returned. Note that @wait 401 * If @wait is true, then returns once @func has returned.
359 * will be implicitly turned on in case of allocation failures, since
360 * we fall back to on-stack allocation.
361 * 402 *
362 * You must not call this function with disabled interrupts or from a 403 * You must not call this function with disabled interrupts or from a
363 * hardware interrupt handler or from a bottom half handler. Preemption 404 * hardware interrupt handler or from a bottom half handler. Preemption
@@ -443,8 +484,7 @@ EXPORT_SYMBOL(smp_call_function_many);
443 * Returns 0. 484 * Returns 0.
444 * 485 *
445 * If @wait is true, then returns once @func has returned; otherwise 486 * If @wait is true, then returns once @func has returned; otherwise
446 * it returns just before the target cpu calls @func. In case of allocation 487 * it returns just before the target cpu calls @func.
447 * failure, @wait will be implicitly turned on.
448 * 488 *
449 * You must not call this function with disabled interrupts or from a 489 * You must not call this function with disabled interrupts or from a
450 * hardware interrupt handler or from a bottom half handler. 490 * hardware interrupt handler or from a bottom half handler.
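smp_call_function_any(), added above, picks the cheapest suitable CPU (the current CPU if it is in the mask, then another CPU on the same node, then any online CPU in the mask) and hands off to smp_call_function_single(). A minimal hypothetical caller — record_cpu() and probe_any_cpu() are made up for illustration — could be:

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/smp.h>

/* Runs on the chosen CPU; records which CPU that turned out to be. */
static void record_cpu(void *info)
{
	*(int *)info = smp_processor_id();
}

/* Run record_cpu() on the cheapest CPU in @allowed, waiting for it. */
static int probe_any_cpu(const struct cpumask *allowed)
{
	int where = -1;
	int ret;

	/* wait == 1 so that "where" is stable by the time we print it */
	ret = smp_call_function_any(allowed, record_cpu, &where, 1);
	if (ret)
		return ret;	/* e.g. no online CPU in the mask */

	printk(KERN_INFO "probe ran on cpu %d\n", where);
	return 0;
}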
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f8749e5216e0..21939d9e830e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -302,9 +302,9 @@ void irq_exit(void)
302 if (!in_interrupt() && local_softirq_pending()) 302 if (!in_interrupt() && local_softirq_pending())
303 invoke_softirq(); 303 invoke_softirq();
304 304
305 rcu_irq_exit();
305#ifdef CONFIG_NO_HZ 306#ifdef CONFIG_NO_HZ
306 /* Make sure that timer wheel updates are propagated */ 307 /* Make sure that timer wheel updates are propagated */
307 rcu_irq_exit();
308 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) 308 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
309 tick_nohz_stop_sched_tick(0); 309 tick_nohz_stop_sched_tick(0);
310#endif 310#endif
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5ddab730cb2f..41e042219ff6 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,145 +21,28 @@
21#include <linux/debug_locks.h> 21#include <linux/debug_locks.h>
22#include <linux/module.h> 22#include <linux/module.h>
23 23
24#ifndef _spin_trylock
25int __lockfunc _spin_trylock(spinlock_t *lock)
26{
27 return __spin_trylock(lock);
28}
29EXPORT_SYMBOL(_spin_trylock);
30#endif
31
32#ifndef _read_trylock
33int __lockfunc _read_trylock(rwlock_t *lock)
34{
35 return __read_trylock(lock);
36}
37EXPORT_SYMBOL(_read_trylock);
38#endif
39
40#ifndef _write_trylock
41int __lockfunc _write_trylock(rwlock_t *lock)
42{
43 return __write_trylock(lock);
44}
45EXPORT_SYMBOL(_write_trylock);
46#endif
47
48/* 24/*
49 * If lockdep is enabled then we use the non-preemption spin-ops 25 * If lockdep is enabled then we use the non-preemption spin-ops
50 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are 26 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
51 * not re-enabled during lock-acquire (which the preempt-spin-ops do): 27 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
52 */ 28 */
53#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) 29#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
54
55#ifndef _read_lock
56void __lockfunc _read_lock(rwlock_t *lock)
57{
58 __read_lock(lock);
59}
60EXPORT_SYMBOL(_read_lock);
61#endif
62
63#ifndef _spin_lock_irqsave
64unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
65{
66 return __spin_lock_irqsave(lock);
67}
68EXPORT_SYMBOL(_spin_lock_irqsave);
69#endif
70
71#ifndef _spin_lock_irq
72void __lockfunc _spin_lock_irq(spinlock_t *lock)
73{
74 __spin_lock_irq(lock);
75}
76EXPORT_SYMBOL(_spin_lock_irq);
77#endif
78
79#ifndef _spin_lock_bh
80void __lockfunc _spin_lock_bh(spinlock_t *lock)
81{
82 __spin_lock_bh(lock);
83}
84EXPORT_SYMBOL(_spin_lock_bh);
85#endif
86
87#ifndef _read_lock_irqsave
88unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
89{
90 return __read_lock_irqsave(lock);
91}
92EXPORT_SYMBOL(_read_lock_irqsave);
93#endif
94
95#ifndef _read_lock_irq
96void __lockfunc _read_lock_irq(rwlock_t *lock)
97{
98 __read_lock_irq(lock);
99}
100EXPORT_SYMBOL(_read_lock_irq);
101#endif
102
103#ifndef _read_lock_bh
104void __lockfunc _read_lock_bh(rwlock_t *lock)
105{
106 __read_lock_bh(lock);
107}
108EXPORT_SYMBOL(_read_lock_bh);
109#endif
110
111#ifndef _write_lock_irqsave
112unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
113{
114 return __write_lock_irqsave(lock);
115}
116EXPORT_SYMBOL(_write_lock_irqsave);
117#endif
118
119#ifndef _write_lock_irq
120void __lockfunc _write_lock_irq(rwlock_t *lock)
121{
122 __write_lock_irq(lock);
123}
124EXPORT_SYMBOL(_write_lock_irq);
125#endif
126
127#ifndef _write_lock_bh
128void __lockfunc _write_lock_bh(rwlock_t *lock)
129{
130 __write_lock_bh(lock);
131}
132EXPORT_SYMBOL(_write_lock_bh);
133#endif
134
135#ifndef _spin_lock
136void __lockfunc _spin_lock(spinlock_t *lock)
137{
138 __spin_lock(lock);
139}
140EXPORT_SYMBOL(_spin_lock);
141#endif
142
143#ifndef _write_lock
144void __lockfunc _write_lock(rwlock_t *lock)
145{
146 __write_lock(lock);
147}
148EXPORT_SYMBOL(_write_lock);
149#endif
150
151#else /* CONFIG_PREEMPT: */
152
153/* 30/*
31 * The __lock_function inlines are taken from
32 * include/linux/spinlock_api_smp.h
33 */
34#else
35/*
36 * We build the __lock_function inlines here. They are too large for
 37 * inlining all over the place, but here there is only one user per function,
 38 * which embeds them into the calling _lock_function below.
39 *
154 * This could be a long-held lock. We both prepare to spin for a long 40 * This could be a long-held lock. We both prepare to spin for a long
155 * time (making _this_ CPU preemptable if possible), and we also signal 41 * time (making _this_ CPU preemptable if possible), and we also signal
156 * towards that other CPU that it should break the lock ASAP. 42 * towards that other CPU that it should break the lock ASAP.
157 *
158 * (We do this in a function because inlining it would be excessive.)
159 */ 43 */
160
161#define BUILD_LOCK_OPS(op, locktype) \ 44#define BUILD_LOCK_OPS(op, locktype) \
162void __lockfunc _##op##_lock(locktype##_t *lock) \ 45void __lockfunc __##op##_lock(locktype##_t *lock) \
163{ \ 46{ \
164 for (;;) { \ 47 for (;;) { \
165 preempt_disable(); \ 48 preempt_disable(); \
@@ -175,9 +58,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \
175 (lock)->break_lock = 0; \ 58 (lock)->break_lock = 0; \
176} \ 59} \
177 \ 60 \
178EXPORT_SYMBOL(_##op##_lock); \ 61unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
179 \
180unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
181{ \ 62{ \
182 unsigned long flags; \ 63 unsigned long flags; \
183 \ 64 \
@@ -198,16 +79,12 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
198 return flags; \ 79 return flags; \
199} \ 80} \
200 \ 81 \
201EXPORT_SYMBOL(_##op##_lock_irqsave); \ 82void __lockfunc __##op##_lock_irq(locktype##_t *lock) \
202 \
203void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
204{ \ 83{ \
205 _##op##_lock_irqsave(lock); \ 84 _##op##_lock_irqsave(lock); \
206} \ 85} \
207 \ 86 \
208EXPORT_SYMBOL(_##op##_lock_irq); \ 87void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
209 \
210void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
211{ \ 88{ \
212 unsigned long flags; \ 89 unsigned long flags; \
213 \ 90 \
@@ -220,23 +97,21 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
220 local_bh_disable(); \ 97 local_bh_disable(); \
221 local_irq_restore(flags); \ 98 local_irq_restore(flags); \
222} \ 99} \
223 \
224EXPORT_SYMBOL(_##op##_lock_bh)
225 100
226/* 101/*
227 * Build preemption-friendly versions of the following 102 * Build preemption-friendly versions of the following
228 * lock-spinning functions: 103 * lock-spinning functions:
229 * 104 *
230 * _[spin|read|write]_lock() 105 * __[spin|read|write]_lock()
231 * _[spin|read|write]_lock_irq() 106 * __[spin|read|write]_lock_irq()
232 * _[spin|read|write]_lock_irqsave() 107 * __[spin|read|write]_lock_irqsave()
233 * _[spin|read|write]_lock_bh() 108 * __[spin|read|write]_lock_bh()
234 */ 109 */
235BUILD_LOCK_OPS(spin, spinlock); 110BUILD_LOCK_OPS(spin, spinlock);
236BUILD_LOCK_OPS(read, rwlock); 111BUILD_LOCK_OPS(read, rwlock);
237BUILD_LOCK_OPS(write, rwlock); 112BUILD_LOCK_OPS(write, rwlock);
238 113
239#endif /* CONFIG_PREEMPT */ 114#endif
240 115
241#ifdef CONFIG_DEBUG_LOCK_ALLOC 116#ifdef CONFIG_DEBUG_LOCK_ALLOC
242 117
@@ -248,7 +123,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
248} 123}
249EXPORT_SYMBOL(_spin_lock_nested); 124EXPORT_SYMBOL(_spin_lock_nested);
250 125
251unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) 126unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
127 int subclass)
252{ 128{
253 unsigned long flags; 129 unsigned long flags;
254 130
@@ -272,7 +148,127 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
272 148
273#endif 149#endif
274 150
275#ifndef _spin_unlock 151#ifndef CONFIG_INLINE_SPIN_TRYLOCK
152int __lockfunc _spin_trylock(spinlock_t *lock)
153{
154 return __spin_trylock(lock);
155}
156EXPORT_SYMBOL(_spin_trylock);
157#endif
158
159#ifndef CONFIG_INLINE_READ_TRYLOCK
160int __lockfunc _read_trylock(rwlock_t *lock)
161{
162 return __read_trylock(lock);
163}
164EXPORT_SYMBOL(_read_trylock);
165#endif
166
167#ifndef CONFIG_INLINE_WRITE_TRYLOCK
168int __lockfunc _write_trylock(rwlock_t *lock)
169{
170 return __write_trylock(lock);
171}
172EXPORT_SYMBOL(_write_trylock);
173#endif
174
175#ifndef CONFIG_INLINE_READ_LOCK
176void __lockfunc _read_lock(rwlock_t *lock)
177{
178 __read_lock(lock);
179}
180EXPORT_SYMBOL(_read_lock);
181#endif
182
183#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
184unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
185{
186 return __spin_lock_irqsave(lock);
187}
188EXPORT_SYMBOL(_spin_lock_irqsave);
189#endif
190
191#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
192void __lockfunc _spin_lock_irq(spinlock_t *lock)
193{
194 __spin_lock_irq(lock);
195}
196EXPORT_SYMBOL(_spin_lock_irq);
197#endif
198
199#ifndef CONFIG_INLINE_SPIN_LOCK_BH
200void __lockfunc _spin_lock_bh(spinlock_t *lock)
201{
202 __spin_lock_bh(lock);
203}
204EXPORT_SYMBOL(_spin_lock_bh);
205#endif
206
207#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
208unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
209{
210 return __read_lock_irqsave(lock);
211}
212EXPORT_SYMBOL(_read_lock_irqsave);
213#endif
214
215#ifndef CONFIG_INLINE_READ_LOCK_IRQ
216void __lockfunc _read_lock_irq(rwlock_t *lock)
217{
218 __read_lock_irq(lock);
219}
220EXPORT_SYMBOL(_read_lock_irq);
221#endif
222
223#ifndef CONFIG_INLINE_READ_LOCK_BH
224void __lockfunc _read_lock_bh(rwlock_t *lock)
225{
226 __read_lock_bh(lock);
227}
228EXPORT_SYMBOL(_read_lock_bh);
229#endif
230
231#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
232unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
233{
234 return __write_lock_irqsave(lock);
235}
236EXPORT_SYMBOL(_write_lock_irqsave);
237#endif
238
239#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
240void __lockfunc _write_lock_irq(rwlock_t *lock)
241{
242 __write_lock_irq(lock);
243}
244EXPORT_SYMBOL(_write_lock_irq);
245#endif
246
247#ifndef CONFIG_INLINE_WRITE_LOCK_BH
248void __lockfunc _write_lock_bh(rwlock_t *lock)
249{
250 __write_lock_bh(lock);
251}
252EXPORT_SYMBOL(_write_lock_bh);
253#endif
254
255#ifndef CONFIG_INLINE_SPIN_LOCK
256void __lockfunc _spin_lock(spinlock_t *lock)
257{
258 __spin_lock(lock);
259}
260EXPORT_SYMBOL(_spin_lock);
261#endif
262
263#ifndef CONFIG_INLINE_WRITE_LOCK
264void __lockfunc _write_lock(rwlock_t *lock)
265{
266 __write_lock(lock);
267}
268EXPORT_SYMBOL(_write_lock);
269#endif
270
271#ifndef CONFIG_INLINE_SPIN_UNLOCK
276void __lockfunc _spin_unlock(spinlock_t *lock) 272void __lockfunc _spin_unlock(spinlock_t *lock)
277{ 273{
278 __spin_unlock(lock); 274 __spin_unlock(lock);
@@ -280,7 +276,7 @@ void __lockfunc _spin_unlock(spinlock_t *lock)
280EXPORT_SYMBOL(_spin_unlock); 276EXPORT_SYMBOL(_spin_unlock);
281#endif 277#endif
282 278
283#ifndef _write_unlock 279#ifndef CONFIG_INLINE_WRITE_UNLOCK
284void __lockfunc _write_unlock(rwlock_t *lock) 280void __lockfunc _write_unlock(rwlock_t *lock)
285{ 281{
286 __write_unlock(lock); 282 __write_unlock(lock);
@@ -288,7 +284,7 @@ void __lockfunc _write_unlock(rwlock_t *lock)
288EXPORT_SYMBOL(_write_unlock); 284EXPORT_SYMBOL(_write_unlock);
289#endif 285#endif
290 286
291#ifndef _read_unlock 287#ifndef CONFIG_INLINE_READ_UNLOCK
292void __lockfunc _read_unlock(rwlock_t *lock) 288void __lockfunc _read_unlock(rwlock_t *lock)
293{ 289{
294 __read_unlock(lock); 290 __read_unlock(lock);
@@ -296,7 +292,7 @@ void __lockfunc _read_unlock(rwlock_t *lock)
296EXPORT_SYMBOL(_read_unlock); 292EXPORT_SYMBOL(_read_unlock);
297#endif 293#endif
298 294
299#ifndef _spin_unlock_irqrestore 295#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
300void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 296void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
301{ 297{
302 __spin_unlock_irqrestore(lock, flags); 298 __spin_unlock_irqrestore(lock, flags);
@@ -304,7 +300,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
304EXPORT_SYMBOL(_spin_unlock_irqrestore); 300EXPORT_SYMBOL(_spin_unlock_irqrestore);
305#endif 301#endif
306 302
307#ifndef _spin_unlock_irq 303#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
308void __lockfunc _spin_unlock_irq(spinlock_t *lock) 304void __lockfunc _spin_unlock_irq(spinlock_t *lock)
309{ 305{
310 __spin_unlock_irq(lock); 306 __spin_unlock_irq(lock);
@@ -312,7 +308,7 @@ void __lockfunc _spin_unlock_irq(spinlock_t *lock)
312EXPORT_SYMBOL(_spin_unlock_irq); 308EXPORT_SYMBOL(_spin_unlock_irq);
313#endif 309#endif
314 310
315#ifndef _spin_unlock_bh 311#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
316void __lockfunc _spin_unlock_bh(spinlock_t *lock) 312void __lockfunc _spin_unlock_bh(spinlock_t *lock)
317{ 313{
318 __spin_unlock_bh(lock); 314 __spin_unlock_bh(lock);
@@ -320,7 +316,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock)
320EXPORT_SYMBOL(_spin_unlock_bh); 316EXPORT_SYMBOL(_spin_unlock_bh);
321#endif 317#endif
322 318
323#ifndef _read_unlock_irqrestore 319#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
324void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 320void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
325{ 321{
326 __read_unlock_irqrestore(lock, flags); 322 __read_unlock_irqrestore(lock, flags);
@@ -328,7 +324,7 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
328EXPORT_SYMBOL(_read_unlock_irqrestore); 324EXPORT_SYMBOL(_read_unlock_irqrestore);
329#endif 325#endif
330 326
331#ifndef _read_unlock_irq 327#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
332void __lockfunc _read_unlock_irq(rwlock_t *lock) 328void __lockfunc _read_unlock_irq(rwlock_t *lock)
333{ 329{
334 __read_unlock_irq(lock); 330 __read_unlock_irq(lock);
@@ -336,7 +332,7 @@ void __lockfunc _read_unlock_irq(rwlock_t *lock)
336EXPORT_SYMBOL(_read_unlock_irq); 332EXPORT_SYMBOL(_read_unlock_irq);
337#endif 333#endif
338 334
339#ifndef _read_unlock_bh 335#ifndef CONFIG_INLINE_READ_UNLOCK_BH
340void __lockfunc _read_unlock_bh(rwlock_t *lock) 336void __lockfunc _read_unlock_bh(rwlock_t *lock)
341{ 337{
342 __read_unlock_bh(lock); 338 __read_unlock_bh(lock);
@@ -344,7 +340,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock)
344EXPORT_SYMBOL(_read_unlock_bh); 340EXPORT_SYMBOL(_read_unlock_bh);
345#endif 341#endif
346 342
347#ifndef _write_unlock_irqrestore 343#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
348void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 344void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
349{ 345{
350 __write_unlock_irqrestore(lock, flags); 346 __write_unlock_irqrestore(lock, flags);
@@ -352,7 +348,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
352EXPORT_SYMBOL(_write_unlock_irqrestore); 348EXPORT_SYMBOL(_write_unlock_irqrestore);
353#endif 349#endif
354 350
355#ifndef _write_unlock_irq 351#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
356void __lockfunc _write_unlock_irq(rwlock_t *lock) 352void __lockfunc _write_unlock_irq(rwlock_t *lock)
357{ 353{
358 __write_unlock_irq(lock); 354 __write_unlock_irq(lock);
@@ -360,7 +356,7 @@ void __lockfunc _write_unlock_irq(rwlock_t *lock)
360EXPORT_SYMBOL(_write_unlock_irq); 356EXPORT_SYMBOL(_write_unlock_irq);
361#endif 357#endif
362 358
363#ifndef _write_unlock_bh 359#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
364void __lockfunc _write_unlock_bh(rwlock_t *lock) 360void __lockfunc _write_unlock_bh(rwlock_t *lock)
365{ 361{
366 __write_unlock_bh(lock); 362 __write_unlock_bh(lock);
@@ -368,7 +364,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock)
368EXPORT_SYMBOL(_write_unlock_bh); 364EXPORT_SYMBOL(_write_unlock_bh);
369#endif 365#endif
370 366
371#ifndef _spin_trylock_bh 367#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
372int __lockfunc _spin_trylock_bh(spinlock_t *lock) 368int __lockfunc _spin_trylock_bh(spinlock_t *lock)
373{ 369{
374 return __spin_trylock_bh(lock); 370 return __spin_trylock_bh(lock);
diff --git a/kernel/srcu.c b/kernel/srcu.c
index b0aeeaf22ce4..818d7d9aa03c 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -49,6 +49,7 @@ int init_srcu_struct(struct srcu_struct *sp)
49 sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array); 49 sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
50 return (sp->per_cpu_ref ? 0 : -ENOMEM); 50 return (sp->per_cpu_ref ? 0 : -ENOMEM);
51} 51}
52EXPORT_SYMBOL_GPL(init_srcu_struct);
52 53
53/* 54/*
54 * srcu_readers_active_idx -- returns approximate number of readers 55 * srcu_readers_active_idx -- returns approximate number of readers
@@ -97,6 +98,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
97 free_percpu(sp->per_cpu_ref); 98 free_percpu(sp->per_cpu_ref);
98 sp->per_cpu_ref = NULL; 99 sp->per_cpu_ref = NULL;
99} 100}
101EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
100 102
101/** 103/**
102 * srcu_read_lock - register a new reader for an SRCU-protected structure. 104 * srcu_read_lock - register a new reader for an SRCU-protected structure.
@@ -118,6 +120,7 @@ int srcu_read_lock(struct srcu_struct *sp)
118 preempt_enable(); 120 preempt_enable();
119 return idx; 121 return idx;
120} 122}
123EXPORT_SYMBOL_GPL(srcu_read_lock);
121 124
122/** 125/**
123 * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. 126 * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
@@ -136,22 +139,12 @@ void srcu_read_unlock(struct srcu_struct *sp, int idx)
136 per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--; 139 per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
137 preempt_enable(); 140 preempt_enable();
138} 141}
142EXPORT_SYMBOL_GPL(srcu_read_unlock);
139 143
140/** 144/*
141 * synchronize_srcu - wait for prior SRCU read-side critical-section completion 145 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
142 * @sp: srcu_struct with which to synchronize.
143 *
144 * Flip the completed counter, and wait for the old count to drain to zero.
145 * As with classic RCU, the updater must use some separate means of
146 * synchronizing concurrent updates. Can block; must be called from
147 * process context.
148 *
149 * Note that it is illegal to call synchornize_srcu() from the corresponding
150 * SRCU read-side critical section; doing so will result in deadlock.
151 * However, it is perfectly legal to call synchronize_srcu() on one
152 * srcu_struct from some other srcu_struct's read-side critical section.
153 */ 146 */
154void synchronize_srcu(struct srcu_struct *sp) 147void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
155{ 148{
156 int idx; 149 int idx;
157 150
@@ -173,7 +166,7 @@ void synchronize_srcu(struct srcu_struct *sp)
173 return; 166 return;
174 } 167 }
175 168
176 synchronize_sched(); /* Force memory barrier on all CPUs. */ 169 sync_func(); /* Force memory barrier on all CPUs. */
177 170
178 /* 171 /*
179 * The preceding synchronize_sched() ensures that any CPU that 172 * The preceding synchronize_sched() ensures that any CPU that
@@ -190,7 +183,7 @@ void synchronize_srcu(struct srcu_struct *sp)
190 idx = sp->completed & 0x1; 183 idx = sp->completed & 0x1;
191 sp->completed++; 184 sp->completed++;
192 185
193 synchronize_sched(); /* Force memory barrier on all CPUs. */ 186 sync_func(); /* Force memory barrier on all CPUs. */
194 187
195 /* 188 /*
196 * At this point, because of the preceding synchronize_sched(), 189 * At this point, because of the preceding synchronize_sched(),
@@ -203,7 +196,7 @@ void synchronize_srcu(struct srcu_struct *sp)
203 while (srcu_readers_active_idx(sp, idx)) 196 while (srcu_readers_active_idx(sp, idx))
204 schedule_timeout_interruptible(1); 197 schedule_timeout_interruptible(1);
205 198
206 synchronize_sched(); /* Force memory barrier on all CPUs. */ 199 sync_func(); /* Force memory barrier on all CPUs. */
207 200
208 /* 201 /*
209 * The preceding synchronize_sched() forces all srcu_read_unlock() 202 * The preceding synchronize_sched() forces all srcu_read_unlock()
@@ -237,6 +230,47 @@ void synchronize_srcu(struct srcu_struct *sp)
237} 230}
238 231
239/** 232/**
233 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
234 * @sp: srcu_struct with which to synchronize.
235 *
236 * Flip the completed counter, and wait for the old count to drain to zero.
237 * As with classic RCU, the updater must use some separate means of
238 * synchronizing concurrent updates. Can block; must be called from
239 * process context.
240 *
241 * Note that it is illegal to call synchronize_srcu() from the corresponding
242 * SRCU read-side critical section; doing so will result in deadlock.
243 * However, it is perfectly legal to call synchronize_srcu() on one
244 * srcu_struct from some other srcu_struct's read-side critical section.
245 */
246void synchronize_srcu(struct srcu_struct *sp)
247{
248 __synchronize_srcu(sp, synchronize_sched);
249}
250EXPORT_SYMBOL_GPL(synchronize_srcu);
251
252/**
253 * synchronize_srcu_expedited - like synchronize_srcu, but less patient
254 * @sp: srcu_struct with which to synchronize.
255 *
256 * Flip the completed counter, and wait for the old count to drain to zero.
257 * As with classic RCU, the updater must use some separate means of
258 * synchronizing concurrent updates. Can block; must be called from
259 * process context.
260 *
261 * Note that it is illegal to call synchronize_srcu_expedited()
262 * from the corresponding SRCU read-side critical section; doing so
263 * will result in deadlock. However, it is perfectly legal to call
264 * synchronize_srcu_expedited() on one srcu_struct from some other
265 * srcu_struct's read-side critical section.
266 */
267void synchronize_srcu_expedited(struct srcu_struct *sp)
268{
269 __synchronize_srcu(sp, synchronize_sched_expedited);
270}
271EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
272
273/**
240 * srcu_batches_completed - return batches completed. 274 * srcu_batches_completed - return batches completed.
241 * @sp: srcu_struct on which to report batch completion. 275 * @sp: srcu_struct on which to report batch completion.
242 * 276 *
@@ -248,10 +282,4 @@ long srcu_batches_completed(struct srcu_struct *sp)
248{ 282{
249 return sp->completed; 283 return sp->completed;
250} 284}
251
252EXPORT_SYMBOL_GPL(init_srcu_struct);
253EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
254EXPORT_SYMBOL_GPL(srcu_read_lock);
255EXPORT_SYMBOL_GPL(srcu_read_unlock);
256EXPORT_SYMBOL_GPL(synchronize_srcu);
257EXPORT_SYMBOL_GPL(srcu_batches_completed); 285EXPORT_SYMBOL_GPL(srcu_batches_completed);
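
A minimal usage sketch (not part of the patch above) of the SRCU entry points the patch exports, including the new expedited grace period. The srcu_struct, struct my_data and global_ptr names are hypothetical placeholders; only the API calls (init_srcu_struct, srcu_read_lock, srcu_read_unlock, synchronize_srcu_expedited) are taken from the patch.

/*
 * Hypothetical module-side SRCU usage; updates are assumed to be
 * serialized externally (e.g. by a mutex held by the caller).
 */
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_data {
	int field;
};

static struct srcu_struct my_srcu;
static struct my_data *global_ptr;

static int my_setup(void)
{
	return init_srcu_struct(&my_srcu);	/* 0 on success, -ENOMEM on failure */
}

static int my_reader(void)
{
	struct my_data *p;
	int idx, val = 0;

	idx = srcu_read_lock(&my_srcu);		/* enter SRCU read-side critical section */
	p = rcu_dereference(global_ptr);
	if (p)
		val = p->field;
	srcu_read_unlock(&my_srcu, idx);	/* must pass back the same idx */
	return val;
}

static void my_update(struct my_data *new)
{
	struct my_data *old = global_ptr;

	rcu_assign_pointer(global_ptr, new);
	/* Wait for pre-existing readers; the expedited variant trades
	 * extra CPU work for a much shorter grace period. */
	synchronize_srcu_expedited(&my_srcu);
	kfree(old);
}
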
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0d949c517412..4dbf93a52ee9 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -36,6 +36,7 @@
36#include <linux/sysrq.h> 36#include <linux/sysrq.h>
37#include <linux/highuid.h> 37#include <linux/highuid.h>
38#include <linux/writeback.h> 38#include <linux/writeback.h>
39#include <linux/ratelimit.h>
39#include <linux/hugetlb.h> 40#include <linux/hugetlb.h>
40#include <linux/initrd.h> 41#include <linux/initrd.h>
41#include <linux/key.h> 42#include <linux/key.h>
@@ -158,6 +159,8 @@ extern int no_unaligned_warning;
158extern int unaligned_dump_stack; 159extern int unaligned_dump_stack;
159#endif 160#endif
160 161
162extern struct ratelimit_state printk_ratelimit_state;
163
161#ifdef CONFIG_RT_MUTEXES 164#ifdef CONFIG_RT_MUTEXES
162extern int max_lock_depth; 165extern int max_lock_depth;
163#endif 166#endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7cb6f1922598..e51a1bcb7bed 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2274,7 +2274,6 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2274#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 2274#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
2275static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 2275static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2276static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 2276static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2277static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2278 2277
2279static int __init set_ftrace_notrace(char *str) 2278static int __init set_ftrace_notrace(char *str)
2280{ 2279{
@@ -2291,6 +2290,7 @@ static int __init set_ftrace_filter(char *str)
2291__setup("ftrace_filter=", set_ftrace_filter); 2290__setup("ftrace_filter=", set_ftrace_filter);
2292 2291
2293#ifdef CONFIG_FUNCTION_GRAPH_TRACER 2292#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2293static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2294static int __init set_graph_function(char *str) 2294static int __init set_graph_function(char *str)
2295{ 2295{
2296 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 2296 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -2985,7 +2985,7 @@ static ssize_t
2985ftrace_pid_write(struct file *filp, const char __user *ubuf, 2985ftrace_pid_write(struct file *filp, const char __user *ubuf,
2986 size_t cnt, loff_t *ppos) 2986 size_t cnt, loff_t *ppos)
2987{ 2987{
2988 char buf[64]; 2988 char buf[64], *tmp;
2989 long val; 2989 long val;
2990 int ret; 2990 int ret;
2991 2991
@@ -3001,11 +3001,11 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
3001 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid" 3001 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3002 * to clean the filter quietly. 3002 * to clean the filter quietly.
3003 */ 3003 */
3004 strstrip(buf); 3004 tmp = strstrip(buf);
3005 if (strlen(buf) == 0) 3005 if (strlen(tmp) == 0)
3006 return 1; 3006 return 1;
3007 3007
3008 ret = strict_strtol(buf, 10, &val); 3008 ret = strict_strtol(tmp, 10, &val);
3009 if (ret < 0) 3009 if (ret < 0)
3010 return ret; 3010 return ret;
3011 3011
@@ -3391,4 +3391,3 @@ void ftrace_graph_stop(void)
3391 ftrace_stop(); 3391 ftrace_stop();
3392} 3392}
3393#endif 3393#endif
3394
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index db223fe8887f..a1ca4956ab5e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1790,9 +1790,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1790static struct ring_buffer_event * 1790static struct ring_buffer_event *
1791rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 1791rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1792 unsigned long length, unsigned long tail, 1792 unsigned long length, unsigned long tail,
1793 struct buffer_page *commit_page,
1794 struct buffer_page *tail_page, u64 *ts) 1793 struct buffer_page *tail_page, u64 *ts)
1795{ 1794{
1795 struct buffer_page *commit_page = cpu_buffer->commit_page;
1796 struct ring_buffer *buffer = cpu_buffer->buffer; 1796 struct ring_buffer *buffer = cpu_buffer->buffer;
1797 struct buffer_page *next_page; 1797 struct buffer_page *next_page;
1798 int ret; 1798 int ret;
@@ -1895,13 +1895,10 @@ static struct ring_buffer_event *
1895__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 1895__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1896 unsigned type, unsigned long length, u64 *ts) 1896 unsigned type, unsigned long length, u64 *ts)
1897{ 1897{
1898 struct buffer_page *tail_page, *commit_page; 1898 struct buffer_page *tail_page;
1899 struct ring_buffer_event *event; 1899 struct ring_buffer_event *event;
1900 unsigned long tail, write; 1900 unsigned long tail, write;
1901 1901
1902 commit_page = cpu_buffer->commit_page;
1903 /* we just need to protect against interrupts */
1904 barrier();
1905 tail_page = cpu_buffer->tail_page; 1902 tail_page = cpu_buffer->tail_page;
1906 write = local_add_return(length, &tail_page->write); 1903 write = local_add_return(length, &tail_page->write);
1907 1904
@@ -1912,7 +1909,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 1912 /* See if we shot past the end of this buffer page */ 1909 /* See if we shot past the end of this buffer page */
1913 if (write > BUF_PAGE_SIZE) 1910 if (write > BUF_PAGE_SIZE)
1914 return rb_move_tail(cpu_buffer, length, tail, 1911 return rb_move_tail(cpu_buffer, length, tail,
1915 commit_page, tail_page, ts); 1912 tail_page, ts);
1916 1913
1917 /* We reserved something on the buffer */ 1914 /* We reserved something on the buffer */
1918 1915
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 573d3cc762c3..b2477caf09c2 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -35,6 +35,28 @@ static int disable_reader;
35module_param(disable_reader, uint, 0644); 35module_param(disable_reader, uint, 0644);
36MODULE_PARM_DESC(disable_reader, "only run producer"); 36MODULE_PARM_DESC(disable_reader, "only run producer");
37 37
38static int write_iteration = 50;
39module_param(write_iteration, uint, 0644);
40MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
41
42static int producer_nice = 19;
43static int consumer_nice = 19;
44
45static int producer_fifo = -1;
46static int consumer_fifo = -1;
47
48module_param(producer_nice, uint, 0644);
49MODULE_PARM_DESC(producer_nice, "nice prio for producer");
50
51module_param(consumer_nice, uint, 0644);
52MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");
53
54module_param(producer_fifo, uint, 0644);
55MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");
56
57module_param(consumer_fifo, uint, 0644);
58MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
59
38static int read_events; 60static int read_events;
39 61
40static int kill_test; 62static int kill_test;
@@ -208,15 +230,18 @@ static void ring_buffer_producer(void)
208 do { 230 do {
209 struct ring_buffer_event *event; 231 struct ring_buffer_event *event;
210 int *entry; 232 int *entry;
211 233 int i;
212 event = ring_buffer_lock_reserve(buffer, 10); 234
213 if (!event) { 235 for (i = 0; i < write_iteration; i++) {
214 missed++; 236 event = ring_buffer_lock_reserve(buffer, 10);
215 } else { 237 if (!event) {
216 hit++; 238 missed++;
217 entry = ring_buffer_event_data(event); 239 } else {
218 *entry = smp_processor_id(); 240 hit++;
219 ring_buffer_unlock_commit(buffer, event); 241 entry = ring_buffer_event_data(event);
242 *entry = smp_processor_id();
243 ring_buffer_unlock_commit(buffer, event);
244 }
220 } 245 }
221 do_gettimeofday(&end_tv); 246 do_gettimeofday(&end_tv);
222 247
@@ -263,6 +288,27 @@ static void ring_buffer_producer(void)
263 288
264 if (kill_test) 289 if (kill_test)
265 trace_printk("ERROR!\n"); 290 trace_printk("ERROR!\n");
291
292 if (!disable_reader) {
293 if (consumer_fifo < 0)
294 trace_printk("Running Consumer at nice: %d\n",
295 consumer_nice);
296 else
297 trace_printk("Running Consumer at SCHED_FIFO %d\n",
298 consumer_fifo);
299 }
300 if (producer_fifo < 0)
301 trace_printk("Running Producer at nice: %d\n",
302 producer_nice);
303 else
304 trace_printk("Running Producer at SCHED_FIFO %d\n",
305 producer_fifo);
306
307 /* Let the user know that the test is running at low priority */
308 if (producer_fifo < 0 && consumer_fifo < 0 &&
309 producer_nice == 19 && consumer_nice == 19)
310 trace_printk("WARNING!!! This test is running at lowest priority.\n");
311
266 trace_printk("Time: %lld (usecs)\n", time); 312 trace_printk("Time: %lld (usecs)\n", time);
267 trace_printk("Overruns: %lld\n", overruns); 313 trace_printk("Overruns: %lld\n", overruns);
268 if (disable_reader) 314 if (disable_reader)
@@ -392,6 +438,27 @@ static int __init ring_buffer_benchmark_init(void)
392 if (IS_ERR(producer)) 438 if (IS_ERR(producer))
393 goto out_kill; 439 goto out_kill;
394 440
441 /*
442 * Run them as low-prio background tasks by default:
443 */
444 if (!disable_reader) {
445 if (consumer_fifo >= 0) {
446 struct sched_param param = {
447 .sched_priority = consumer_fifo
448 };
449 sched_setscheduler(consumer, SCHED_FIFO, &param);
450 } else
451 set_user_nice(consumer, consumer_nice);
452 }
453
454 if (producer_fifo >= 0) {
455 struct sched_param param = {
456 .sched_priority = consumer_fifo
457 };
458 sched_setscheduler(producer, SCHED_FIFO, &param);
459 } else
460 set_user_nice(producer, producer_nice);
461
395 return 0; 462 return 0;
396 463
397 out_kill: 464 out_kill:
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9d3067a62d43..874f2893cff0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1361,10 +1361,11 @@ int trace_array_vprintk(struct trace_array *tr,
1361 pause_graph_tracing(); 1361 pause_graph_tracing();
1362 raw_local_irq_save(irq_flags); 1362 raw_local_irq_save(irq_flags);
1363 __raw_spin_lock(&trace_buf_lock); 1363 __raw_spin_lock(&trace_buf_lock);
1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1364 if (args == NULL) {
1365 1365 strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
1366 len = min(len, TRACE_BUF_SIZE-1); 1366 len = strlen(trace_buf);
1367 trace_buf[len] = 0; 1367 } else
1368 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1368 1369
1369 size = sizeof(*entry) + len + 1; 1370 size = sizeof(*entry) + len + 1;
1370 buffer = tr->buffer; 1371 buffer = tr->buffer;
@@ -1373,10 +1374,10 @@ int trace_array_vprintk(struct trace_array *tr,
1373 if (!event) 1374 if (!event)
1374 goto out_unlock; 1375 goto out_unlock;
1375 entry = ring_buffer_event_data(event); 1376 entry = ring_buffer_event_data(event);
1376 entry->ip = ip; 1377 entry->ip = ip;
1377 1378
1378 memcpy(&entry->buf, trace_buf, len); 1379 memcpy(&entry->buf, trace_buf, len);
1379 entry->buf[len] = 0; 1380 entry->buf[len] = '\0';
1380 if (!filter_check_discard(call, entry, buffer, event)) 1381 if (!filter_check_discard(call, entry, buffer, event))
1381 ring_buffer_unlock_commit(buffer, event); 1382 ring_buffer_unlock_commit(buffer, event);
1382 1383
@@ -3319,22 +3320,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3319 return cnt; 3320 return cnt;
3320} 3321}
3321 3322
3322static int mark_printk(const char *fmt, ...)
3323{
3324 int ret;
3325 va_list args;
3326 va_start(args, fmt);
3327 ret = trace_vprintk(0, fmt, args);
3328 va_end(args);
3329 return ret;
3330}
3331
3332static ssize_t 3323static ssize_t
3333tracing_mark_write(struct file *filp, const char __user *ubuf, 3324tracing_mark_write(struct file *filp, const char __user *ubuf,
3334 size_t cnt, loff_t *fpos) 3325 size_t cnt, loff_t *fpos)
3335{ 3326{
3336 char *buf; 3327 char *buf;
3337 char *end;
3338 3328
3339 if (tracing_disabled) 3329 if (tracing_disabled)
3340 return -EINVAL; 3330 return -EINVAL;
@@ -3342,7 +3332,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3342 if (cnt > TRACE_BUF_SIZE) 3332 if (cnt > TRACE_BUF_SIZE)
3343 cnt = TRACE_BUF_SIZE; 3333 cnt = TRACE_BUF_SIZE;
3344 3334
3345 buf = kmalloc(cnt + 1, GFP_KERNEL); 3335 buf = kmalloc(cnt + 2, GFP_KERNEL);
3346 if (buf == NULL) 3336 if (buf == NULL)
3347 return -ENOMEM; 3337 return -ENOMEM;
3348 3338
@@ -3350,14 +3340,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3350 kfree(buf); 3340 kfree(buf);
3351 return -EFAULT; 3341 return -EFAULT;
3352 } 3342 }
3343 if (buf[cnt-1] != '\n') {
3344 buf[cnt] = '\n';
3345 buf[cnt+1] = '\0';
3346 } else
3347 buf[cnt] = '\0';
3353 3348
3354 /* Cut from the first nil or newline. */ 3349 cnt = trace_vprintk(0, buf, NULL);
3355 buf[cnt] = '\0';
3356 end = strchr(buf, '\n');
3357 if (end)
3358 *end = '\0';
3359
3360 cnt = mark_printk("%s\n", buf);
3361 kfree(buf); 3350 kfree(buf);
3362 *fpos += cnt; 3351 *fpos += cnt;
3363 3352
@@ -3730,7 +3719,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
3730 3719
3731 s = kmalloc(sizeof(*s), GFP_KERNEL); 3720 s = kmalloc(sizeof(*s), GFP_KERNEL);
3732 if (!s) 3721 if (!s)
3733 return ENOMEM; 3722 return -ENOMEM;
3734 3723
3735 trace_seq_init(s); 3724 trace_seq_init(s);
3736 3725
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 20c5f92e28a8..878c03f386ba 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -20,6 +20,8 @@
20#include <linux/ktime.h> 20#include <linux/ktime.h>
21#include <linux/trace_clock.h> 21#include <linux/trace_clock.h>
22 22
23#include "trace.h"
24
23/* 25/*
24 * trace_clock_local(): the simplest and least coherent tracing clock. 26 * trace_clock_local(): the simplest and least coherent tracing clock.
25 * 27 *
@@ -28,17 +30,17 @@
28 */ 30 */
29u64 notrace trace_clock_local(void) 31u64 notrace trace_clock_local(void)
30{ 32{
31 unsigned long flags;
32 u64 clock; 33 u64 clock;
34 int resched;
33 35
34 /* 36 /*
35 * sched_clock() is an architecture implemented, fast, scalable, 37 * sched_clock() is an architecture implemented, fast, scalable,
36 * lockless clock. It is not guaranteed to be coherent across 38 * lockless clock. It is not guaranteed to be coherent across
37 * CPUs, nor across CPU idle events. 39 * CPUs, nor across CPU idle events.
38 */ 40 */
39 raw_local_irq_save(flags); 41 resched = ftrace_preempt_disable();
40 clock = sched_clock(); 42 clock = sched_clock();
41 raw_local_irq_restore(flags); 43 ftrace_preempt_enable(resched);
42 44
43 return clock; 45 return clock;
44} 46}
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 934d81fb4ca4..dff8c84ddf17 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -48,11 +48,11 @@
48struct ____ftrace_##name { \ 48struct ____ftrace_##name { \
49 tstruct \ 49 tstruct \
50}; \ 50}; \
51static void __used ____ftrace_check_##name(void) \ 51static void __always_unused ____ftrace_check_##name(void) \
52{ \ 52{ \
53 struct ____ftrace_##name *__entry = NULL; \ 53 struct ____ftrace_##name *__entry = NULL; \
54 \ 54 \
55 /* force cmpile-time check on F_printk() */ \ 55 /* force compile-time check on F_printk() */ \
56 printk(print); \ 56 printk(print); \
57} 57}
58 58
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 12328147132c..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -692,31 +692,29 @@ int schedule_on_each_cpu(work_func_t func)
692 if (!works) 692 if (!works)
693 return -ENOMEM; 693 return -ENOMEM;
694 694
695 get_online_cpus();
696
695 /* 697 /*
696 * when running in keventd don't schedule a work item on itself. 698 * When running in keventd don't schedule a work item on
697 * Can just call directly because the work queue is already bound. 699 * itself. Can just call directly because the work queue is
698 * This also is faster. 700 * already bound. This also is faster.
699 * Make this a generic parameter for other workqueues?
700 */ 701 */
701 if (current_is_keventd()) { 702 if (current_is_keventd())
702 orig = raw_smp_processor_id(); 703 orig = raw_smp_processor_id();
703 INIT_WORK(per_cpu_ptr(works, orig), func);
704 func(per_cpu_ptr(works, orig));
705 }
706 704
707 get_online_cpus();
708 for_each_online_cpu(cpu) { 705 for_each_online_cpu(cpu) {
709 struct work_struct *work = per_cpu_ptr(works, cpu); 706 struct work_struct *work = per_cpu_ptr(works, cpu);
710 707
711 if (cpu == orig)
712 continue;
713 INIT_WORK(work, func); 708 INIT_WORK(work, func);
714 schedule_work_on(cpu, work);
715 }
716 for_each_online_cpu(cpu) {
717 if (cpu != orig) 709 if (cpu != orig)
718 flush_work(per_cpu_ptr(works, cpu)); 710 schedule_work_on(cpu, work);
719 } 711 }
712 if (orig >= 0)
713 func(per_cpu_ptr(works, orig));
714
715 for_each_online_cpu(cpu)
716 flush_work(per_cpu_ptr(works, cpu));
717
720 put_online_cpus(); 718 put_online_cpus();
721 free_percpu(works); 719 free_percpu(works);
722 return 0; 720 return 0;
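
A caller sketch (hypothetical, not part of the patch) showing the semantics the schedule_on_each_cpu() rework preserves: the work function runs once on every online CPU and the call waits for all instances; when invoked from keventd the local CPU's instance is called directly, after the work for the other CPUs has been queued. The per-CPU variable and function names are placeholders.

#include <linux/workqueue.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);	/* hypothetical per-CPU state */

static void my_reset_one(struct work_struct *unused)
{
	/* Runs in process context on the CPU the work item was queued for. */
	__get_cpu_var(my_counter) = 0;
}

static int my_reset_all(void)
{
	/* Returns 0 on success, or -ENOMEM if the per-CPU works can't be allocated. */
	return schedule_on_each_cpu(my_reset_one);
}
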
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 234ceb10861f..a79c4d0407ab 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -750,7 +750,7 @@ config RCU_TORTURE_TEST_RUNNABLE
750config RCU_CPU_STALL_DETECTOR 750config RCU_CPU_STALL_DETECTOR
751 bool "Check for stalled CPUs delaying RCU grace periods" 751 bool "Check for stalled CPUs delaying RCU grace periods"
752 depends on TREE_RCU || TREE_PREEMPT_RCU 752 depends on TREE_RCU || TREE_PREEMPT_RCU
753 default n 753 default y
754 help 754 help
755 This option causes RCU to printk information on which 755 This option causes RCU to printk information on which
756 CPUs are delaying the current grace period, but only when 756 CPUs are delaying the current grace period, but only when
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 23abbd93cae1..92cdd9936e3d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -200,6 +200,9 @@ radix_tree_node_free(struct radix_tree_node *node)
200 * ensure that the addition of a single element in the tree cannot fail. On 200 * ensure that the addition of a single element in the tree cannot fail. On
201 * success, return zero, with preemption disabled. On error, return -ENOMEM 201 * success, return zero, with preemption disabled. On error, return -ENOMEM
202 * with preemption not disabled. 202 * with preemption not disabled.
203 *
204 * To make use of this facility, the radix tree must be initialised without
205 * __GFP_WAIT being passed to INIT_RADIX_TREE().
203 */ 206 */
204int radix_tree_preload(gfp_t gfp_mask) 207int radix_tree_preload(gfp_t gfp_mask)
205{ 208{
@@ -543,7 +546,6 @@ out:
543} 546}
544EXPORT_SYMBOL(radix_tree_tag_clear); 547EXPORT_SYMBOL(radix_tree_tag_clear);
545 548
546#ifndef __KERNEL__ /* Only the test harness uses this at present */
547/** 549/**
548 * radix_tree_tag_get - get a tag on a radix tree node 550 * radix_tree_tag_get - get a tag on a radix tree node
549 * @root: radix tree root 551 * @root: radix tree root
@@ -606,7 +608,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
606 } 608 }
607} 609}
608EXPORT_SYMBOL(radix_tree_tag_get); 610EXPORT_SYMBOL(radix_tree_tag_get);
609#endif
610 611
611/** 612/**
612 * radix_tree_next_hole - find the next hole (not-present entry) 613 * radix_tree_next_hole - find the next hole (not-present entry)
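
A hypothetical illustration of the radix_tree_preload() contract documented above: the tree is initialised without __GFP_WAIT so the insertion path never blocks, while the preload (which may sleep) guarantees that the node allocation cannot fail under the lock. my_tree, my_lock, index and item are placeholder names.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);	/* no __GFP_WAIT in the tree's gfp mask */
static DEFINE_SPINLOCK(my_lock);

static int my_insert(unsigned long index, void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* may sleep; disables preemption on success */
	if (err)
		return err;

	spin_lock(&my_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);

	radix_tree_preload_end();		/* re-enables preemption */
	return err;
}
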
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 26187edcc7ea..09f5ce1810dc 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -7,15 +7,12 @@
7 * parameter. Now every user can use their own standalone ratelimit_state. 7 * parameter. Now every user can use their own standalone ratelimit_state.
8 * 8 *
9 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
10 *
11 */ 10 */
12 11
13#include <linux/kernel.h> 12#include <linux/ratelimit.h>
14#include <linux/jiffies.h> 13#include <linux/jiffies.h>
15#include <linux/module.h> 14#include <linux/module.h>
16 15
17static DEFINE_SPINLOCK(ratelimit_lock);
18
19/* 16/*
20 * __ratelimit - rate limiting 17 * __ratelimit - rate limiting
21 * @rs: ratelimit_state data 18 * @rs: ratelimit_state data
@@ -23,35 +20,43 @@ static DEFINE_SPINLOCK(ratelimit_lock);
23 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks 20 * This enforces a rate limit: not more than @rs->ratelimit_burst callbacks
24 * in every @rs->ratelimit_jiffies 21 * in every @rs->ratelimit_jiffies
25 */ 22 */
26int __ratelimit(struct ratelimit_state *rs) 23int ___ratelimit(struct ratelimit_state *rs, const char *func)
27{ 24{
28 unsigned long flags; 25 unsigned long flags;
26 int ret;
29 27
30 if (!rs->interval) 28 if (!rs->interval)
31 return 1; 29 return 1;
32 30
33 spin_lock_irqsave(&ratelimit_lock, flags); 31 /*
32 * If we contend on this state's lock then almost
33 * by definition we are too busy to print a message,
34 * in addition to the one that will be printed by
35 * the entity that is holding the lock already:
36 */
37 if (!spin_trylock_irqsave(&rs->lock, flags))
38 return 1;
39
34 if (!rs->begin) 40 if (!rs->begin)
35 rs->begin = jiffies; 41 rs->begin = jiffies;
36 42
37 if (time_is_before_jiffies(rs->begin + rs->interval)) { 43 if (time_is_before_jiffies(rs->begin + rs->interval)) {
38 if (rs->missed) 44 if (rs->missed)
39 printk(KERN_WARNING "%s: %d callbacks suppressed\n", 45 printk(KERN_WARNING "%s: %d callbacks suppressed\n",
40 __func__, rs->missed); 46 func, rs->missed);
41 rs->begin = 0; 47 rs->begin = 0;
42 rs->printed = 0; 48 rs->printed = 0;
43 rs->missed = 0; 49 rs->missed = 0;
44 } 50 }
45 if (rs->burst && rs->burst > rs->printed) 51 if (rs->burst && rs->burst > rs->printed) {
46 goto print; 52 rs->printed++;
47 53 ret = 1;
48 rs->missed++; 54 } else {
49 spin_unlock_irqrestore(&ratelimit_lock, flags); 55 rs->missed++;
50 return 0; 56 ret = 0;
57 }
58 spin_unlock_irqrestore(&rs->lock, flags);
51 59
52print: 60 return ret;
53 rs->printed++;
54 spin_unlock_irqrestore(&ratelimit_lock, flags);
55 return 1;
56} 61}
57EXPORT_SYMBOL(__ratelimit); 62EXPORT_SYMBOL(___ratelimit);
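
A hypothetical caller sketch for the reworked rate limiting: with the global ratelimit_lock gone, each user declares its own ratelimit_state, which now carries its own spinlock, so unrelated subsystems no longer contend on a shared lock. DEFINE_RATELIMIT_STATE() and the __ratelimit() wrapper that passes __func__ down to ___ratelimit() are assumed to come from <linux/ratelimit.h>; the driver names are placeholders.

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Allow at most 10 messages in any 5 second window. */
static DEFINE_RATELIMIT_STATE(my_ratelimit, 5 * HZ, 10);

static void my_report(int err)
{
	if (__ratelimit(&my_ratelimit))
		printk(KERN_WARNING "my_driver: I/O error %d\n", err);
}
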
diff --git a/lib/string.c b/lib/string.c
index b19b87af65a3..e96421ab9a9a 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -246,13 +246,17 @@ EXPORT_SYMBOL(strlcat);
246#undef strcmp 246#undef strcmp
247int strcmp(const char *cs, const char *ct) 247int strcmp(const char *cs, const char *ct)
248{ 248{
249 signed char __res; 249 unsigned char c1, c2;
250 250
251 while (1) { 251 while (1) {
252 if ((__res = *cs - *ct++) != 0 || !*cs++) 252 c1 = *cs++;
253 c2 = *ct++;
254 if (c1 != c2)
255 return c1 < c2 ? -1 : 1;
256 if (!c1)
253 break; 257 break;
254 } 258 }
255 return __res; 259 return 0;
256} 260}
257EXPORT_SYMBOL(strcmp); 261EXPORT_SYMBOL(strcmp);
258#endif 262#endif
@@ -266,14 +270,18 @@ EXPORT_SYMBOL(strcmp);
266 */ 270 */
267int strncmp(const char *cs, const char *ct, size_t count) 271int strncmp(const char *cs, const char *ct, size_t count)
268{ 272{
269 signed char __res = 0; 273 unsigned char c1, c2;
270 274
271 while (count) { 275 while (count) {
272 if ((__res = *cs - *ct++) != 0 || !*cs++) 276 c1 = *cs++;
277 c2 = *ct++;
278 if (c1 != c2)
279 return c1 < c2 ? -1 : 1;
280 if (!c1)
273 break; 281 break;
274 count--; 282 count--;
275 } 283 }
276 return __res; 284 return 0;
277} 285}
278EXPORT_SYMBOL(strncmp); 286EXPORT_SYMBOL(strncmp);
279#endif 287#endif
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index ac25cd28e807..795472d8ae24 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -97,6 +97,8 @@ static phys_addr_t *io_tlb_orig_addr;
97 */ 97 */
98static DEFINE_SPINLOCK(io_tlb_lock); 98static DEFINE_SPINLOCK(io_tlb_lock);
99 99
100static int late_alloc;
101
100static int __init 102static int __init
101setup_io_tlb_npages(char *str) 103setup_io_tlb_npages(char *str)
102{ 104{
@@ -109,6 +111,7 @@ setup_io_tlb_npages(char *str)
109 ++str; 111 ++str;
110 if (!strcmp(str, "force")) 112 if (!strcmp(str, "force"))
111 swiotlb_force = 1; 113 swiotlb_force = 1;
114
112 return 1; 115 return 1;
113} 116}
114__setup("swiotlb=", setup_io_tlb_npages); 117__setup("swiotlb=", setup_io_tlb_npages);
@@ -121,8 +124,9 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
121 return phys_to_dma(hwdev, virt_to_phys(address)); 124 return phys_to_dma(hwdev, virt_to_phys(address));
122} 125}
123 126
124static void swiotlb_print_info(unsigned long bytes) 127void swiotlb_print_info(void)
125{ 128{
129 unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
126 phys_addr_t pstart, pend; 130 phys_addr_t pstart, pend;
127 131
128 pstart = virt_to_phys(io_tlb_start); 132 pstart = virt_to_phys(io_tlb_start);
@@ -140,7 +144,7 @@ static void swiotlb_print_info(unsigned long bytes)
140 * structures for the software IO TLB used to implement the DMA API. 144 * structures for the software IO TLB used to implement the DMA API.
141 */ 145 */
142void __init 146void __init
143swiotlb_init_with_default_size(size_t default_size) 147swiotlb_init_with_default_size(size_t default_size, int verbose)
144{ 148{
145 unsigned long i, bytes; 149 unsigned long i, bytes;
146 150
@@ -176,14 +180,14 @@ swiotlb_init_with_default_size(size_t default_size)
176 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); 180 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
177 if (!io_tlb_overflow_buffer) 181 if (!io_tlb_overflow_buffer)
178 panic("Cannot allocate SWIOTLB overflow buffer!\n"); 182 panic("Cannot allocate SWIOTLB overflow buffer!\n");
179 183 if (verbose)
180 swiotlb_print_info(bytes); 184 swiotlb_print_info();
181} 185}
182 186
183void __init 187void __init
184swiotlb_init(void) 188swiotlb_init(int verbose)
185{ 189{
186 swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ 190 swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */
187} 191}
188 192
189/* 193/*
@@ -260,7 +264,9 @@ swiotlb_late_init_with_default_size(size_t default_size)
260 if (!io_tlb_overflow_buffer) 264 if (!io_tlb_overflow_buffer)
261 goto cleanup4; 265 goto cleanup4;
262 266
263 swiotlb_print_info(bytes); 267 swiotlb_print_info();
268
269 late_alloc = 1;
264 270
265 return 0; 271 return 0;
266 272
@@ -281,6 +287,32 @@ cleanup1:
281 return -ENOMEM; 287 return -ENOMEM;
282} 288}
283 289
290void __init swiotlb_free(void)
291{
292 if (!io_tlb_overflow_buffer)
293 return;
294
295 if (late_alloc) {
296 free_pages((unsigned long)io_tlb_overflow_buffer,
297 get_order(io_tlb_overflow));
298 free_pages((unsigned long)io_tlb_orig_addr,
299 get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
300 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
301 sizeof(int)));
302 free_pages((unsigned long)io_tlb_start,
303 get_order(io_tlb_nslabs << IO_TLB_SHIFT));
304 } else {
305 free_bootmem_late(__pa(io_tlb_overflow_buffer),
306 io_tlb_overflow);
307 free_bootmem_late(__pa(io_tlb_orig_addr),
308 io_tlb_nslabs * sizeof(phys_addr_t));
309 free_bootmem_late(__pa(io_tlb_list),
310 io_tlb_nslabs * sizeof(int));
311 free_bootmem_late(__pa(io_tlb_start),
312 io_tlb_nslabs << IO_TLB_SHIFT);
313 }
314}
315
284static int is_swiotlb_buffer(phys_addr_t paddr) 316static int is_swiotlb_buffer(phys_addr_t paddr)
285{ 317{
286 return paddr >= virt_to_phys(io_tlb_start) && 318 return paddr >= virt_to_phys(io_tlb_start) &&
diff --git a/mm/Kconfig b/mm/Kconfig
index fd3386242cf0..44cf6f0a3a6d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -128,12 +128,9 @@ config SPARSEMEM_VMEMMAP
128config MEMORY_HOTPLUG 128config MEMORY_HOTPLUG
129 bool "Allow for memory hot-add" 129 bool "Allow for memory hot-add"
130 depends on SPARSEMEM || X86_64_ACPI_NUMA 130 depends on SPARSEMEM || X86_64_ACPI_NUMA
131 depends on HOTPLUG && !(HIBERNATION && !S390) && ARCH_ENABLE_MEMORY_HOTPLUG 131 depends on HOTPLUG && ARCH_ENABLE_MEMORY_HOTPLUG
132 depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390) 132 depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)
133 133
134comment "Memory hotplug is currently incompatible with Software Suspend"
135 depends on SPARSEMEM && HOTPLUG && HIBERNATION && !S390
136
137config MEMORY_HOTPLUG_SPARSE 134config MEMORY_HOTPLUG_SPARSE
138 def_bool y 135 def_bool y
139 depends on SPARSEMEM && MEMORY_HOTPLUG 136 depends on SPARSEMEM && MEMORY_HOTPLUG
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 11aee09dd2a6..67a33a5a1a93 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -604,10 +604,14 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
604 604
605 /* 605 /*
606 * Finally, kill the kernel threads. We don't need to be RCU 606 * Finally, kill the kernel threads. We don't need to be RCU
607 * safe anymore, since the bdi is gone from visibility. 607 * safe anymore, since the bdi is gone from visibility. Force
608 * unfreeze of the thread before calling kthread_stop(), otherwise
 609 * it would never exit if it is currently stuck in the refrigerator.
608 */ 610 */
609 list_for_each_entry(wb, &bdi->wb_list, list) 611 list_for_each_entry(wb, &bdi->wb_list, list) {
612 wb->task->flags &= ~PF_FROZEN;
610 kthread_stop(wb->task); 613 kthread_stop(wb->task);
614 }
611} 615}
612 616
613/* 617/*
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 555d5d2731c6..d1dc23cc7f10 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -143,6 +143,30 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
143 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages); 143 return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
144} 144}
145 145
146/*
147 * free_bootmem_late - free bootmem pages directly to page allocator
148 * @addr: starting address of the range
149 * @size: size of the range in bytes
150 *
151 * This is only useful when the bootmem allocator has already been torn
152 * down, but we are still initializing the system. Pages are given directly
153 * to the page allocator, no bootmem metadata is updated because it is gone.
154 */
155void __init free_bootmem_late(unsigned long addr, unsigned long size)
156{
157 unsigned long cursor, end;
158
159 kmemleak_free_part(__va(addr), size);
160
161 cursor = PFN_UP(addr);
162 end = PFN_DOWN(addr + size);
163
164 for (; cursor < end; cursor++) {
165 __free_pages_bootmem(pfn_to_page(cursor), 0);
166 totalram_pages++;
167 }
168}
169
146static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) 170static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
147{ 171{
148 int aligned; 172 int aligned;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 821dee596377..2047465cd27c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -26,6 +26,7 @@
26#include <linux/migrate.h> 26#include <linux/migrate.h>
27#include <linux/page-isolation.h> 27#include <linux/page-isolation.h>
28#include <linux/pfn.h> 28#include <linux/pfn.h>
29#include <linux/suspend.h>
29 30
30#include <asm/tlbflush.h> 31#include <asm/tlbflush.h>
31 32
@@ -447,7 +448,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
447} 448}
448#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ 449#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
449 450
450static pg_data_t *hotadd_new_pgdat(int nid, u64 start) 451/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
452static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
451{ 453{
452 struct pglist_data *pgdat; 454 struct pglist_data *pgdat;
453 unsigned long zones_size[MAX_NR_ZONES] = {0}; 455 unsigned long zones_size[MAX_NR_ZONES] = {0};
@@ -484,14 +486,18 @@ int __ref add_memory(int nid, u64 start, u64 size)
484 struct resource *res; 486 struct resource *res;
485 int ret; 487 int ret;
486 488
489 lock_system_sleep();
490
487 res = register_memory_resource(start, size); 491 res = register_memory_resource(start, size);
492 ret = -EEXIST;
488 if (!res) 493 if (!res)
489 return -EEXIST; 494 goto out;
490 495
491 if (!node_online(nid)) { 496 if (!node_online(nid)) {
492 pgdat = hotadd_new_pgdat(nid, start); 497 pgdat = hotadd_new_pgdat(nid, start);
498 ret = -ENOMEM;
493 if (!pgdat) 499 if (!pgdat)
494 return -ENOMEM; 500 goto out;
495 new_pgdat = 1; 501 new_pgdat = 1;
496 } 502 }
497 503
@@ -514,7 +520,8 @@ int __ref add_memory(int nid, u64 start, u64 size)
514 BUG_ON(ret); 520 BUG_ON(ret);
515 } 521 }
516 522
517 return ret; 523 goto out;
524
518error: 525error:
519 /* rollback pgdat allocation and others */ 526 /* rollback pgdat allocation and others */
520 if (new_pgdat) 527 if (new_pgdat)
@@ -522,6 +529,8 @@ error:
522 if (res) 529 if (res)
523 release_memory_resource(res); 530 release_memory_resource(res);
524 531
532out:
533 unlock_system_sleep();
525 return ret; 534 return ret;
526} 535}
527EXPORT_SYMBOL_GPL(add_memory); 536EXPORT_SYMBOL_GPL(add_memory);
@@ -758,6 +767,8 @@ int offline_pages(unsigned long start_pfn,
758 if (!test_pages_in_a_zone(start_pfn, end_pfn)) 767 if (!test_pages_in_a_zone(start_pfn, end_pfn))
759 return -EINVAL; 768 return -EINVAL;
760 769
770 lock_system_sleep();
771
761 zone = page_zone(pfn_to_page(start_pfn)); 772 zone = page_zone(pfn_to_page(start_pfn));
762 node = zone_to_nid(zone); 773 node = zone_to_nid(zone);
763 nr_pages = end_pfn - start_pfn; 774 nr_pages = end_pfn - start_pfn;
@@ -765,7 +776,7 @@ int offline_pages(unsigned long start_pfn,
765 /* set above range as isolated */ 776 /* set above range as isolated */
766 ret = start_isolate_page_range(start_pfn, end_pfn); 777 ret = start_isolate_page_range(start_pfn, end_pfn);
767 if (ret) 778 if (ret)
768 return ret; 779 goto out;
769 780
770 arg.start_pfn = start_pfn; 781 arg.start_pfn = start_pfn;
771 arg.nr_pages = nr_pages; 782 arg.nr_pages = nr_pages;
@@ -843,6 +854,7 @@ repeat:
843 writeback_set_ratelimit(); 854 writeback_set_ratelimit();
844 855
845 memory_notify(MEM_OFFLINE, &arg); 856 memory_notify(MEM_OFFLINE, &arg);
857 unlock_system_sleep();
846 return 0; 858 return 0;
847 859
848failed_removal: 860failed_removal:
@@ -852,6 +864,8 @@ failed_removal:
852 /* pushback to free area */ 864 /* pushback to free area */
853 undo_isolate_page_range(start_pfn, end_pfn); 865 undo_isolate_page_range(start_pfn, end_pfn);
854 866
867out:
868 unlock_system_sleep();
855 return ret; 869 return ret;
856} 870}
857 871
diff --git a/mm/mmap.c b/mm/mmap.c
index 73f5e4b64010..292ddc3cef9c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -20,7 +20,6 @@
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/personality.h> 21#include <linux/personality.h>
22#include <linux/security.h> 22#include <linux/security.h>
23#include <linux/ima.h>
24#include <linux/hugetlb.h> 23#include <linux/hugetlb.h>
25#include <linux/profile.h> 24#include <linux/profile.h>
26#include <linux/module.h> 25#include <linux/module.h>
@@ -1061,9 +1060,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1061 error = security_file_mmap(file, reqprot, prot, flags, addr, 0); 1060 error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
1062 if (error) 1061 if (error)
1063 return error; 1062 return error;
1064 error = ima_file_mmap(file, prot);
1065 if (error)
1066 return error;
1067 1063
1068 return mmap_region(file, addr, len, flags, vm_flags, pgoff); 1064 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
1069} 1065}
diff --git a/mm/percpu.c b/mm/percpu.c
index d90797160c2a..5adfc268b408 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -355,62 +355,86 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
355} 355}
356 356
357/** 357/**
358 * pcpu_extend_area_map - extend area map for allocation 358 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
359 * @chunk: target chunk 359 * @chunk: chunk of interest
360 * 360 *
361 * Extend area map of @chunk so that it can accomodate an allocation. 361 * Determine whether area map of @chunk needs to be extended to
362 * A single allocation can split an area into three areas, so this 362 * accomodate a new allocation.
363 * function makes sure that @chunk->map has at least two extra slots.
364 * 363 *
365 * CONTEXT: 364 * CONTEXT:
366 * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired 365 * pcpu_lock.
367 * if area map is extended.
368 * 366 *
369 * RETURNS: 367 * RETURNS:
370 * 0 if noop, 1 if successfully extended, -errno on failure. 368 * New target map allocation length if extension is necessary, 0
369 * otherwise.
371 */ 370 */
372static int pcpu_extend_area_map(struct pcpu_chunk *chunk, unsigned long *flags) 371static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
373{ 372{
374 int new_alloc; 373 int new_alloc;
375 int *new;
376 size_t size;
377 374
378 /* has enough? */
379 if (chunk->map_alloc >= chunk->map_used + 2) 375 if (chunk->map_alloc >= chunk->map_used + 2)
380 return 0; 376 return 0;
381 377
382 spin_unlock_irqrestore(&pcpu_lock, *flags);
383
384 new_alloc = PCPU_DFL_MAP_ALLOC; 378 new_alloc = PCPU_DFL_MAP_ALLOC;
385 while (new_alloc < chunk->map_used + 2) 379 while (new_alloc < chunk->map_used + 2)
386 new_alloc *= 2; 380 new_alloc *= 2;
387 381
388 new = pcpu_mem_alloc(new_alloc * sizeof(new[0])); 382 return new_alloc;
389 if (!new) { 383}
390 spin_lock_irqsave(&pcpu_lock, *flags); 384
385/**
386 * pcpu_extend_area_map - extend area map of a chunk
387 * @chunk: chunk of interest
388 * @new_alloc: new target allocation length of the area map
389 *
390 * Extend area map of @chunk to have @new_alloc entries.
391 *
392 * CONTEXT:
393 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
394 *
395 * RETURNS:
396 * 0 on success, -errno on failure.
397 */
398static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
399{
400 int *old = NULL, *new = NULL;
401 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
402 unsigned long flags;
403
404 new = pcpu_mem_alloc(new_size);
405 if (!new)
391 return -ENOMEM; 406 return -ENOMEM;
392 }
393 407
394 /* 408 /* acquire pcpu_lock and switch to new area map */
395 * Acquire pcpu_lock and switch to new area map. Only free 409 spin_lock_irqsave(&pcpu_lock, flags);
396 * could have happened inbetween, so map_used couldn't have 410
397 * grown. 411 if (new_alloc <= chunk->map_alloc)
398 */ 412 goto out_unlock;
399 spin_lock_irqsave(&pcpu_lock, *flags);
400 BUG_ON(new_alloc < chunk->map_used + 2);
401 413
402 size = chunk->map_alloc * sizeof(chunk->map[0]); 414 old_size = chunk->map_alloc * sizeof(chunk->map[0]);
403 memcpy(new, chunk->map, size); 415 memcpy(new, chunk->map, old_size);
404 416
405 /* 417 /*
406 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is 418 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
407 * one of the first chunks and still using static map. 419 * one of the first chunks and still using static map.
408 */ 420 */
409 if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC) 421 if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
410 pcpu_mem_free(chunk->map, size); 422 old = chunk->map;
411 423
412 chunk->map_alloc = new_alloc; 424 chunk->map_alloc = new_alloc;
413 chunk->map = new; 425 chunk->map = new;
426 new = NULL;
427
428out_unlock:
429 spin_unlock_irqrestore(&pcpu_lock, flags);
430
431 /*
432 * pcpu_mem_free() might end up calling vfree() which uses
433 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
434 */
435 pcpu_mem_free(old, old_size);
436 pcpu_mem_free(new, new_size);
437
414 return 0; 438 return 0;
415} 439}
416 440
@@ -1049,7 +1073,7 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
1049 static int warn_limit = 10; 1073 static int warn_limit = 10;
1050 struct pcpu_chunk *chunk; 1074 struct pcpu_chunk *chunk;
1051 const char *err; 1075 const char *err;
1052 int slot, off; 1076 int slot, off, new_alloc;
1053 unsigned long flags; 1077 unsigned long flags;
1054 1078
1055 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { 1079 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
@@ -1064,14 +1088,25 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
1064 /* serve reserved allocations from the reserved chunk if available */ 1088 /* serve reserved allocations from the reserved chunk if available */
1065 if (reserved && pcpu_reserved_chunk) { 1089 if (reserved && pcpu_reserved_chunk) {
1066 chunk = pcpu_reserved_chunk; 1090 chunk = pcpu_reserved_chunk;
1067 if (size > chunk->contig_hint || 1091
1068 pcpu_extend_area_map(chunk, &flags) < 0) { 1092 if (size > chunk->contig_hint) {
1069 err = "failed to extend area map of reserved chunk"; 1093 err = "alloc from reserved chunk failed";
1070 goto fail_unlock; 1094 goto fail_unlock;
1071 } 1095 }
1096
1097 while ((new_alloc = pcpu_need_to_extend(chunk))) {
1098 spin_unlock_irqrestore(&pcpu_lock, flags);
1099 if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
1100 err = "failed to extend area map of reserved chunk";
1101 goto fail_unlock_mutex;
1102 }
1103 spin_lock_irqsave(&pcpu_lock, flags);
1104 }
1105
1072 off = pcpu_alloc_area(chunk, size, align); 1106 off = pcpu_alloc_area(chunk, size, align);
1073 if (off >= 0) 1107 if (off >= 0)
1074 goto area_found; 1108 goto area_found;
1109
1075 err = "alloc from reserved chunk failed"; 1110 err = "alloc from reserved chunk failed";
1076 goto fail_unlock; 1111 goto fail_unlock;
1077 } 1112 }
@@ -1083,14 +1118,20 @@ restart:
1083 if (size > chunk->contig_hint) 1118 if (size > chunk->contig_hint)
1084 continue; 1119 continue;
1085 1120
1086 switch (pcpu_extend_area_map(chunk, &flags)) { 1121 new_alloc = pcpu_need_to_extend(chunk);
1087 case 0: 1122 if (new_alloc) {
1088 break; 1123 spin_unlock_irqrestore(&pcpu_lock, flags);
1089 case 1: 1124 if (pcpu_extend_area_map(chunk,
1090 goto restart; /* pcpu_lock dropped, restart */ 1125 new_alloc) < 0) {
1091 default: 1126 err = "failed to extend area map";
1092 err = "failed to extend area map"; 1127 goto fail_unlock_mutex;
1093 goto fail_unlock; 1128 }
1129 spin_lock_irqsave(&pcpu_lock, flags);
1130 /*
1131 * pcpu_lock has been dropped, need to
1132 * restart cpu_slot list walking.
1133 */
1134 goto restart;
1094 } 1135 }
1095 1136
1096 off = pcpu_alloc_area(chunk, size, align); 1137 off = pcpu_alloc_area(chunk, size, align);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8836575f9d79..a29c5ab5815c 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -281,8 +281,11 @@ out_uninit_applicant:
281 if (ngrp) 281 if (ngrp)
282 vlan_gvrp_uninit_applicant(real_dev); 282 vlan_gvrp_uninit_applicant(real_dev);
283out_free_group: 283out_free_group:
284 if (ngrp) 284 if (ngrp) {
285 vlan_group_free(ngrp); 285 hlist_del_rcu(&ngrp->hlist);
286 /* Free the group, after all cpu's are done. */
287 call_rcu(&ngrp->rcu, vlan_rcu_free);
288 }
286 return err; 289 return err;
287} 290}
288 291
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index a9750984f772..b7c4224f4e7d 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -211,6 +211,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
211 conn->type = type; 211 conn->type = type;
212 conn->mode = HCI_CM_ACTIVE; 212 conn->mode = HCI_CM_ACTIVE;
213 conn->state = BT_OPEN; 213 conn->state = BT_OPEN;
214 conn->auth_type = HCI_AT_GENERAL_BONDING;
214 215
215 conn->power_save = 1; 216 conn->power_save = 1;
216 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 217 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 77e9fb130adb..947f8bbb4bb3 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2205,7 +2205,7 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
2205{ 2205{
2206 struct l2cap_pinfo *pi = l2cap_pi(sk); 2206 struct l2cap_pinfo *pi = l2cap_pi(sk);
2207 struct l2cap_conf_req *req = data; 2207 struct l2cap_conf_req *req = data;
2208 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM }; 2208 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2209 void *ptr = req->data; 2209 void *ptr = req->data;
2210 2210
2211 BT_DBG("sk %p", sk); 2211 BT_DBG("sk %p", sk);
@@ -2394,6 +2394,10 @@ done:
2394 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; 2394 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2395 2395
2396 pi->conf_state |= L2CAP_CONF_MODE_DONE; 2396 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2397
2398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2399 sizeof(rfc), (unsigned long) &rfc);
2400
2397 break; 2401 break;
2398 2402
2399 case L2CAP_MODE_STREAMING: 2403 case L2CAP_MODE_STREAMING:
@@ -2401,6 +2405,10 @@ done:
2401 pi->max_pdu_size = rfc.max_pdu_size; 2405 pi->max_pdu_size = rfc.max_pdu_size;
2402 2406
2403 pi->conf_state |= L2CAP_CONF_MODE_DONE; 2407 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2408
2409 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2410 sizeof(rfc), (unsigned long) &rfc);
2411
2404 break; 2412 break;
2405 2413
2406 default: 2414 default:
@@ -2410,9 +2418,6 @@ done:
2410 rfc.mode = pi->mode; 2418 rfc.mode = pi->mode;
2411 } 2419 }
2412 2420
2413 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2414 sizeof(rfc), (unsigned long) &rfc);
2415
2416 if (result == L2CAP_CONF_SUCCESS) 2421 if (result == L2CAP_CONF_SUCCESS)
2417 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE; 2422 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2418 } 2423 }
diff --git a/net/core/dev.c b/net/core/dev.c
index b8f74cfb1bfd..fe10551d3671 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -942,14 +942,15 @@ rollback:
942 ret = notifier_to_errno(ret); 942 ret = notifier_to_errno(ret);
943 943
944 if (ret) { 944 if (ret) {
945 if (err) { 945 /* err >= 0 after dev_alloc_name() or stores the first errno */
946 printk(KERN_ERR 946 if (err >= 0) {
947 "%s: name change rollback failed: %d.\n",
948 dev->name, ret);
949 } else {
950 err = ret; 947 err = ret;
951 memcpy(dev->name, oldname, IFNAMSIZ); 948 memcpy(dev->name, oldname, IFNAMSIZ);
952 goto rollback; 949 goto rollback;
950 } else {
951 printk(KERN_ERR
952 "%s: name change rollback failed: %d.\n",
953 dev->name, ret);
953 } 954 }
954 } 955 }
955 956
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 6eb8d47cbf3a..6e79e96cb4f2 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -363,6 +363,7 @@ struct pktgen_dev {
363 * device name (not when the inject is 363 * device name (not when the inject is
364 * started as it used to do.) 364 * started as it used to do.)
365 */ 365 */
366 char odevname[32];
366 struct flow_state *flows; 367 struct flow_state *flows;
367 unsigned cflows; /* Concurrent flows (config) */ 368 unsigned cflows; /* Concurrent flows (config) */
368 unsigned lflow; /* Flow length (config) */ 369 unsigned lflow; /* Flow length (config) */
@@ -426,7 +427,7 @@ static const char version[] =
426static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); 427static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
427static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); 428static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
428static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 429static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
429 const char *ifname); 430 const char *ifname, bool exact);
430static int pktgen_device_event(struct notifier_block *, unsigned long, void *); 431static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
431static void pktgen_run_all_threads(void); 432static void pktgen_run_all_threads(void);
432static void pktgen_reset_all_threads(void); 433static void pktgen_reset_all_threads(void);
@@ -528,7 +529,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
528 seq_printf(seq, 529 seq_printf(seq,
529 " frags: %d delay: %llu clone_skb: %d ifname: %s\n", 530 " frags: %d delay: %llu clone_skb: %d ifname: %s\n",
530 pkt_dev->nfrags, (unsigned long long) pkt_dev->delay, 531 pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
531 pkt_dev->clone_skb, pkt_dev->odev->name); 532 pkt_dev->clone_skb, pkt_dev->odevname);
532 533
533 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, 534 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
534 pkt_dev->lflow); 535 pkt_dev->lflow);
@@ -1688,13 +1689,13 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
1688 if_lock(t); 1689 if_lock(t);
1689 list_for_each_entry(pkt_dev, &t->if_list, list) 1690 list_for_each_entry(pkt_dev, &t->if_list, list)
1690 if (pkt_dev->running) 1691 if (pkt_dev->running)
1691 seq_printf(seq, "%s ", pkt_dev->odev->name); 1692 seq_printf(seq, "%s ", pkt_dev->odevname);
1692 1693
1693 seq_printf(seq, "\nStopped: "); 1694 seq_printf(seq, "\nStopped: ");
1694 1695
1695 list_for_each_entry(pkt_dev, &t->if_list, list) 1696 list_for_each_entry(pkt_dev, &t->if_list, list)
1696 if (!pkt_dev->running) 1697 if (!pkt_dev->running)
1697 seq_printf(seq, "%s ", pkt_dev->odev->name); 1698 seq_printf(seq, "%s ", pkt_dev->odevname);
1698 1699
1699 if (t->result[0]) 1700 if (t->result[0])
1700 seq_printf(seq, "\nResult: %s\n", t->result); 1701 seq_printf(seq, "\nResult: %s\n", t->result);
@@ -1817,9 +1818,10 @@ static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
1817{ 1818{
1818 struct pktgen_thread *t; 1819 struct pktgen_thread *t;
1819 struct pktgen_dev *pkt_dev = NULL; 1820 struct pktgen_dev *pkt_dev = NULL;
1821 bool exact = (remove == FIND);
1820 1822
1821 list_for_each_entry(t, &pktgen_threads, th_list) { 1823 list_for_each_entry(t, &pktgen_threads, th_list) {
1822 pkt_dev = pktgen_find_dev(t, ifname); 1824 pkt_dev = pktgen_find_dev(t, ifname, exact);
1823 if (pkt_dev) { 1825 if (pkt_dev) {
1824 if (remove) { 1826 if (remove) {
1825 if_lock(t); 1827 if_lock(t);
@@ -1994,7 +1996,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
1994 "queue_map_min (zero-based) (%d) exceeds valid range " 1996 "queue_map_min (zero-based) (%d) exceeds valid range "
1995 "[0 - %d] for (%d) queues on %s, resetting\n", 1997 "[0 - %d] for (%d) queues on %s, resetting\n",
1996 pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, 1998 pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
1997 pkt_dev->odev->name); 1999 pkt_dev->odevname);
1998 pkt_dev->queue_map_min = ntxq - 1; 2000 pkt_dev->queue_map_min = ntxq - 1;
1999 } 2001 }
2000 if (pkt_dev->queue_map_max >= ntxq) { 2002 if (pkt_dev->queue_map_max >= ntxq) {
@@ -2002,7 +2004,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2002 "queue_map_max (zero-based) (%d) exceeds valid range " 2004 "queue_map_max (zero-based) (%d) exceeds valid range "
2003 "[0 - %d] for (%d) queues on %s, resetting\n", 2005 "[0 - %d] for (%d) queues on %s, resetting\n",
2004 pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, 2006 pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
2005 pkt_dev->odev->name); 2007 pkt_dev->odevname);
2006 pkt_dev->queue_map_max = ntxq - 1; 2008 pkt_dev->queue_map_max = ntxq - 1;
2007 } 2009 }
2008 2010
@@ -3262,7 +3264,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
3262 3264
3263 if (!pkt_dev->running) { 3265 if (!pkt_dev->running) {
3264 printk(KERN_WARNING "pktgen: interface: %s is already " 3266 printk(KERN_WARNING "pktgen: interface: %s is already "
3265 "stopped\n", pkt_dev->odev->name); 3267 "stopped\n", pkt_dev->odevname);
3266 return -EINVAL; 3268 return -EINVAL;
3267 } 3269 }
3268 3270
@@ -3464,7 +3466,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3464 default: /* Drivers are not supposed to return other values! */ 3466 default: /* Drivers are not supposed to return other values! */
3465 if (net_ratelimit()) 3467 if (net_ratelimit())
3466 pr_info("pktgen: %s xmit error: %d\n", 3468 pr_info("pktgen: %s xmit error: %d\n",
3467 odev->name, ret); 3469 pkt_dev->odevname, ret);
3468 pkt_dev->errors++; 3470 pkt_dev->errors++;
3469 /* fallthru */ 3471 /* fallthru */
3470 case NETDEV_TX_LOCKED: 3472 case NETDEV_TX_LOCKED:
@@ -3566,13 +3568,18 @@ static int pktgen_thread_worker(void *arg)
3566} 3568}
3567 3569
3568static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 3570static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
3569 const char *ifname) 3571 const char *ifname, bool exact)
3570{ 3572{
3571 struct pktgen_dev *p, *pkt_dev = NULL; 3573 struct pktgen_dev *p, *pkt_dev = NULL;
3572 if_lock(t); 3574 size_t len = strlen(ifname);
3573 3575
3576 if_lock(t);
3574 list_for_each_entry(p, &t->if_list, list) 3577 list_for_each_entry(p, &t->if_list, list)
3575 if (strncmp(p->odev->name, ifname, IFNAMSIZ) == 0) { 3578 if (strncmp(p->odevname, ifname, len) == 0) {
3579 if (p->odevname[len]) {
3580 if (exact || p->odevname[len] != '@')
3581 continue;
3582 }
3576 pkt_dev = p; 3583 pkt_dev = p;
3577 break; 3584 break;
3578 } 3585 }
@@ -3628,6 +3635,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3628 if (!pkt_dev) 3635 if (!pkt_dev)
3629 return -ENOMEM; 3636 return -ENOMEM;
3630 3637
3638 strcpy(pkt_dev->odevname, ifname);
3631 pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state)); 3639 pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state));
3632 if (pkt_dev->flows == NULL) { 3640 if (pkt_dev->flows == NULL) {
3633 kfree(pkt_dev); 3641 kfree(pkt_dev);
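
The pktgen hunks above store the configured name in odevname and let pktgen_find_dev() match either exactly or by base interface name, where an entry may carry an "@..." suffix. A small stand-alone sketch of that match (user-space, illustrative names only):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Match a stored pktgen entry name against an interface name. Entries may
 * be plain ("eth0") or carry a suffix after '@' ("eth0@0"); an exact lookup
 * must not accept the suffixed aliases. */
static bool odevname_matches(const char *entry, const char *ifname, bool exact)
{
	size_t len = strlen(ifname);

	if (strncmp(entry, ifname, len) != 0)
		return false;			/* different base name */
	if (entry[len] == '\0')
		return true;			/* identical string */
	return !exact && entry[len] == '@';	/* alias only for loose lookups */
}

int main(void)
{
	printf("%d %d %d\n",
	       odevname_matches("eth0@0", "eth0", false),	/* 1 */
	       odevname_matches("eth0@0", "eth0", true),	/* 0 */
	       odevname_matches("eth10", "eth0", false));	/* 0 */
	return 0;
}
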
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 80a96166df39..ec85681a7dd8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2701,7 +2701,8 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2701 2701
2702 NAPI_GRO_CB(skb)->free = 1; 2702 NAPI_GRO_CB(skb)->free = 1;
2703 goto done; 2703 goto done;
2704 } 2704 } else if (skb_gro_len(p) != pinfo->gso_size)
2705 return -E2BIG;
2705 2706
2706 headroom = skb_headroom(p); 2707 headroom = skb_headroom(p);
2707 nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p)); 2708 nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 7db1de0497c6..887c03c4e3c6 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -10,7 +10,9 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/socket.h> 11#include <linux/socket.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/ratelimit.h>
13#include <linux/init.h> 14#include <linux/init.h>
15
14#include <net/ip.h> 16#include <net/ip.h>
15#include <net/sock.h> 17#include <net/sock.h>
16 18
diff --git a/net/core/utils.c b/net/core/utils.c
index 83221aee7084..838250241d26 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -24,6 +24,8 @@
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/ratelimit.h>
28
27#include <net/sock.h> 29#include <net/sock.h>
28 30
29#include <asm/byteorder.h> 31#include <asm/byteorder.h>
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 575f9bd51ccd..d3fe10be7219 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -563,7 +563,7 @@ out_oversize:
563 printk(KERN_INFO "Oversized IP packet from %pI4.\n", 563 printk(KERN_INFO "Oversized IP packet from %pI4.\n",
564 &qp->saddr); 564 &qp->saddr);
565out_fail: 565out_fail:
566 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS); 566 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
567 return err; 567 return err;
568} 568}
569 569
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 630a56df7b47..99508d66a642 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -483,8 +483,10 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
483 return -EINVAL; 483 return -EINVAL;
484 } 484 }
485 485
486 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) 486 if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
487 dev_put(dev);
487 return -EADDRNOTAVAIL; 488 return -EADDRNOTAVAIL;
489 }
488 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++; 490 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
489 ip_rt_multicast_event(in_dev); 491 ip_rt_multicast_event(in_dev);
490 492
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 98440ad82558..f1813bc71088 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1183,7 +1183,9 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
1183#if TCP_DEBUG 1183#if TCP_DEBUG
1184 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1184 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1185 1185
1186 WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); 1186 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1187 KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1188 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1187#endif 1189#endif
1188 1190
1189 if (inet_csk_ack_scheduled(sk)) { 1191 if (inet_csk_ack_scheduled(sk)) {
@@ -1430,11 +1432,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1430 /* Now that we have two receive queues this 1432 /* Now that we have two receive queues this
1431 * shouldn't happen. 1433 * shouldn't happen.
1432 */ 1434 */
1433 if (before(*seq, TCP_SKB_CB(skb)->seq)) { 1435 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1434 printk(KERN_INFO "recvmsg bug: copied %X " 1436 KERN_INFO "recvmsg bug: copied %X "
1435 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq); 1437 "seq %X rcvnxt %X fl %X\n", *seq,
1438 TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1439 flags))
1436 break; 1440 break;
1437 } 1441
1438 offset = *seq - TCP_SKB_CB(skb)->seq; 1442 offset = *seq - TCP_SKB_CB(skb)->seq;
1439 if (tcp_hdr(skb)->syn) 1443 if (tcp_hdr(skb)->syn)
1440 offset--; 1444 offset--;
@@ -1443,8 +1447,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1443 if (tcp_hdr(skb)->fin) 1447 if (tcp_hdr(skb)->fin)
1444 goto found_fin_ok; 1448 goto found_fin_ok;
1445 WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: " 1449 WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
1446 "copied %X seq %X\n", *seq, 1450 "copied %X seq %X rcvnxt %X fl %X\n",
1447 TCP_SKB_CB(skb)->seq); 1451 *seq, TCP_SKB_CB(skb)->seq,
1452 tp->rcv_nxt, flags);
1448 } 1453 }
1449 1454
1450 /* Well, if we have backlog, try to process it now yet. */ 1455 /* Well, if we have backlog, try to process it now yet. */
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index bc064d7933ff..ce8e0e772bab 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -85,10 +85,6 @@ void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *r
85 struct ieee80211_local *local = sdata->local; 85 struct ieee80211_local *local = sdata->local;
86 struct sta_info *sta; 86 struct sta_info *sta;
87 87
88 /* stop HW Rx aggregation. ampdu_action existence
89 * already verified in session init so we add the BUG_ON */
90 BUG_ON(!local->ops->ampdu_action);
91
92 rcu_read_lock(); 88 rcu_read_lock();
93 89
94 sta = sta_info_get(local, ra); 90 sta = sta_info_get(local, ra);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index b09948ceec4a..89e238b001de 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -123,13 +123,18 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
123 ieee80211_tx_skb(sdata, skb, 0); 123 ieee80211_tx_skb(sdata, skb, 0);
124} 124}
125 125
126static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 126int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
127 enum ieee80211_back_parties initiator) 127 enum ieee80211_back_parties initiator)
128{ 128{
129 struct ieee80211_local *local = sta->local; 129 struct ieee80211_local *local = sta->local;
130 int ret; 130 int ret;
131 u8 *state; 131 u8 *state;
132 132
133#ifdef CONFIG_MAC80211_HT_DEBUG
134 printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
135 sta->sta.addr, tid);
136#endif /* CONFIG_MAC80211_HT_DEBUG */
137
133 state = &sta->ampdu_mlme.tid_state_tx[tid]; 138 state = &sta->ampdu_mlme.tid_state_tx[tid];
134 139
135 if (*state == HT_AGG_STATE_OPERATIONAL) 140 if (*state == HT_AGG_STATE_OPERATIONAL)
@@ -143,7 +148,6 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
143 148
144 /* HW shall not deny going back to legacy */ 149 /* HW shall not deny going back to legacy */
145 if (WARN_ON(ret)) { 150 if (WARN_ON(ret)) {
146 *state = HT_AGG_STATE_OPERATIONAL;
147 /* 151 /*
148 * We may have pending packets get stuck in this case... 152 * We may have pending packets get stuck in this case...
149 * Not bothering with a workaround for now. 153 * Not bothering with a workaround for now.
@@ -173,12 +177,14 @@ static void sta_addba_resp_timer_expired(unsigned long data)
173 177
174 /* check if the TID waits for addBA response */ 178 /* check if the TID waits for addBA response */
175 spin_lock_bh(&sta->lock); 179 spin_lock_bh(&sta->lock);
176 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 180 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) !=
181 HT_ADDBA_REQUESTED_MSK) {
177 spin_unlock_bh(&sta->lock); 182 spin_unlock_bh(&sta->lock);
178 *state = HT_AGG_STATE_IDLE; 183 *state = HT_AGG_STATE_IDLE;
179#ifdef CONFIG_MAC80211_HT_DEBUG 184#ifdef CONFIG_MAC80211_HT_DEBUG
180 printk(KERN_DEBUG "timer expired on tid %d but we are not " 185 printk(KERN_DEBUG "timer expired on tid %d but we are not "
181 "expecting addBA response there", tid); 186 "(or no longer) expecting addBA response there",
187 tid);
182#endif 188#endif
183 return; 189 return;
184 } 190 }
@@ -523,11 +529,6 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
523 goto unlock; 529 goto unlock;
524 } 530 }
525 531
526#ifdef CONFIG_MAC80211_HT_DEBUG
527 printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
528 sta->sta.addr, tid);
529#endif /* CONFIG_MAC80211_HT_DEBUG */
530
531 ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator); 532 ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
532 533
533 unlock: 534 unlock:
@@ -543,7 +544,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
543 struct sta_info *sta; 544 struct sta_info *sta;
544 int ret = 0; 545 int ret = 0;
545 546
546 if (WARN_ON(!local->ops->ampdu_action)) 547 if (!local->ops->ampdu_action)
547 return -EINVAL; 548 return -EINVAL;
548 549
549 if (tid >= STA_TID_NUM) 550 if (tid >= STA_TID_NUM)
@@ -666,21 +667,21 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
666 667
667 state = &sta->ampdu_mlme.tid_state_tx[tid]; 668 state = &sta->ampdu_mlme.tid_state_tx[tid];
668 669
669 del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
670
671 spin_lock_bh(&sta->lock); 670 spin_lock_bh(&sta->lock);
672 671
673 if (!(*state & HT_ADDBA_REQUESTED_MSK)) 672 if (!(*state & HT_ADDBA_REQUESTED_MSK))
674 goto timer_still_needed; 673 goto out;
675 674
676 if (mgmt->u.action.u.addba_resp.dialog_token != 675 if (mgmt->u.action.u.addba_resp.dialog_token !=
677 sta->ampdu_mlme.tid_tx[tid]->dialog_token) { 676 sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
678#ifdef CONFIG_MAC80211_HT_DEBUG 677#ifdef CONFIG_MAC80211_HT_DEBUG
679 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); 678 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
680#endif /* CONFIG_MAC80211_HT_DEBUG */ 679#endif /* CONFIG_MAC80211_HT_DEBUG */
681 goto timer_still_needed; 680 goto out;
682 } 681 }
683 682
683 del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
684
684#ifdef CONFIG_MAC80211_HT_DEBUG 685#ifdef CONFIG_MAC80211_HT_DEBUG
685 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); 686 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
686#endif /* CONFIG_MAC80211_HT_DEBUG */ 687#endif /* CONFIG_MAC80211_HT_DEBUG */
@@ -699,10 +700,6 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
699 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); 700 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
700 } 701 }
701 702
702 goto out;
703
704 timer_still_needed:
705 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
706 out: 703 out:
707 spin_unlock_bh(&sta->lock); 704 spin_unlock_bh(&sta->lock);
708} 705}
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 48ef1a282b91..cdc58e61d921 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -141,7 +141,6 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
141 struct sta_info *sta, 141 struct sta_info *sta,
142 struct ieee80211_mgmt *mgmt, size_t len) 142 struct ieee80211_mgmt *mgmt, size_t len)
143{ 143{
144 struct ieee80211_local *local = sdata->local;
145 u16 tid, params; 144 u16 tid, params;
146 u16 initiator; 145 u16 initiator;
147 146
@@ -161,10 +160,9 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
161 WLAN_BACK_INITIATOR, 0); 160 WLAN_BACK_INITIATOR, 0);
162 else { /* WLAN_BACK_RECIPIENT */ 161 else { /* WLAN_BACK_RECIPIENT */
163 spin_lock_bh(&sta->lock); 162 spin_lock_bh(&sta->lock);
164 sta->ampdu_mlme.tid_state_tx[tid] = 163 if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
165 HT_AGG_STATE_OPERATIONAL; 164 ___ieee80211_stop_tx_ba_session(sta, tid,
165 WLAN_BACK_RECIPIENT);
166 spin_unlock_bh(&sta->lock); 166 spin_unlock_bh(&sta->lock);
167 ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid,
168 WLAN_BACK_RECIPIENT);
169 } 167 }
170} 168}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 588005c84a6d..10d316e455de 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -662,6 +662,14 @@ struct ieee80211_local {
662 bool suspended; 662 bool suspended;
663 663
664 /* 664 /*
665 * Resuming is true while suspended, but when we're reprogramming the
666 * hardware -- at that time it's allowed to use ieee80211_queue_work()
667 * again even though some other parts of the stack are still suspended
668 * and we still drop received frames to avoid waking the stack.
669 */
670 bool resuming;
671
672 /*
665 * quiescing is true during the suspend process _only_ to 673 * quiescing is true during the suspend process _only_ to
666 * ease timer cancelling etc. 674 * ease timer cancelling etc.
667 */ 675 */
@@ -1083,6 +1091,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
1083 1091
1084int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 1092int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1085 enum ieee80211_back_parties initiator); 1093 enum ieee80211_back_parties initiator);
1094int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1095 enum ieee80211_back_parties initiator);
1086 1096
1087/* Spectrum management */ 1097/* Spectrum management */
1088void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 1098void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
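
The new resuming flag documented above lets work be queued while the hardware is being reprogrammed for resume, even though the stack is still suspended. A compact user-space sketch of that gate (struct and function names are illustrative, not mac80211's):

#include <stdbool.h>
#include <stdio.h>

struct wl_state {
	bool suspended;
	bool resuming;
};

/* Refuse queueing only when suspended and not yet reprogramming for resume. */
static bool can_queue_work(const struct wl_state *s)
{
	return !(s->suspended && !s->resuming);
}

int main(void)
{
	struct wl_state going_down = { .suspended = true,  .resuming = false };
	struct wl_state coming_up  = { .suspended = true,  .resuming = true  };
	struct wl_state running    = { .suspended = false, .resuming = false };

	printf("%d %d %d\n", can_queue_work(&going_down),	/* 0 */
	       can_queue_work(&coming_up),			/* 1 */
	       can_queue_work(&running));			/* 1 */
	return 0;
}
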
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index aeb65b3d2295..e6c08da8da26 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -520,9 +520,9 @@ EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
520 */ 520 */
521static bool ieee80211_can_queue_work(struct ieee80211_local *local) 521static bool ieee80211_can_queue_work(struct ieee80211_local *local)
522{ 522{
523 if (WARN(local->suspended, "queueing ieee80211 work while " 523 if (WARN(local->suspended && !local->resuming,
524 "going to suspend\n")) 524 "queueing ieee80211 work while going to suspend\n"))
525 return false; 525 return false;
526 526
527 return true; 527 return true;
528} 528}
@@ -1025,13 +1025,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1025 struct sta_info *sta; 1025 struct sta_info *sta;
1026 unsigned long flags; 1026 unsigned long flags;
1027 int res; 1027 int res;
1028 bool from_suspend = local->suspended;
1029 1028
1030 /* 1029 if (local->suspended)
1031 * We're going to start the hardware, at that point 1030 local->resuming = true;
1032 * we are no longer suspended and can RX frames.
1033 */
1034 local->suspended = false;
1035 1031
1036 /* restart hardware */ 1032 /* restart hardware */
1037 if (local->open_count) { 1033 if (local->open_count) {
@@ -1129,11 +1125,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1129 * If this is for hw restart things are still running. 1125 * If this is for hw restart things are still running.
1130 * We may want to change that later, however. 1126 * We may want to change that later, however.
1131 */ 1127 */
1132 if (!from_suspend) 1128 if (!local->suspended)
1133 return 0; 1129 return 0;
1134 1130
1135#ifdef CONFIG_PM 1131#ifdef CONFIG_PM
1132 /* first set suspended false, then resuming */
1136 local->suspended = false; 1133 local->suspended = false;
1134 mb();
1135 local->resuming = false;
1137 1136
1138 list_for_each_entry(sdata, &local->interfaces, list) { 1137 list_for_each_entry(sdata, &local->interfaces, list) {
1139 switch(sdata->vif.type) { 1138 switch(sdata->vif.type) {
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index c93494fef8ef..d65d3481919c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -128,9 +128,8 @@ EXPORT_SYMBOL(nf_log_packet);
128 128
129#ifdef CONFIG_PROC_FS 129#ifdef CONFIG_PROC_FS
130static void *seq_start(struct seq_file *seq, loff_t *pos) 130static void *seq_start(struct seq_file *seq, loff_t *pos)
131 __acquires(RCU)
132{ 131{
133 rcu_read_lock(); 132 mutex_lock(&nf_log_mutex);
134 133
135 if (*pos >= ARRAY_SIZE(nf_loggers)) 134 if (*pos >= ARRAY_SIZE(nf_loggers))
136 return NULL; 135 return NULL;
@@ -149,9 +148,8 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
149} 148}
150 149
151static void seq_stop(struct seq_file *s, void *v) 150static void seq_stop(struct seq_file *s, void *v)
152 __releases(RCU)
153{ 151{
154 rcu_read_unlock(); 152 mutex_unlock(&nf_log_mutex);
155} 153}
156 154
157static int seq_show(struct seq_file *s, void *v) 155static int seq_show(struct seq_file *s, void *v)
@@ -161,7 +159,7 @@ static int seq_show(struct seq_file *s, void *v)
161 struct nf_logger *t; 159 struct nf_logger *t;
162 int ret; 160 int ret;
163 161
164 logger = rcu_dereference(nf_loggers[*pos]); 162 logger = nf_loggers[*pos];
165 163
166 if (!logger) 164 if (!logger)
167 ret = seq_printf(s, "%2lld NONE (", *pos); 165 ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -171,22 +169,16 @@ static int seq_show(struct seq_file *s, void *v)
171 if (ret < 0) 169 if (ret < 0)
172 return ret; 170 return ret;
173 171
174 mutex_lock(&nf_log_mutex);
175 list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) { 172 list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
176 ret = seq_printf(s, "%s", t->name); 173 ret = seq_printf(s, "%s", t->name);
177 if (ret < 0) { 174 if (ret < 0)
178 mutex_unlock(&nf_log_mutex);
179 return ret; 175 return ret;
180 }
181 if (&t->list[*pos] != nf_loggers_l[*pos].prev) { 176 if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
182 ret = seq_printf(s, ","); 177 ret = seq_printf(s, ",");
183 if (ret < 0) { 178 if (ret < 0)
184 mutex_unlock(&nf_log_mutex);
185 return ret; 179 return ret;
186 }
187 } 180 }
188 } 181 }
189 mutex_unlock(&nf_log_mutex);
190 182
191 return seq_printf(s, ")\n"); 183 return seq_printf(s, ")\n");
192} 184}
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 2e8089ecd0af..2773be6a71dd 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -112,7 +112,7 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)
112 112
113 priv = kmalloc(sizeof(*priv), GFP_KERNEL); 113 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
114 if (priv == NULL) 114 if (priv == NULL)
115 return -ENOMEM; 115 return false;
116 116
117 /* For SMP, we only want to use one set of state. */ 117 /* For SMP, we only want to use one set of state. */
118 r->master = priv; 118 r->master = priv;
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 63e190504656..4d1a41bbd5d7 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -118,7 +118,7 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
118{ 118{
119 struct xt_osf_user_finger *f; 119 struct xt_osf_user_finger *f;
120 struct xt_osf_finger *sf; 120 struct xt_osf_finger *sf;
121 int err = ENOENT; 121 int err = -ENOENT;
122 122
123 if (!osf_attrs[OSF_ATTR_FINGER]) 123 if (!osf_attrs[OSF_ATTR_FINGER])
124 return -EINVAL; 124 return -EINVAL;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index ba2efb960c60..a001f7c1f711 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1189,6 +1189,7 @@ static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
1189#endif 1189#endif
1190 1190
1191static const struct file_operations rfkill_fops = { 1191static const struct file_operations rfkill_fops = {
1192 .owner = THIS_MODULE,
1192 .open = rfkill_fop_open, 1193 .open = rfkill_fop_open,
1193 .read = rfkill_fop_read, 1194 .read = rfkill_fop_read,
1194 .write = rfkill_fop_write, 1195 .write = rfkill_fop_write,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 8450960df24f..7eed77a39d0d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1485,15 +1485,13 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
1485 * local endpoint and the remote peer. 1485 * local endpoint and the remote peer.
1486 */ 1486 */
1487int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, 1487int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1488 gfp_t gfp) 1488 sctp_scope_t scope, gfp_t gfp)
1489{ 1489{
1490 sctp_scope_t scope;
1491 int flags; 1490 int flags;
1492 1491
1493 /* Use scoping rules to determine the subset of addresses from 1492 /* Use scoping rules to determine the subset of addresses from
1494 * the endpoint. 1493 * the endpoint.
1495 */ 1494 */
1496 scope = sctp_scope(&asoc->peer.active_path->ipaddr);
1497 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; 1495 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1498 if (asoc->peer.ipv4_address) 1496 if (asoc->peer.ipv4_address)
1499 flags |= SCTP_ADDR4_PEERSUPP; 1497 flags |= SCTP_ADDR4_PEERSUPP;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c9f20e28521b..23e5e97aa617 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -423,16 +423,6 @@ void sctp_retransmit_mark(struct sctp_outq *q,
423 if ((reason == SCTP_RTXR_FAST_RTX && 423 if ((reason == SCTP_RTXR_FAST_RTX &&
424 (chunk->fast_retransmit == SCTP_NEED_FRTX)) || 424 (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
425 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { 425 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
426 /* If this chunk was sent less then 1 rto ago, do not
427 * retransmit this chunk, but give the peer time
428 * to acknowlege it. Do this only when
429 * retransmitting due to T3 timeout.
430 */
431 if (reason == SCTP_RTXR_T3_RTX &&
432 time_before(jiffies, chunk->sent_at +
433 transport->last_rto))
434 continue;
435
436 /* RFC 2960 6.2.1 Processing a Received SACK 426 /* RFC 2960 6.2.1 Processing a Received SACK
437 * 427 *
438 * C) Any time a DATA chunk is marked for 428 * C) Any time a DATA chunk is marked for
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8674d4919556..efa516b47e81 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -480,7 +480,6 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
480 * that indicates that we have an outstanding HB. 480 * that indicates that we have an outstanding HB.
481 */ 481 */
482 if (!is_hb || transport->hb_sent) { 482 if (!is_hb || transport->hb_sent) {
483 transport->last_rto = transport->rto;
484 transport->rto = min((transport->rto * 2), transport->asoc->rto_max); 483 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
485 } 484 }
486} 485}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index c8fae1983dd1..d4df45022ffa 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -384,6 +384,11 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
384 if (!new_asoc) 384 if (!new_asoc)
385 goto nomem; 385 goto nomem;
386 386
387 if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
388 sctp_scope(sctp_source(chunk)),
389 GFP_ATOMIC) < 0)
390 goto nomem_init;
391
387 /* The call, sctp_process_init(), can fail on memory allocation. */ 392 /* The call, sctp_process_init(), can fail on memory allocation. */
388 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 393 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
389 sctp_source(chunk), 394 sctp_source(chunk),
@@ -401,9 +406,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
401 len = ntohs(err_chunk->chunk_hdr->length) - 406 len = ntohs(err_chunk->chunk_hdr->length) -
402 sizeof(sctp_chunkhdr_t); 407 sizeof(sctp_chunkhdr_t);
403 408
404 if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
405 goto nomem_init;
406
407 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); 409 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
408 if (!repl) 410 if (!repl)
409 goto nomem_init; 411 goto nomem_init;
@@ -1452,6 +1454,10 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1452 if (!new_asoc) 1454 if (!new_asoc)
1453 goto nomem; 1455 goto nomem;
1454 1456
1457 if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
1458 sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
1459 goto nomem;
1460
1455 /* In the outbound INIT ACK the endpoint MUST copy its current 1461 /* In the outbound INIT ACK the endpoint MUST copy its current
1456 * Verification Tag and Peers Verification tag into a reserved 1462 * Verification Tag and Peers Verification tag into a reserved
1457 * place (local tie-tag and per tie-tag) within the state cookie. 1463 * place (local tie-tag and per tie-tag) within the state cookie.
@@ -1488,9 +1494,6 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1488 sizeof(sctp_chunkhdr_t); 1494 sizeof(sctp_chunkhdr_t);
1489 } 1495 }
1490 1496
1491 if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
1492 goto nomem;
1493
1494 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); 1497 repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
1495 if (!repl) 1498 if (!repl)
1496 goto nomem; 1499 goto nomem;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c8d05758661d..3a95fcb17a9e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1080,6 +1080,13 @@ static int __sctp_connect(struct sock* sk,
1080 err = -ENOMEM; 1080 err = -ENOMEM;
1081 goto out_free; 1081 goto out_free;
1082 } 1082 }
1083
1084 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
1085 GFP_KERNEL);
1086 if (err < 0) {
1087 goto out_free;
1088 }
1089
1083 } 1090 }
1084 1091
1085 /* Prime the peer's transport structures. */ 1092 /* Prime the peer's transport structures. */
@@ -1095,11 +1102,6 @@ static int __sctp_connect(struct sock* sk,
1095 walk_size += af->sockaddr_len; 1102 walk_size += af->sockaddr_len;
1096 } 1103 }
1097 1104
1098 err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
1099 if (err < 0) {
1100 goto out_free;
1101 }
1102
1103 /* In case the user of sctp_connectx() wants an association 1105 /* In case the user of sctp_connectx() wants an association
1104 * id back, assign one now. 1106 * id back, assign one now.
1105 */ 1107 */
@@ -1274,22 +1276,30 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
1274} 1276}
1275 1277
1276/* 1278/*
1277 * New (hopefully final) interface for the API. The option buffer is used 1279 * New (hopefully final) interface for the API.
1278 * both for the returned association id and the addresses. 1280 * We use the sctp_getaddrs_old structure so that the user-space library
 1281 * can avoid any unnecessary allocations. The only different part
1282 * is that we store the actual length of the address buffer into the
1283 * addr_num structure member. That way we can re-use the existing
1284 * code.
1279 */ 1285 */
1280SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len, 1286SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
1281 char __user *optval, 1287 char __user *optval,
1282 int __user *optlen) 1288 int __user *optlen)
1283{ 1289{
1290 struct sctp_getaddrs_old param;
1284 sctp_assoc_t assoc_id = 0; 1291 sctp_assoc_t assoc_id = 0;
1285 int err = 0; 1292 int err = 0;
1286 1293
1287 if (len < sizeof(assoc_id)) 1294 if (len < sizeof(param))
1288 return -EINVAL; 1295 return -EINVAL;
1289 1296
1297 if (copy_from_user(&param, optval, sizeof(param)))
1298 return -EFAULT;
1299
1290 err = __sctp_setsockopt_connectx(sk, 1300 err = __sctp_setsockopt_connectx(sk,
1291 (struct sockaddr __user *)(optval + sizeof(assoc_id)), 1301 (struct sockaddr __user *)param.addrs,
1292 len - sizeof(assoc_id), &assoc_id); 1302 param.addr_num, &assoc_id);
1293 1303
1294 if (err == 0 || err == -EINPROGRESS) { 1304 if (err == 0 || err == -EINPROGRESS) {
1295 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) 1305 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
@@ -1689,6 +1699,11 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1689 goto out_unlock; 1699 goto out_unlock;
1690 } 1700 }
1691 asoc = new_asoc; 1701 asoc = new_asoc;
1702 err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
1703 if (err < 0) {
1704 err = -ENOMEM;
1705 goto out_free;
1706 }
1692 1707
1693 /* If the SCTP_INIT ancillary data is specified, set all 1708 /* If the SCTP_INIT ancillary data is specified, set all
1694 * the association init values accordingly. 1709 * the association init values accordingly.
@@ -1718,11 +1733,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1718 err = -ENOMEM; 1733 err = -ENOMEM;
1719 goto out_free; 1734 goto out_free;
1720 } 1735 }
1721 err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
1722 if (err < 0) {
1723 err = -ENOMEM;
1724 goto out_free;
1725 }
1726 } 1736 }
1727 1737
1728 /* ASSERT: we have a valid association at this point. */ 1738 /* ASSERT: we have a valid association at this point. */
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index c256e4839316..37a1184d789f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -74,7 +74,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
74 * given destination transport address, set RTO to the protocol 74 * given destination transport address, set RTO to the protocol
75 * parameter 'RTO.Initial'. 75 * parameter 'RTO.Initial'.
76 */ 76 */
77 peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial); 77 peer->rto = msecs_to_jiffies(sctp_rto_initial);
78 peer->rtt = 0; 78 peer->rtt = 0;
79 peer->rttvar = 0; 79 peer->rttvar = 0;
80 peer->srtt = 0; 80 peer->srtt = 0;
@@ -308,7 +308,8 @@ void sctp_transport_route(struct sctp_transport *transport,
308 /* Initialize sk->sk_rcv_saddr, if the transport is the 308 /* Initialize sk->sk_rcv_saddr, if the transport is the
309 * association's active path for getsockname(). 309 * association's active path for getsockname().
310 */ 310 */
311 if (asoc && (transport == asoc->peer.active_path)) 311 if (asoc && (!asoc->peer.primary_path ||
312 (transport == asoc->peer.active_path)))
312 opt->pf->af->to_sk_saddr(&transport->saddr, 313 opt->pf->af->to_sk_saddr(&transport->saddr,
313 asoc->base.sk); 314 asoc->base.sk);
314 } else 315 } else
@@ -385,7 +386,6 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
385 tp->rto = tp->asoc->rto_max; 386 tp->rto = tp->asoc->rto_max;
386 387
387 tp->rtt = rtt; 388 tp->rtt = rtt;
388 tp->last_rto = tp->rto;
389 389
390 /* Reset rto_pending so that a new RTT measurement is started when a 390 /* Reset rto_pending so that a new RTT measurement is started when a
391 * new data chunk is sent. 391 * new data chunk is sent.
@@ -601,7 +601,7 @@ void sctp_transport_reset(struct sctp_transport *t)
601 */ 601 */
602 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); 602 t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
603 t->ssthresh = asoc->peer.i.a_rwnd; 603 t->ssthresh = asoc->peer.i.a_rwnd;
604 t->last_rto = t->rto = asoc->rto_initial; 604 t->rto = asoc->rto_initial;
605 t->rtt = 0; 605 t->rtt = 0;
606 t->srtt = 0; 606 t->srtt = 0;
607 t->rttvar = 0; 607 t->rttvar = 0;
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 22e8fd89477f..c7450c8f0a7c 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -306,24 +306,25 @@ EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
306 * @sap: buffer into which to plant socket address 306 * @sap: buffer into which to plant socket address
307 * @salen: size of buffer 307 * @salen: size of buffer
308 * 308 *
309 * @uaddr does not have to be '\0'-terminated, but strict_strtoul() and
310 * rpc_pton() require proper string termination to be successful.
311 *
309 * Returns the size of the socket address if successful; otherwise 312 * Returns the size of the socket address if successful; otherwise
310 * zero is returned. 313 * zero is returned.
311 */ 314 */
312size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len, 315size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
313 struct sockaddr *sap, const size_t salen) 316 struct sockaddr *sap, const size_t salen)
314{ 317{
315 char *c, buf[RPCBIND_MAXUADDRLEN]; 318 char *c, buf[RPCBIND_MAXUADDRLEN + sizeof('\0')];
316 unsigned long portlo, porthi; 319 unsigned long portlo, porthi;
317 unsigned short port; 320 unsigned short port;
318 321
319 if (uaddr_len > sizeof(buf)) 322 if (uaddr_len > RPCBIND_MAXUADDRLEN)
320 return 0; 323 return 0;
321 324
322 memcpy(buf, uaddr, uaddr_len); 325 memcpy(buf, uaddr, uaddr_len);
323 326
324 buf[uaddr_len] = '\n'; 327 buf[uaddr_len] = '\0';
325 buf[uaddr_len + 1] = '\0';
326
327 c = strrchr(buf, '.'); 328 c = strrchr(buf, '.');
328 if (unlikely(c == NULL)) 329 if (unlikely(c == NULL))
329 return 0; 330 return 0;
@@ -332,9 +333,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
332 if (unlikely(portlo > 255)) 333 if (unlikely(portlo > 255))
333 return 0; 334 return 0;
334 335
335 c[0] = '\n'; 336 *c = '\0';
336 c[1] = '\0';
337
338 c = strrchr(buf, '.'); 337 c = strrchr(buf, '.');
339 if (unlikely(c == NULL)) 338 if (unlikely(c == NULL))
340 return 0; 339 return 0;
@@ -345,8 +344,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
345 344
346 port = (unsigned short)((porthi << 8) | portlo); 345 port = (unsigned short)((porthi << 8) | portlo);
347 346
348 c[0] = '\0'; 347 *c = '\0';
349
350 if (rpc_pton(buf, strlen(buf), sap, salen) == 0) 348 if (rpc_pton(buf, strlen(buf), sap, salen) == 0)
351 return 0; 349 return 0;
352 350
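
The rpc_uaddr2sockaddr() change above replaces the '\n' pseudo-terminators with a single '\0' and reserves one extra byte for it, since the input need not be '\0'-terminated but strrchr()/strtoul()-style parsing requires termination. A minimal user-space sketch of the same pattern (hypothetical helper name and buffer size; strtoul() stands in for strict_strtoul()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAXUADDRLEN 32			/* stand-in for RPCBIND_MAXUADDRLEN */

/* Parse the trailing "<hi>.<lo>" port part of a universal address that is
 * not necessarily '\0'-terminated, by copying it into a buffer that keeps
 * one spare byte for the terminator. */
static int uaddr_port(const char *uaddr, size_t uaddr_len)
{
	char buf[MAXUADDRLEN + 1];	/* + 1 byte reserved for '\0' */
	char *c;
	unsigned long hi, lo;

	if (uaddr_len > MAXUADDRLEN)
		return -1;		/* would not fit once terminated */

	memcpy(buf, uaddr, uaddr_len);
	buf[uaddr_len] = '\0';		/* strrchr()/strtoul() need this */

	c = strrchr(buf, '.');
	if (c == NULL)
		return -1;
	lo = strtoul(c + 1, NULL, 10);
	*c = '\0';			/* chop off ".<lo>" and repeat */

	c = strrchr(buf, '.');
	if (c == NULL)
		return -1;
	hi = strtoul(c + 1, NULL, 10);

	if (hi > 255 || lo > 255)
		return -1;
	return (int)((hi << 8) | lo);
}

int main(void)
{
	const char uaddr[] = "192.0.2.1.3.235";	/* port 3 * 256 + 235 = 1003 */

	printf("port = %d\n", uaddr_port(uaddr, strlen(uaddr)));
	return 0;
}
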
diff --git a/scripts/dtc/data.c b/scripts/dtc/data.c
index dd2e3d39d4c1..fe555e819bf8 100644
--- a/scripts/dtc/data.c
+++ b/scripts/dtc/data.c
@@ -217,7 +217,7 @@ struct data data_insert_at_marker(struct data d, struct marker *m,
217 return d; 217 return d;
218} 218}
219 219
220struct data data_append_markers(struct data d, struct marker *m) 220static struct data data_append_markers(struct data d, struct marker *m)
221{ 221{
222 struct marker **mp = &d.markers; 222 struct marker **mp = &d.markers;
223 223
diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
index 44dbfd3f0976..a627bbee91d4 100644
--- a/scripts/dtc/dtc-lexer.l
+++ b/scripts/dtc/dtc-lexer.l
@@ -18,7 +18,7 @@
18 * USA 18 * USA
19 */ 19 */
20 20
21%option noyywrap nounput yylineno 21%option noyywrap noinput nounput yylineno
22 22
23%x INCLUDE 23%x INCLUDE
24%x BYTESTRING 24%x BYTESTRING
diff --git a/scripts/dtc/dtc-lexer.lex.c_shipped b/scripts/dtc/dtc-lexer.lex.c_shipped
index ac392cb040f6..e27cc636e326 100644
--- a/scripts/dtc/dtc-lexer.lex.c_shipped
+++ b/scripts/dtc/dtc-lexer.lex.c_shipped
@@ -9,7 +9,7 @@
9#define FLEX_SCANNER 9#define FLEX_SCANNER
10#define YY_FLEX_MAJOR_VERSION 2 10#define YY_FLEX_MAJOR_VERSION 2
11#define YY_FLEX_MINOR_VERSION 5 11#define YY_FLEX_MINOR_VERSION 5
12#define YY_FLEX_SUBMINOR_VERSION 34 12#define YY_FLEX_SUBMINOR_VERSION 35
13#if YY_FLEX_SUBMINOR_VERSION > 0 13#if YY_FLEX_SUBMINOR_VERSION > 0
14#define FLEX_BETA 14#define FLEX_BETA
15#endif 15#endif
@@ -54,7 +54,6 @@ typedef int flex_int32_t;
54typedef unsigned char flex_uint8_t; 54typedef unsigned char flex_uint8_t;
55typedef unsigned short int flex_uint16_t; 55typedef unsigned short int flex_uint16_t;
56typedef unsigned int flex_uint32_t; 56typedef unsigned int flex_uint32_t;
57#endif /* ! C99 */
58 57
59/* Limits of integral types. */ 58/* Limits of integral types. */
60#ifndef INT8_MIN 59#ifndef INT8_MIN
@@ -85,6 +84,8 @@ typedef unsigned int flex_uint32_t;
85#define UINT32_MAX (4294967295U) 84#define UINT32_MAX (4294967295U)
86#endif 85#endif
87 86
87#endif /* ! C99 */
88
88#endif /* ! FLEXINT_H */ 89#endif /* ! FLEXINT_H */
89 90
90#ifdef __cplusplus 91#ifdef __cplusplus
@@ -141,7 +142,15 @@ typedef unsigned int flex_uint32_t;
141 142
142/* Size of default input buffer. */ 143/* Size of default input buffer. */
143#ifndef YY_BUF_SIZE 144#ifndef YY_BUF_SIZE
145#ifdef __ia64__
146/* On IA-64, the buffer size is 16k, not 8k.
147 * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
148 * Ditto for the __ia64__ case accordingly.
149 */
150#define YY_BUF_SIZE 32768
151#else
144#define YY_BUF_SIZE 16384 152#define YY_BUF_SIZE 16384
153#endif /* __ia64__ */
145#endif 154#endif
146 155
147/* The state buf must be large enough to hold one state per character in the main buffer. 156/* The state buf must be large enough to hold one state per character in the main buffer.
@@ -192,13 +201,6 @@ extern FILE *yyin, *yyout;
192 201
193#define unput(c) yyunput( c, (yytext_ptr) ) 202#define unput(c) yyunput( c, (yytext_ptr) )
194 203
195/* The following is because we cannot portably get our hands on size_t
196 * (without autoconf's help, which isn't available because we want
197 * flex-generated scanners to compile on their own).
198 * Given that the standard has decreed that size_t exists since 1989,
199 * I guess we can afford to depend on it. Manoj.
200 */
201
202#ifndef YY_TYPEDEF_YY_SIZE_T 204#ifndef YY_TYPEDEF_YY_SIZE_T
203#define YY_TYPEDEF_YY_SIZE_T 205#define YY_TYPEDEF_YY_SIZE_T
204typedef size_t yy_size_t; 206typedef size_t yy_size_t;
@@ -604,6 +606,7 @@ char *yytext;
604 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 606 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
605 * USA 607 * USA
606 */ 608 */
609#define YY_NO_INPUT 1
607 610
608 611
609 612
@@ -634,7 +637,7 @@ static int dts_version; /* = 0 */
634 637
635static void push_input_file(const char *filename); 638static void push_input_file(const char *filename);
636static int pop_input_file(void); 639static int pop_input_file(void);
637#line 638 "dtc-lexer.lex.c" 640#line 641 "dtc-lexer.lex.c"
638 641
639#define INITIAL 0 642#define INITIAL 0
640#define INCLUDE 1 643#define INCLUDE 1
@@ -656,6 +659,35 @@ static int pop_input_file(void);
656 659
657static int yy_init_globals (void ); 660static int yy_init_globals (void );
658 661
662/* Accessor methods to globals.
663 These are made visible to non-reentrant scanners for convenience. */
664
665int yylex_destroy (void );
666
667int yyget_debug (void );
668
669void yyset_debug (int debug_flag );
670
671YY_EXTRA_TYPE yyget_extra (void );
672
673void yyset_extra (YY_EXTRA_TYPE user_defined );
674
675FILE *yyget_in (void );
676
677void yyset_in (FILE * in_str );
678
679FILE *yyget_out (void );
680
681void yyset_out (FILE * out_str );
682
683int yyget_leng (void );
684
685char *yyget_text (void );
686
687int yyget_lineno (void );
688
689void yyset_lineno (int line_number );
690
659/* Macros after this point can all be overridden by user definitions in 691/* Macros after this point can all be overridden by user definitions in
660 * section 1. 692 * section 1.
661 */ 693 */
@@ -688,7 +720,12 @@ static int input (void );
688 720
689/* Amount of stuff to slurp up with each read. */ 721/* Amount of stuff to slurp up with each read. */
690#ifndef YY_READ_BUF_SIZE 722#ifndef YY_READ_BUF_SIZE
723#ifdef __ia64__
724/* On IA-64, the buffer size is 16k, not 8k */
725#define YY_READ_BUF_SIZE 16384
726#else
691#define YY_READ_BUF_SIZE 8192 727#define YY_READ_BUF_SIZE 8192
728#endif /* __ia64__ */
692#endif 729#endif
693 730
694/* Copy whatever the last rule matched to the standard output. */ 731/* Copy whatever the last rule matched to the standard output. */
@@ -696,7 +733,7 @@ static int input (void );
696/* This used to be an fputs(), but since the string might contain NUL's, 733/* This used to be an fputs(), but since the string might contain NUL's,
697 * we now use fwrite(). 734 * we now use fwrite().
698 */ 735 */
699#define ECHO fwrite( yytext, yyleng, 1, yyout ) 736#define ECHO do { if (fwrite( yytext, yyleng, 1, yyout )) {} } while (0)
700#endif 737#endif
701 738
702/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL, 739/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL,
@@ -707,7 +744,7 @@ static int input (void );
707 if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \ 744 if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
708 { \ 745 { \
709 int c = '*'; \ 746 int c = '*'; \
710 int n; \ 747 size_t n; \
711 for ( n = 0; n < max_size && \ 748 for ( n = 0; n < max_size && \
712 (c = getc( yyin )) != EOF && c != '\n'; ++n ) \ 749 (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
713 buf[n] = (char) c; \ 750 buf[n] = (char) c; \
@@ -791,7 +828,7 @@ YY_DECL
791 828
792#line 64 "dtc-lexer.l" 829#line 64 "dtc-lexer.l"
793 830
794#line 795 "dtc-lexer.lex.c" 831#line 832 "dtc-lexer.lex.c"
795 832
796 if ( !(yy_init) ) 833 if ( !(yy_init) )
797 { 834 {
@@ -1116,7 +1153,7 @@ YY_RULE_SETUP
1116#line 222 "dtc-lexer.l" 1153#line 222 "dtc-lexer.l"
1117ECHO; 1154ECHO;
1118 YY_BREAK 1155 YY_BREAK
1119#line 1120 "dtc-lexer.lex.c" 1156#line 1157 "dtc-lexer.lex.c"
1120 1157
1121 case YY_END_OF_BUFFER: 1158 case YY_END_OF_BUFFER:
1122 { 1159 {
@@ -1840,8 +1877,8 @@ YY_BUFFER_STATE yy_scan_string (yyconst char * yystr )
1840 1877
1841/** Setup the input buffer state to scan the given bytes. The next call to yylex() will 1878/** Setup the input buffer state to scan the given bytes. The next call to yylex() will
1842 * scan from a @e copy of @a bytes. 1879 * scan from a @e copy of @a bytes.
1843 * @param bytes the byte buffer to scan 1880 * @param yybytes the byte buffer to scan
1844 * @param len the number of bytes in the buffer pointed to by @a bytes. 1881 * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
1845 * 1882 *
1846 * @return the newly allocated buffer state object. 1883 * @return the newly allocated buffer state object.
1847 */ 1884 */
diff --git a/scripts/dtc/libfdt/fdt_ro.c b/scripts/dtc/libfdt/fdt_ro.c
index fbbba44fcd0d..22e692919ff9 100644
--- a/scripts/dtc/libfdt/fdt_ro.c
+++ b/scripts/dtc/libfdt/fdt_ro.c
@@ -411,7 +411,7 @@ int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
411 &phandle, sizeof(phandle)); 411 &phandle, sizeof(phandle));
412} 412}
413 413
414int _stringlist_contains(const char *strlist, int listlen, const char *str) 414static int _stringlist_contains(const char *strlist, int listlen, const char *str)
415{ 415{
416 int len = strlen(str); 416 int len = strlen(str);
417 const char *p; 417 const char *p;
diff --git a/scripts/dtc/treesource.c b/scripts/dtc/treesource.c
index ebeb6eb27907..1521ff11bb97 100644
--- a/scripts/dtc/treesource.c
+++ b/scripts/dtc/treesource.c
@@ -52,7 +52,7 @@ static void write_prefix(FILE *f, int level)
52 fputc('\t', f); 52 fputc('\t', f);
53} 53}
54 54
55int isstring(char c) 55static int isstring(char c)
56{ 56{
57 return (isprint(c) 57 return (isprint(c)
58 || (c == '\0') 58 || (c == '\0')
diff --git a/scripts/genksyms/keywords.c_shipped b/scripts/genksyms/keywords.c_shipped
index 971e0113ae7a..287467a2e8c7 100644
--- a/scripts/genksyms/keywords.c_shipped
+++ b/scripts/genksyms/keywords.c_shipped
@@ -1,4 +1,4 @@
1/* ANSI-C code produced by gperf version 3.0.2 */ 1/* ANSI-C code produced by gperf version 3.0.3 */
2/* Command-line: gperf -L ANSI-C -a -C -E -g -H is_reserved_hash -k '1,3,$' -N is_reserved_word -p -t scripts/genksyms/keywords.gperf */ 2/* Command-line: gperf -L ANSI-C -a -C -E -g -H is_reserved_hash -k '1,3,$' -N is_reserved_word -p -t scripts/genksyms/keywords.gperf */
3 3
4#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \ 4#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \
@@ -30,7 +30,9 @@
30 30
31#line 1 "scripts/genksyms/keywords.gperf" 31#line 1 "scripts/genksyms/keywords.gperf"
32 32
33#line 3 "scripts/genksyms/keywords.gperf" 33struct resword;
34static const struct resword *is_reserved_word(register const char *str, register unsigned int len);
35#line 5 "scripts/genksyms/keywords.gperf"
34struct resword { const char *name; int token; }; 36struct resword { const char *name; int token; };
35/* maximum key range = 62, duplicates = 0 */ 37/* maximum key range = 62, duplicates = 0 */
36 38
@@ -78,6 +80,9 @@ is_reserved_hash (register const char *str, register unsigned int len)
78 80
79#ifdef __GNUC__ 81#ifdef __GNUC__
80__inline 82__inline
83#ifdef __GNUC_STDC_INLINE__
84__attribute__ ((__gnu_inline__))
85#endif
81#endif 86#endif
82const struct resword * 87const struct resword *
83is_reserved_word (register const char *str, register unsigned int len) 88is_reserved_word (register const char *str, register unsigned int len)
@@ -94,105 +99,105 @@ is_reserved_word (register const char *str, register unsigned int len)
94 static const struct resword wordlist[] = 99 static const struct resword wordlist[] =
95 { 100 {
96 {""}, {""}, {""}, 101 {""}, {""}, {""},
97#line 26 "scripts/genksyms/keywords.gperf" 102#line 28 "scripts/genksyms/keywords.gperf"
98 {"asm", ASM_KEYW}, 103 {"asm", ASM_KEYW},
99 {""}, 104 {""},
100#line 8 "scripts/genksyms/keywords.gperf" 105#line 10 "scripts/genksyms/keywords.gperf"
101 {"__asm", ASM_KEYW}, 106 {"__asm", ASM_KEYW},
102 {""}, 107 {""},
103#line 9 "scripts/genksyms/keywords.gperf" 108#line 11 "scripts/genksyms/keywords.gperf"
104 {"__asm__", ASM_KEYW}, 109 {"__asm__", ASM_KEYW},
105 {""}, {""}, 110 {""}, {""},
106#line 52 "scripts/genksyms/keywords.gperf" 111#line 54 "scripts/genksyms/keywords.gperf"
107 {"__typeof__", TYPEOF_KEYW}, 112 {"__typeof__", TYPEOF_KEYW},
108 {""}, 113 {""},
109#line 12 "scripts/genksyms/keywords.gperf" 114#line 14 "scripts/genksyms/keywords.gperf"
110 {"__const", CONST_KEYW}, 115 {"__const", CONST_KEYW},
111#line 11 "scripts/genksyms/keywords.gperf"
112 {"__attribute__", ATTRIBUTE_KEYW},
113#line 13 "scripts/genksyms/keywords.gperf" 116#line 13 "scripts/genksyms/keywords.gperf"
117 {"__attribute__", ATTRIBUTE_KEYW},
118#line 15 "scripts/genksyms/keywords.gperf"
114 {"__const__", CONST_KEYW}, 119 {"__const__", CONST_KEYW},
115#line 18 "scripts/genksyms/keywords.gperf" 120#line 20 "scripts/genksyms/keywords.gperf"
116 {"__signed__", SIGNED_KEYW}, 121 {"__signed__", SIGNED_KEYW},
117#line 44 "scripts/genksyms/keywords.gperf" 122#line 46 "scripts/genksyms/keywords.gperf"
118 {"static", STATIC_KEYW}, 123 {"static", STATIC_KEYW},
119#line 20 "scripts/genksyms/keywords.gperf" 124#line 22 "scripts/genksyms/keywords.gperf"
120 {"__volatile__", VOLATILE_KEYW}, 125 {"__volatile__", VOLATILE_KEYW},
121#line 39 "scripts/genksyms/keywords.gperf" 126#line 41 "scripts/genksyms/keywords.gperf"
122 {"int", INT_KEYW}, 127 {"int", INT_KEYW},
123#line 32 "scripts/genksyms/keywords.gperf" 128#line 34 "scripts/genksyms/keywords.gperf"
124 {"char", CHAR_KEYW}, 129 {"char", CHAR_KEYW},
125#line 33 "scripts/genksyms/keywords.gperf" 130#line 35 "scripts/genksyms/keywords.gperf"
126 {"const", CONST_KEYW}, 131 {"const", CONST_KEYW},
127#line 45 "scripts/genksyms/keywords.gperf" 132#line 47 "scripts/genksyms/keywords.gperf"
128 {"struct", STRUCT_KEYW}, 133 {"struct", STRUCT_KEYW},
129#line 24 "scripts/genksyms/keywords.gperf" 134#line 26 "scripts/genksyms/keywords.gperf"
130 {"__restrict__", RESTRICT_KEYW}, 135 {"__restrict__", RESTRICT_KEYW},
131#line 25 "scripts/genksyms/keywords.gperf" 136#line 27 "scripts/genksyms/keywords.gperf"
132 {"restrict", RESTRICT_KEYW}, 137 {"restrict", RESTRICT_KEYW},
133#line 23 "scripts/genksyms/keywords.gperf" 138#line 25 "scripts/genksyms/keywords.gperf"
134 {"_restrict", RESTRICT_KEYW}, 139 {"_restrict", RESTRICT_KEYW},
135#line 16 "scripts/genksyms/keywords.gperf" 140#line 18 "scripts/genksyms/keywords.gperf"
136 {"__inline__", INLINE_KEYW}, 141 {"__inline__", INLINE_KEYW},
137#line 10 "scripts/genksyms/keywords.gperf" 142#line 12 "scripts/genksyms/keywords.gperf"
138 {"__attribute", ATTRIBUTE_KEYW}, 143 {"__attribute", ATTRIBUTE_KEYW},
139 {""}, 144 {""},
140#line 14 "scripts/genksyms/keywords.gperf" 145#line 16 "scripts/genksyms/keywords.gperf"
141 {"__extension__", EXTENSION_KEYW}, 146 {"__extension__", EXTENSION_KEYW},
142#line 35 "scripts/genksyms/keywords.gperf" 147#line 37 "scripts/genksyms/keywords.gperf"
143 {"enum", ENUM_KEYW}, 148 {"enum", ENUM_KEYW},
144#line 19 "scripts/genksyms/keywords.gperf" 149#line 21 "scripts/genksyms/keywords.gperf"
145 {"__volatile", VOLATILE_KEYW}, 150 {"__volatile", VOLATILE_KEYW},
146#line 36 "scripts/genksyms/keywords.gperf" 151#line 38 "scripts/genksyms/keywords.gperf"
147 {"extern", EXTERN_KEYW}, 152 {"extern", EXTERN_KEYW},
148 {""}, 153 {""},
149#line 17 "scripts/genksyms/keywords.gperf" 154#line 19 "scripts/genksyms/keywords.gperf"
150 {"__signed", SIGNED_KEYW}, 155 {"__signed", SIGNED_KEYW},
151#line 7 "scripts/genksyms/keywords.gperf" 156#line 9 "scripts/genksyms/keywords.gperf"
152 {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW}, 157 {"EXPORT_SYMBOL_GPL_FUTURE", EXPORT_SYMBOL_KEYW},
153 {""}, 158 {""},
154#line 51 "scripts/genksyms/keywords.gperf" 159#line 53 "scripts/genksyms/keywords.gperf"
155 {"typeof", TYPEOF_KEYW}, 160 {"typeof", TYPEOF_KEYW},
156#line 46 "scripts/genksyms/keywords.gperf" 161#line 48 "scripts/genksyms/keywords.gperf"
157 {"typedef", TYPEDEF_KEYW}, 162 {"typedef", TYPEDEF_KEYW},
158#line 15 "scripts/genksyms/keywords.gperf" 163#line 17 "scripts/genksyms/keywords.gperf"
159 {"__inline", INLINE_KEYW}, 164 {"__inline", INLINE_KEYW},
160#line 31 "scripts/genksyms/keywords.gperf" 165#line 33 "scripts/genksyms/keywords.gperf"
161 {"auto", AUTO_KEYW}, 166 {"auto", AUTO_KEYW},
162#line 47 "scripts/genksyms/keywords.gperf" 167#line 49 "scripts/genksyms/keywords.gperf"
163 {"union", UNION_KEYW}, 168 {"union", UNION_KEYW},
164 {""}, {""}, 169 {""}, {""},
165#line 48 "scripts/genksyms/keywords.gperf" 170#line 50 "scripts/genksyms/keywords.gperf"
166 {"unsigned", UNSIGNED_KEYW}, 171 {"unsigned", UNSIGNED_KEYW},
167#line 49 "scripts/genksyms/keywords.gperf" 172#line 51 "scripts/genksyms/keywords.gperf"
168 {"void", VOID_KEYW}, 173 {"void", VOID_KEYW},
169#line 42 "scripts/genksyms/keywords.gperf" 174#line 44 "scripts/genksyms/keywords.gperf"
170 {"short", SHORT_KEYW}, 175 {"short", SHORT_KEYW},
171 {""}, {""}, 176 {""}, {""},
172#line 50 "scripts/genksyms/keywords.gperf" 177#line 52 "scripts/genksyms/keywords.gperf"
173 {"volatile", VOLATILE_KEYW}, 178 {"volatile", VOLATILE_KEYW},
174 {""}, 179 {""},
175#line 37 "scripts/genksyms/keywords.gperf" 180#line 39 "scripts/genksyms/keywords.gperf"
176 {"float", FLOAT_KEYW}, 181 {"float", FLOAT_KEYW},
177#line 34 "scripts/genksyms/keywords.gperf" 182#line 36 "scripts/genksyms/keywords.gperf"
178 {"double", DOUBLE_KEYW}, 183 {"double", DOUBLE_KEYW},
179 {""}, 184 {""},
180#line 5 "scripts/genksyms/keywords.gperf" 185#line 7 "scripts/genksyms/keywords.gperf"
181 {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW}, 186 {"EXPORT_SYMBOL", EXPORT_SYMBOL_KEYW},
182 {""}, {""}, 187 {""}, {""},
183#line 38 "scripts/genksyms/keywords.gperf" 188#line 40 "scripts/genksyms/keywords.gperf"
184 {"inline", INLINE_KEYW}, 189 {"inline", INLINE_KEYW},
185#line 6 "scripts/genksyms/keywords.gperf" 190#line 8 "scripts/genksyms/keywords.gperf"
186 {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW}, 191 {"EXPORT_SYMBOL_GPL", EXPORT_SYMBOL_KEYW},
187#line 41 "scripts/genksyms/keywords.gperf" 192#line 43 "scripts/genksyms/keywords.gperf"
188 {"register", REGISTER_KEYW}, 193 {"register", REGISTER_KEYW},
189 {""}, 194 {""},
190#line 22 "scripts/genksyms/keywords.gperf" 195#line 24 "scripts/genksyms/keywords.gperf"
191 {"_Bool", BOOL_KEYW}, 196 {"_Bool", BOOL_KEYW},
192#line 43 "scripts/genksyms/keywords.gperf" 197#line 45 "scripts/genksyms/keywords.gperf"
193 {"signed", SIGNED_KEYW}, 198 {"signed", SIGNED_KEYW},
194 {""}, {""}, 199 {""}, {""},
195#line 40 "scripts/genksyms/keywords.gperf" 200#line 42 "scripts/genksyms/keywords.gperf"
196 {"long", LONG_KEYW} 201 {"long", LONG_KEYW}
197 }; 202 };
198 203
diff --git a/scripts/genksyms/keywords.gperf b/scripts/genksyms/keywords.gperf
index 5ef3733225fb..8fe977a4d57b 100644
--- a/scripts/genksyms/keywords.gperf
+++ b/scripts/genksyms/keywords.gperf
@@ -1,4 +1,6 @@
1%{ 1%{
2struct resword;
3static const struct resword *is_reserved_word(register const char *str, register unsigned int len);
2%} 4%}
3struct resword { const char *name; int token; } 5struct resword { const char *name; int token; }
4%% 6%%
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 6d69c7ccdcc7..80599e3a7994 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -30,7 +30,7 @@ silentoldconfig: $(obj)/conf
30 $< -s $(Kconfig) 30 $< -s $(Kconfig)
31 31
32localmodconfig: $(obj)/streamline_config.pl $(obj)/conf 32localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
33 $(Q)perl $< $(Kconfig) > .tmp.config 33 $(Q)perl $< $(srctree) $(Kconfig) > .tmp.config
34 $(Q)if [ -f .config ]; then \ 34 $(Q)if [ -f .config ]; then \
35 cmp -s .tmp.config .config || \ 35 cmp -s .tmp.config .config || \
36 (mv -f .config .config.old.1; \ 36 (mv -f .config .config.old.1; \
@@ -44,7 +44,7 @@ localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
44 $(Q)rm -f .tmp.config 44 $(Q)rm -f .tmp.config
45 45
46localyesconfig: $(obj)/streamline_config.pl $(obj)/conf 46localyesconfig: $(obj)/streamline_config.pl $(obj)/conf
47 $(Q)perl $< $(Kconfig) > .tmp.config 47 $(Q)perl $< $(srctree) $(Kconfig) > .tmp.config
48 $(Q)sed -i s/=m/=y/ .tmp.config 48 $(Q)sed -i s/=m/=y/ .tmp.config
49 $(Q)if [ -f .config ]; then \ 49 $(Q)if [ -f .config ]; then \
50 cmp -s .tmp.config .config || \ 50 cmp -s .tmp.config .config || \
diff --git a/scripts/kconfig/lex.zconf.c_shipped b/scripts/kconfig/lex.zconf.c_shipped
index dc3e81807d13..fdc7113b08d1 100644
--- a/scripts/kconfig/lex.zconf.c_shipped
+++ b/scripts/kconfig/lex.zconf.c_shipped
@@ -160,7 +160,15 @@ typedef unsigned int flex_uint32_t;
160 160
161/* Size of default input buffer. */ 161/* Size of default input buffer. */
162#ifndef YY_BUF_SIZE 162#ifndef YY_BUF_SIZE
163#ifdef __ia64__
164/* On IA-64, the buffer size is 16k, not 8k.
165 * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
166 * Ditto for the __ia64__ case accordingly.
167 */
168#define YY_BUF_SIZE 32768
169#else
163#define YY_BUF_SIZE 16384 170#define YY_BUF_SIZE 16384
171#endif /* __ia64__ */
164#endif 172#endif
165 173
166/* The state buf must be large enough to hold one state per character in the main buffer. 174/* The state buf must be large enough to hold one state per character in the main buffer.
@@ -802,7 +810,7 @@ static int last_ts, first_ts;
802static void zconf_endhelp(void); 810static void zconf_endhelp(void);
803static void zconf_endfile(void); 811static void zconf_endfile(void);
804 812
805void new_string(void) 813static void new_string(void)
806{ 814{
807 text = malloc(START_STRSIZE); 815 text = malloc(START_STRSIZE);
808 text_asize = START_STRSIZE; 816 text_asize = START_STRSIZE;
@@ -810,7 +818,7 @@ void new_string(void)
810 *text = 0; 818 *text = 0;
811} 819}
812 820
813void append_string(const char *str, int size) 821static void append_string(const char *str, int size)
814{ 822{
815 int new_size = text_size + size + 1; 823 int new_size = text_size + size + 1;
816 if (new_size > text_asize) { 824 if (new_size > text_asize) {
@@ -824,7 +832,7 @@ void append_string(const char *str, int size)
824 text[text_size] = 0; 832 text[text_size] = 0;
825} 833}
826 834
827void alloc_string(const char *str, int size) 835static void alloc_string(const char *str, int size)
828{ 836{
829 text = malloc(size + 1); 837 text = malloc(size + 1);
830 memcpy(text, str, size); 838 memcpy(text, str, size);
@@ -914,7 +922,12 @@ static int input (void );
914 922
915/* Amount of stuff to slurp up with each read. */ 923/* Amount of stuff to slurp up with each read. */
916#ifndef YY_READ_BUF_SIZE 924#ifndef YY_READ_BUF_SIZE
925#ifdef __ia64__
926/* On IA-64, the buffer size is 16k, not 8k */
927#define YY_READ_BUF_SIZE 16384
928#else
917#define YY_READ_BUF_SIZE 8192 929#define YY_READ_BUF_SIZE 8192
930#endif /* __ia64__ */
918#endif 931#endif
919 932
920/* Copy whatever the last rule matched to the standard output. */ 933/* Copy whatever the last rule matched to the standard output. */
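
Together with the YY_BUF_SIZE hunk earlier in this file, the regenerated lexer carries flex's IA-64 special case: the read chunk grows to 16 KiB and, because the default input buffer is twice the read chunk, YY_BUF_SIZE becomes 32768. A condensed sketch of the resulting defaults (the shipped code keeps the two #ifdef blocks separate):

#include <stdio.h>

/* Condensed sketch of the two flex defaults touched in this file. */
#ifdef __ia64__
# define YY_READ_BUF_SIZE 16384			/* 16 KiB read chunk on IA-64 */
# define YY_BUF_SIZE (2 * YY_READ_BUF_SIZE)	/* i.e. 32768 */
#else
# define YY_READ_BUF_SIZE 8192
# define YY_BUF_SIZE 16384
#endif

int main(void)
{
	printf("read chunk %d, input buffer %d\n", YY_READ_BUF_SIZE, YY_BUF_SIZE);
	return 0;
}
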
@@ -922,7 +935,7 @@ static int input (void );
922/* This used to be an fputs(), but since the string might contain NUL's, 935/* This used to be an fputs(), but since the string might contain NUL's,
923 * we now use fwrite(). 936 * we now use fwrite().
924 */ 937 */
925#define ECHO fwrite( zconftext, zconfleng, 1, zconfout ) 938#define ECHO do { if (fwrite( zconftext, zconfleng, 1, zconfout )) {} } while (0)
926#endif 939#endif
927 940
928/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL, 941/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL,
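
The ECHO change above wraps fwrite() in a do { ... } while (0), presumably so that its return value is consumed (newer glibc marks fwrite with warn_unused_result under fortify) while ECHO still behaves as a single statement inside if/else. A self-contained sketch of the same pattern; EMIT() is a made-up name for illustration only:

#include <stdio.h>

/* Consume a warn_unused_result return value without breaking the macro's
 * statement-like behaviour. */
#define EMIT(buf, len, stream) \
	do { if (fwrite((buf), (len), 1, (stream))) {} } while (0)

int main(void)
{
	const char text[] = "token\n";

	if (1)
		EMIT(text, sizeof(text) - 1, stdout);
	else			/* do/while(0) keeps the if/else pairing intact */
		return 1;
	return 0;
}
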
@@ -2060,8 +2073,8 @@ YY_BUFFER_STATE zconf_scan_string (yyconst char * yystr )
2060 2073
2061/** Setup the input buffer state to scan the given bytes. The next call to zconflex() will 2074/** Setup the input buffer state to scan the given bytes. The next call to zconflex() will
2062 * scan from a @e copy of @a bytes. 2075 * scan from a @e copy of @a bytes.
2063 * @param bytes the byte buffer to scan 2076 * @param yybytes the byte buffer to scan
2064 * @param len the number of bytes in the buffer pointed to by @a bytes. 2077 * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
2065 * 2078 *
2066 * @return the newly allocated buffer state object. 2079 * @return the newly allocated buffer state object.
2067 */ 2080 */
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 95984db8e1e0..0d800820c3cd 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -43,7 +43,6 @@
43# make oldconfig 43# make oldconfig
44# 44#
45my $config = ".config"; 45my $config = ".config";
46my $linuxpath = ".";
47 46
48my $uname = `uname -r`; 47my $uname = `uname -r`;
49chomp $uname; 48chomp $uname;
@@ -111,7 +110,11 @@ sub find_config {
111 110
112find_config; 111find_config;
113 112
114my @makefiles = `find $linuxpath -name Makefile`; 113# Get the build source and top level Kconfig file (passed in)
114my $ksource = $ARGV[0];
115my $kconfig = $ARGV[1];
116
117my @makefiles = `find $ksource -name Makefile`;
115my %depends; 118my %depends;
116my %selects; 119my %selects;
117my %prompts; 120my %prompts;
@@ -119,9 +122,6 @@ my %objects;
119my $var; 122my $var;
120my $cont = 0; 123my $cont = 0;
121 124
122# Get the top level Kconfig file (passed in)
123my $kconfig = $ARGV[0];
124
125# prevent recursion 125# prevent recursion
126my %read_kconfigs; 126my %read_kconfigs;
127 127
@@ -132,7 +132,7 @@ sub read_kconfig {
132 my $config; 132 my $config;
133 my @kconfigs; 133 my @kconfigs;
134 134
135 open(KIN, $kconfig) || die "Can't open $kconfig"; 135 open(KIN, "$ksource/$kconfig") || die "Can't open $kconfig";
136 while (<KIN>) { 136 while (<KIN>) {
137 chomp; 137 chomp;
138 138
diff --git a/scripts/kconfig/zconf.gperf b/scripts/kconfig/zconf.gperf
index 25ef5d01c0af..d8bc74249622 100644
--- a/scripts/kconfig/zconf.gperf
+++ b/scripts/kconfig/zconf.gperf
@@ -9,6 +9,8 @@
9 9
10struct kconf_id; 10struct kconf_id;
11 11
12static struct kconf_id *kconf_id_lookup(register const char *str, register unsigned int len);
13
12%% 14%%
13mainmenu, T_MAINMENU, TF_COMMAND 15mainmenu, T_MAINMENU, TF_COMMAND
14menu, T_MENU, TF_COMMAND 16menu, T_MENU, TF_COMMAND
diff --git a/scripts/kconfig/zconf.hash.c_shipped b/scripts/kconfig/zconf.hash.c_shipped
index 5c73d51339d8..c1748faf4634 100644
--- a/scripts/kconfig/zconf.hash.c_shipped
+++ b/scripts/kconfig/zconf.hash.c_shipped
@@ -30,6 +30,8 @@
30#endif 30#endif
31 31
32struct kconf_id; 32struct kconf_id;
33
34static struct kconf_id *kconf_id_lookup(register const char *str, register unsigned int len);
33/* maximum key range = 47, duplicates = 0 */ 35/* maximum key range = 47, duplicates = 0 */
34 36
35#ifdef __GNUC__ 37#ifdef __GNUC__
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index 21ff69c9ad4e..d8f7236cb0a3 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -39,7 +39,7 @@ static int last_ts, first_ts;
39static void zconf_endhelp(void); 39static void zconf_endhelp(void);
40static void zconf_endfile(void); 40static void zconf_endfile(void);
41 41
42void new_string(void) 42static void new_string(void)
43{ 43{
44 text = malloc(START_STRSIZE); 44 text = malloc(START_STRSIZE);
45 text_asize = START_STRSIZE; 45 text_asize = START_STRSIZE;
@@ -47,7 +47,7 @@ void new_string(void)
47 *text = 0; 47 *text = 0;
48} 48}
49 49
50void append_string(const char *str, int size) 50static void append_string(const char *str, int size)
51{ 51{
52 int new_size = text_size + size + 1; 52 int new_size = text_size + size + 1;
53 if (new_size > text_asize) { 53 if (new_size > text_asize) {
@@ -61,7 +61,7 @@ void append_string(const char *str, int size)
61 text[text_size] = 0; 61 text[text_size] = 0;
62} 62}
63 63
64void alloc_string(const char *str, int size) 64static void alloc_string(const char *str, int size)
65{ 65{
66 text = malloc(size + 1); 66 text = malloc(size + 1);
67 memcpy(text, str, size); 67 memcpy(text, str, size);
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index 95df833b5a9d..6e9dcd59aa87 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -1,24 +1,23 @@
1/* A Bison parser, made by GNU Bison 2.3. */
2 1
3/* Skeleton implementation for Bison's Yacc-like parsers in C 2/* A Bison parser, made by GNU Bison 2.4.1. */
4 3
5 Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 4/* Skeleton implementation for Bison's Yacc-like parsers in C
5
6 Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
6 Free Software Foundation, Inc. 7 Free Software Foundation, Inc.
7 8
8 This program is free software; you can redistribute it and/or modify 9 This program is free software: you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by 10 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option) 11 the Free Software Foundation, either version 3 of the License, or
11 any later version. 12 (at your option) any later version.
12 13
13 This program is distributed in the hope that it will be useful, 14 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details. 17 GNU General Public License for more details.
17 18
18 You should have received a copy of the GNU General Public License 19 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software 20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 Foundation, Inc., 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22 21
23/* As a special exception, you may create a larger work that contains 22/* As a special exception, you may create a larger work that contains
24 part or all of the Bison parser skeleton and distribute that work 23 part or all of the Bison parser skeleton and distribute that work
@@ -29,7 +28,7 @@
29 special exception, which will cause the skeleton and the resulting 28 special exception, which will cause the skeleton and the resulting
30 Bison output files to be licensed under the GNU General Public 29 Bison output files to be licensed under the GNU General Public
31 License without this special exception. 30 License without this special exception.
32 31
33 This special exception was added by the Free Software Foundation in 32 This special exception was added by the Free Software Foundation in
34 version 2.2 of Bison. */ 33 version 2.2 of Bison. */
35 34
@@ -47,7 +46,7 @@
47#define YYBISON 1 46#define YYBISON 1
48 47
49/* Bison version. */ 48/* Bison version. */
50#define YYBISON_VERSION "2.3" 49#define YYBISON_VERSION "2.4.1"
51 50
52/* Skeleton name. */ 51/* Skeleton name. */
53#define YYSKELETON_NAME "yacc.c" 52#define YYSKELETON_NAME "yacc.c"
@@ -55,94 +54,23 @@
55/* Pure parsers. */ 54/* Pure parsers. */
56#define YYPURE 0 55#define YYPURE 0
57 56
57/* Push parsers. */
58#define YYPUSH 0
59
60/* Pull parsers. */
61#define YYPULL 1
62
58/* Using locations. */ 63/* Using locations. */
59#define YYLSP_NEEDED 0 64#define YYLSP_NEEDED 0
60 65
61/* Substitute the variable and function names. */ 66/* Substitute the variable and function names. */
62#define yyparse zconfparse 67#define yyparse zconfparse
63#define yylex zconflex 68#define yylex zconflex
64#define yyerror zconferror 69#define yyerror zconferror
65#define yylval zconflval 70#define yylval zconflval
66#define yychar zconfchar 71#define yychar zconfchar
67#define yydebug zconfdebug 72#define yydebug zconfdebug
68#define yynerrs zconfnerrs 73#define yynerrs zconfnerrs
69
70
71/* Tokens. */
72#ifndef YYTOKENTYPE
73# define YYTOKENTYPE
74 /* Put the tokens into the symbol table, so that GDB and other debuggers
75 know about them. */
76 enum yytokentype {
77 T_MAINMENU = 258,
78 T_MENU = 259,
79 T_ENDMENU = 260,
80 T_SOURCE = 261,
81 T_CHOICE = 262,
82 T_ENDCHOICE = 263,
83 T_COMMENT = 264,
84 T_CONFIG = 265,
85 T_MENUCONFIG = 266,
86 T_HELP = 267,
87 T_HELPTEXT = 268,
88 T_IF = 269,
89 T_ENDIF = 270,
90 T_DEPENDS = 271,
91 T_OPTIONAL = 272,
92 T_PROMPT = 273,
93 T_TYPE = 274,
94 T_DEFAULT = 275,
95 T_SELECT = 276,
96 T_RANGE = 277,
97 T_OPTION = 278,
98 T_ON = 279,
99 T_WORD = 280,
100 T_WORD_QUOTE = 281,
101 T_UNEQUAL = 282,
102 T_CLOSE_PAREN = 283,
103 T_OPEN_PAREN = 284,
104 T_EOL = 285,
105 T_OR = 286,
106 T_AND = 287,
107 T_EQUAL = 288,
108 T_NOT = 289
109 };
110#endif
111/* Tokens. */
112#define T_MAINMENU 258
113#define T_MENU 259
114#define T_ENDMENU 260
115#define T_SOURCE 261
116#define T_CHOICE 262
117#define T_ENDCHOICE 263
118#define T_COMMENT 264
119#define T_CONFIG 265
120#define T_MENUCONFIG 266
121#define T_HELP 267
122#define T_HELPTEXT 268
123#define T_IF 269
124#define T_ENDIF 270
125#define T_DEPENDS 271
126#define T_OPTIONAL 272
127#define T_PROMPT 273
128#define T_TYPE 274
129#define T_DEFAULT 275
130#define T_SELECT 276
131#define T_RANGE 277
132#define T_OPTION 278
133#define T_ON 279
134#define T_WORD 280
135#define T_WORD_QUOTE 281
136#define T_UNEQUAL 282
137#define T_CLOSE_PAREN 283
138#define T_OPEN_PAREN 284
139#define T_EOL 285
140#define T_OR 286
141#define T_AND 287
142#define T_EQUAL 288
143#define T_NOT 289
144
145
146 74
147 75
148/* Copy the first part of user declarations. */ 76/* Copy the first part of user declarations. */
@@ -163,8 +91,6 @@
163#define LKC_DIRECT_LINK 91#define LKC_DIRECT_LINK
164#include "lkc.h" 92#include "lkc.h"
165 93
166#include "zconf.hash.c"
167
168#define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt) 94#define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt)
169 95
170#define PRINTD 0x0001 96#define PRINTD 0x0001
@@ -188,6 +114,7 @@ static struct menu *current_menu, *current_entry;
188#endif 114#endif
189 115
190 116
117
191/* Enabling traces. */ 118/* Enabling traces. */
192#ifndef YYDEBUG 119#ifndef YYDEBUG
193# define YYDEBUG 0 120# define YYDEBUG 0
@@ -206,31 +133,77 @@ static struct menu *current_menu, *current_entry;
206# define YYTOKEN_TABLE 0 133# define YYTOKEN_TABLE 0
207#endif 134#endif
208 135
136
137/* Tokens. */
138#ifndef YYTOKENTYPE
139# define YYTOKENTYPE
140 /* Put the tokens into the symbol table, so that GDB and other debuggers
141 know about them. */
142 enum yytokentype {
143 T_MAINMENU = 258,
144 T_MENU = 259,
145 T_ENDMENU = 260,
146 T_SOURCE = 261,
147 T_CHOICE = 262,
148 T_ENDCHOICE = 263,
149 T_COMMENT = 264,
150 T_CONFIG = 265,
151 T_MENUCONFIG = 266,
152 T_HELP = 267,
153 T_HELPTEXT = 268,
154 T_IF = 269,
155 T_ENDIF = 270,
156 T_DEPENDS = 271,
157 T_OPTIONAL = 272,
158 T_PROMPT = 273,
159 T_TYPE = 274,
160 T_DEFAULT = 275,
161 T_SELECT = 276,
162 T_RANGE = 277,
163 T_OPTION = 278,
164 T_ON = 279,
165 T_WORD = 280,
166 T_WORD_QUOTE = 281,
167 T_UNEQUAL = 282,
168 T_CLOSE_PAREN = 283,
169 T_OPEN_PAREN = 284,
170 T_EOL = 285,
171 T_OR = 286,
172 T_AND = 287,
173 T_EQUAL = 288,
174 T_NOT = 289
175 };
176#endif
177
178
179
209#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED 180#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
210typedef union YYSTYPE 181typedef union YYSTYPE
211
212{ 182{
183
184
213 char *string; 185 char *string;
214 struct file *file; 186 struct file *file;
215 struct symbol *symbol; 187 struct symbol *symbol;
216 struct expr *expr; 188 struct expr *expr;
217 struct menu *menu; 189 struct menu *menu;
218 struct kconf_id *id; 190 struct kconf_id *id;
219}
220/* Line 187 of yacc.c. */
221 191
222 YYSTYPE; 192
193
194} YYSTYPE;
195# define YYSTYPE_IS_TRIVIAL 1
223# define yystype YYSTYPE /* obsolescent; will be withdrawn */ 196# define yystype YYSTYPE /* obsolescent; will be withdrawn */
224# define YYSTYPE_IS_DECLARED 1 197# define YYSTYPE_IS_DECLARED 1
225# define YYSTYPE_IS_TRIVIAL 1
226#endif 198#endif
227 199
228 200
229
230/* Copy the second part of user declarations. */ 201/* Copy the second part of user declarations. */
231 202
232 203
233/* Line 216 of yacc.c. */ 204/* Include zconf.hash.c here so it can see the token constants. */
205#include "zconf.hash.c"
206
234 207
235 208
236#ifdef short 209#ifdef short
@@ -306,14 +279,14 @@ typedef short int yytype_int16;
306#if (defined __STDC__ || defined __C99__FUNC__ \ 279#if (defined __STDC__ || defined __C99__FUNC__ \
307 || defined __cplusplus || defined _MSC_VER) 280 || defined __cplusplus || defined _MSC_VER)
308static int 281static int
309YYID (int i) 282YYID (int yyi)
310#else 283#else
311static int 284static int
312YYID (i) 285YYID (yyi)
313 int i; 286 int yyi;
314#endif 287#endif
315{ 288{
316 return i; 289 return yyi;
317} 290}
318#endif 291#endif
319 292
@@ -394,9 +367,9 @@ void free (void *); /* INFRINGES ON USER NAME SPACE */
394/* A type that is properly aligned for any stack member. */ 367/* A type that is properly aligned for any stack member. */
395union yyalloc 368union yyalloc
396{ 369{
397 yytype_int16 yyss; 370 yytype_int16 yyss_alloc;
398 YYSTYPE yyvs; 371 YYSTYPE yyvs_alloc;
399 }; 372};
400 373
401/* The size of the maximum gap between one aligned stack and the next. */ 374/* The size of the maximum gap between one aligned stack and the next. */
402# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) 375# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)
@@ -430,12 +403,12 @@ union yyalloc
430 elements in the stack, and YYPTR gives the new location of the 403 elements in the stack, and YYPTR gives the new location of the
431 stack. Advance YYPTR to a properly aligned location for the next 404 stack. Advance YYPTR to a properly aligned location for the next
432 stack. */ 405 stack. */
433# define YYSTACK_RELOCATE(Stack) \ 406# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
434 do \ 407 do \
435 { \ 408 { \
436 YYSIZE_T yynewbytes; \ 409 YYSIZE_T yynewbytes; \
437 YYCOPY (&yyptr->Stack, Stack, yysize); \ 410 YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
438 Stack = &yyptr->Stack; \ 411 Stack = &yyptr->Stack_alloc; \
439 yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ 412 yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
440 yyptr += yynewbytes / sizeof (*yyptr); \ 413 yyptr += yynewbytes / sizeof (*yyptr); \
441 } \ 414 } \
@@ -558,18 +531,18 @@ static const yytype_int8 yyrhs[] =
558/* YYRLINE[YYN] -- source line where rule number YYN was defined. */ 531/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
559static const yytype_uint16 yyrline[] = 532static const yytype_uint16 yyrline[] =
560{ 533{
561 0, 104, 104, 106, 108, 109, 110, 111, 112, 113, 534 0, 107, 107, 109, 111, 112, 113, 114, 115, 116,
562 114, 118, 122, 122, 122, 122, 122, 122, 122, 126, 535 117, 121, 125, 125, 125, 125, 125, 125, 125, 129,
563 127, 128, 129, 130, 131, 135, 136, 142, 150, 156, 536 130, 131, 132, 133, 134, 138, 139, 145, 153, 159,
564 164, 174, 176, 177, 178, 179, 180, 181, 184, 192, 537 167, 177, 179, 180, 181, 182, 183, 184, 187, 195,
565 198, 208, 214, 220, 223, 225, 236, 237, 242, 251, 538 201, 211, 217, 223, 226, 228, 239, 240, 245, 254,
566 256, 264, 267, 269, 270, 271, 272, 273, 276, 282, 539 259, 267, 270, 272, 273, 274, 275, 276, 279, 285,
567 293, 299, 309, 311, 316, 324, 332, 335, 337, 338, 540 296, 302, 312, 314, 319, 327, 335, 338, 340, 341,
568 339, 344, 351, 356, 364, 367, 369, 370, 371, 374, 541 342, 347, 354, 359, 367, 370, 372, 373, 374, 377,
569 382, 389, 396, 402, 409, 411, 412, 413, 416, 424, 542 385, 392, 399, 405, 412, 414, 415, 416, 419, 427,
570 426, 431, 432, 435, 436, 437, 441, 442, 445, 446, 543 429, 434, 435, 438, 439, 440, 444, 445, 448, 449,
571 449, 450, 451, 452, 453, 454, 455, 458, 459, 462, 544 452, 453, 454, 455, 456, 457, 458, 461, 462, 465,
572 463 545 466
573}; 546};
574#endif 547#endif
575 548
@@ -985,17 +958,20 @@ yy_symbol_print (yyoutput, yytype, yyvaluep)
985#if (defined __STDC__ || defined __C99__FUNC__ \ 958#if (defined __STDC__ || defined __C99__FUNC__ \
986 || defined __cplusplus || defined _MSC_VER) 959 || defined __cplusplus || defined _MSC_VER)
987static void 960static void
988yy_stack_print (yytype_int16 *bottom, yytype_int16 *top) 961yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
989#else 962#else
990static void 963static void
991yy_stack_print (bottom, top) 964yy_stack_print (yybottom, yytop)
992 yytype_int16 *bottom; 965 yytype_int16 *yybottom;
993 yytype_int16 *top; 966 yytype_int16 *yytop;
994#endif 967#endif
995{ 968{
996 YYFPRINTF (stderr, "Stack now"); 969 YYFPRINTF (stderr, "Stack now");
997 for (; bottom <= top; ++bottom) 970 for (; yybottom <= yytop; yybottom++)
998 YYFPRINTF (stderr, " %d", *bottom); 971 {
972 int yybot = *yybottom;
973 YYFPRINTF (stderr, " %d", yybot);
974 }
999 YYFPRINTF (stderr, "\n"); 975 YYFPRINTF (stderr, "\n");
1000} 976}
1001 977
@@ -1029,11 +1005,11 @@ yy_reduce_print (yyvsp, yyrule)
1029 /* The symbols being reduced. */ 1005 /* The symbols being reduced. */
1030 for (yyi = 0; yyi < yynrhs; yyi++) 1006 for (yyi = 0; yyi < yynrhs; yyi++)
1031 { 1007 {
1032 fprintf (stderr, " $%d = ", yyi + 1); 1008 YYFPRINTF (stderr, " $%d = ", yyi + 1);
1033 yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi], 1009 yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
1034 &(yyvsp[(yyi + 1) - (yynrhs)]) 1010 &(yyvsp[(yyi + 1) - (yynrhs)])
1035 ); 1011 );
1036 fprintf (stderr, "\n"); 1012 YYFPRINTF (stderr, "\n");
1037 } 1013 }
1038} 1014}
1039 1015
@@ -1343,10 +1319,8 @@ yydestruct (yymsg, yytype, yyvaluep)
1343 break; 1319 break;
1344 } 1320 }
1345} 1321}
1346
1347 1322
1348/* Prevent warnings from -Wmissing-prototypes. */ 1323/* Prevent warnings from -Wmissing-prototypes. */
1349
1350#ifdef YYPARSE_PARAM 1324#ifdef YYPARSE_PARAM
1351#if defined __STDC__ || defined __cplusplus 1325#if defined __STDC__ || defined __cplusplus
1352int yyparse (void *YYPARSE_PARAM); 1326int yyparse (void *YYPARSE_PARAM);
@@ -1362,11 +1336,10 @@ int yyparse ();
1362#endif /* ! YYPARSE_PARAM */ 1336#endif /* ! YYPARSE_PARAM */
1363 1337
1364 1338
1365 1339/* The lookahead symbol. */
1366/* The look-ahead symbol. */
1367int yychar; 1340int yychar;
1368 1341
1369/* The semantic value of the look-ahead symbol. */ 1342/* The semantic value of the lookahead symbol. */
1370YYSTYPE yylval; 1343YYSTYPE yylval;
1371 1344
1372/* Number of syntax errors so far. */ 1345/* Number of syntax errors so far. */
@@ -1374,9 +1347,9 @@ int yynerrs;
1374 1347
1375 1348
1376 1349
1377/*----------. 1350/*-------------------------.
1378| yyparse. | 1351| yyparse or yypush_parse. |
1379`----------*/ 1352`-------------------------*/
1380 1353
1381#ifdef YYPARSE_PARAM 1354#ifdef YYPARSE_PARAM
1382#if (defined __STDC__ || defined __C99__FUNC__ \ 1355#if (defined __STDC__ || defined __C99__FUNC__ \
@@ -1400,66 +1373,68 @@ yyparse ()
1400#endif 1373#endif
1401#endif 1374#endif
1402{ 1375{
1403
1404 int yystate;
1405 int yyn;
1406 int yyresult;
1407 /* Number of tokens to shift before error messages enabled. */
1408 int yyerrstatus;
1409 /* Look-ahead token as an internal (translated) token number. */
1410 int yytoken = 0;
1411#if YYERROR_VERBOSE
1412 /* Buffer for error messages, and its allocated size. */
1413 char yymsgbuf[128];
1414 char *yymsg = yymsgbuf;
1415 YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
1416#endif
1417
1418 /* Three stacks and their tools:
1419 `yyss': related to states,
1420 `yyvs': related to semantic values,
1421 `yyls': related to locations.
1422 1376
1423 Refer to the stacks thru separate pointers, to allow yyoverflow
1424 to reallocate them elsewhere. */
1425 1377
1426 /* The state stack. */ 1378 int yystate;
1427 yytype_int16 yyssa[YYINITDEPTH]; 1379 /* Number of tokens to shift before error messages enabled. */
1428 yytype_int16 *yyss = yyssa; 1380 int yyerrstatus;
1429 yytype_int16 *yyssp;
1430 1381
1431 /* The semantic value stack. */ 1382 /* The stacks and their tools:
1432 YYSTYPE yyvsa[YYINITDEPTH]; 1383 `yyss': related to states.
1433 YYSTYPE *yyvs = yyvsa; 1384 `yyvs': related to semantic values.
1434 YYSTYPE *yyvsp;
1435 1385
1386 Refer to the stacks thru separate pointers, to allow yyoverflow
1387 to reallocate them elsewhere. */
1436 1388
1389 /* The state stack. */
1390 yytype_int16 yyssa[YYINITDEPTH];
1391 yytype_int16 *yyss;
1392 yytype_int16 *yyssp;
1437 1393
1438#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) 1394 /* The semantic value stack. */
1395 YYSTYPE yyvsa[YYINITDEPTH];
1396 YYSTYPE *yyvs;
1397 YYSTYPE *yyvsp;
1439 1398
1440 YYSIZE_T yystacksize = YYINITDEPTH; 1399 YYSIZE_T yystacksize;
1441 1400
1401 int yyn;
1402 int yyresult;
1403 /* Lookahead token as an internal (translated) token number. */
1404 int yytoken;
1442 /* The variables used to return semantic value and location from the 1405 /* The variables used to return semantic value and location from the
1443 action routines. */ 1406 action routines. */
1444 YYSTYPE yyval; 1407 YYSTYPE yyval;
1445 1408
1409#if YYERROR_VERBOSE
1410 /* Buffer for error messages, and its allocated size. */
1411 char yymsgbuf[128];
1412 char *yymsg = yymsgbuf;
1413 YYSIZE_T yymsg_alloc = sizeof yymsgbuf;
1414#endif
1415
1416#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N))
1446 1417
1447 /* The number of symbols on the RHS of the reduced rule. 1418 /* The number of symbols on the RHS of the reduced rule.
1448 Keep to zero when no symbol should be popped. */ 1419 Keep to zero when no symbol should be popped. */
1449 int yylen = 0; 1420 int yylen = 0;
1450 1421
1422 yytoken = 0;
1423 yyss = yyssa;
1424 yyvs = yyvsa;
1425 yystacksize = YYINITDEPTH;
1426
1451 YYDPRINTF ((stderr, "Starting parse\n")); 1427 YYDPRINTF ((stderr, "Starting parse\n"));
1452 1428
1453 yystate = 0; 1429 yystate = 0;
1454 yyerrstatus = 0; 1430 yyerrstatus = 0;
1455 yynerrs = 0; 1431 yynerrs = 0;
1456 yychar = YYEMPTY; /* Cause a token to be read. */ 1432 yychar = YYEMPTY; /* Cause a token to be read. */
1457 1433
1458 /* Initialize stack pointers. 1434 /* Initialize stack pointers.
1459 Waste one element of value and location stack 1435 Waste one element of value and location stack
1460 so that they stay on the same level as the state stack. 1436 so that they stay on the same level as the state stack.
1461 The wasted elements are never initialized. */ 1437 The wasted elements are never initialized. */
1462
1463 yyssp = yyss; 1438 yyssp = yyss;
1464 yyvsp = yyvs; 1439 yyvsp = yyvs;
1465 1440
@@ -1489,7 +1464,6 @@ yyparse ()
1489 YYSTYPE *yyvs1 = yyvs; 1464 YYSTYPE *yyvs1 = yyvs;
1490 yytype_int16 *yyss1 = yyss; 1465 yytype_int16 *yyss1 = yyss;
1491 1466
1492
1493 /* Each stack pointer address is followed by the size of the 1467 /* Each stack pointer address is followed by the size of the
1494 data in use in that stack, in bytes. This used to be a 1468 data in use in that stack, in bytes. This used to be a
1495 conditional around just the two extra args, but that might 1469 conditional around just the two extra args, but that might
@@ -1497,7 +1471,6 @@ yyparse ()
1497 yyoverflow (YY_("memory exhausted"), 1471 yyoverflow (YY_("memory exhausted"),
1498 &yyss1, yysize * sizeof (*yyssp), 1472 &yyss1, yysize * sizeof (*yyssp),
1499 &yyvs1, yysize * sizeof (*yyvsp), 1473 &yyvs1, yysize * sizeof (*yyvsp),
1500
1501 &yystacksize); 1474 &yystacksize);
1502 1475
1503 yyss = yyss1; 1476 yyss = yyss1;
@@ -1520,9 +1493,8 @@ yyparse ()
1520 (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); 1493 (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
1521 if (! yyptr) 1494 if (! yyptr)
1522 goto yyexhaustedlab; 1495 goto yyexhaustedlab;
1523 YYSTACK_RELOCATE (yyss); 1496 YYSTACK_RELOCATE (yyss_alloc, yyss);
1524 YYSTACK_RELOCATE (yyvs); 1497 YYSTACK_RELOCATE (yyvs_alloc, yyvs);
1525
1526# undef YYSTACK_RELOCATE 1498# undef YYSTACK_RELOCATE
1527 if (yyss1 != yyssa) 1499 if (yyss1 != yyssa)
1528 YYSTACK_FREE (yyss1); 1500 YYSTACK_FREE (yyss1);
@@ -1533,7 +1505,6 @@ yyparse ()
1533 yyssp = yyss + yysize - 1; 1505 yyssp = yyss + yysize - 1;
1534 yyvsp = yyvs + yysize - 1; 1506 yyvsp = yyvs + yysize - 1;
1535 1507
1536
1537 YYDPRINTF ((stderr, "Stack size increased to %lu\n", 1508 YYDPRINTF ((stderr, "Stack size increased to %lu\n",
1538 (unsigned long int) yystacksize)); 1509 (unsigned long int) yystacksize));
1539 1510
@@ -1543,6 +1514,9 @@ yyparse ()
1543 1514
1544 YYDPRINTF ((stderr, "Entering state %d\n", yystate)); 1515 YYDPRINTF ((stderr, "Entering state %d\n", yystate));
1545 1516
1517 if (yystate == YYFINAL)
1518 YYACCEPT;
1519
1546 goto yybackup; 1520 goto yybackup;
1547 1521
1548/*-----------. 1522/*-----------.
@@ -1551,16 +1525,16 @@ yyparse ()
1551yybackup: 1525yybackup:
1552 1526
1553 /* Do appropriate processing given the current state. Read a 1527 /* Do appropriate processing given the current state. Read a
1554 look-ahead token if we need one and don't already have one. */ 1528 lookahead token if we need one and don't already have one. */
1555 1529
1556 /* First try to decide what to do without reference to look-ahead token. */ 1530 /* First try to decide what to do without reference to lookahead token. */
1557 yyn = yypact[yystate]; 1531 yyn = yypact[yystate];
1558 if (yyn == YYPACT_NINF) 1532 if (yyn == YYPACT_NINF)
1559 goto yydefault; 1533 goto yydefault;
1560 1534
1561 /* Not known => get a look-ahead token if don't already have one. */ 1535 /* Not known => get a lookahead token if don't already have one. */
1562 1536
1563 /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol. */ 1537 /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
1564 if (yychar == YYEMPTY) 1538 if (yychar == YYEMPTY)
1565 { 1539 {
1566 YYDPRINTF ((stderr, "Reading a token: ")); 1540 YYDPRINTF ((stderr, "Reading a token: "));
@@ -1592,20 +1566,16 @@ yybackup:
1592 goto yyreduce; 1566 goto yyreduce;
1593 } 1567 }
1594 1568
1595 if (yyn == YYFINAL)
1596 YYACCEPT;
1597
1598 /* Count tokens shifted since error; after three, turn off error 1569 /* Count tokens shifted since error; after three, turn off error
1599 status. */ 1570 status. */
1600 if (yyerrstatus) 1571 if (yyerrstatus)
1601 yyerrstatus--; 1572 yyerrstatus--;
1602 1573
1603 /* Shift the look-ahead token. */ 1574 /* Shift the lookahead token. */
1604 YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); 1575 YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
1605 1576
1606 /* Discard the shifted token unless it is eof. */ 1577 /* Discard the shifted token. */
1607 if (yychar != YYEOF) 1578 yychar = YYEMPTY;
1608 yychar = YYEMPTY;
1609 1579
1610 yystate = yyn; 1580 yystate = yyn;
1611 *++yyvsp = yylval; 1581 *++yyvsp = yylval;
@@ -2029,7 +1999,6 @@ yyreduce:
2029 break; 1999 break;
2030 2000
2031 2001
2032/* Line 1267 of yacc.c. */
2033 2002
2034 default: break; 2003 default: break;
2035 } 2004 }
@@ -2041,7 +2010,6 @@ yyreduce:
2041 2010
2042 *++yyvsp = yyval; 2011 *++yyvsp = yyval;
2043 2012
2044
2045 /* Now `shift' the result of the reduction. Determine what state 2013 /* Now `shift' the result of the reduction. Determine what state
2046 that goes to, based on the state we popped back to and the rule 2014 that goes to, based on the state we popped back to and the rule
2047 number reduced by. */ 2015 number reduced by. */
@@ -2106,7 +2074,7 @@ yyerrlab:
2106 2074
2107 if (yyerrstatus == 3) 2075 if (yyerrstatus == 3)
2108 { 2076 {
2109 /* If just tried and failed to reuse look-ahead token after an 2077 /* If just tried and failed to reuse lookahead token after an
2110 error, discard it. */ 2078 error, discard it. */
2111 2079
2112 if (yychar <= YYEOF) 2080 if (yychar <= YYEOF)
@@ -2123,7 +2091,7 @@ yyerrlab:
2123 } 2091 }
2124 } 2092 }
2125 2093
2126 /* Else will try to reuse look-ahead token after shifting the error 2094 /* Else will try to reuse lookahead token after shifting the error
2127 token. */ 2095 token. */
2128 goto yyerrlab1; 2096 goto yyerrlab1;
2129 2097
@@ -2180,9 +2148,6 @@ yyerrlab1:
2180 YY_STACK_PRINT (yyss, yyssp); 2148 YY_STACK_PRINT (yyss, yyssp);
2181 } 2149 }
2182 2150
2183 if (yyn == YYFINAL)
2184 YYACCEPT;
2185
2186 *++yyvsp = yylval; 2151 *++yyvsp = yylval;
2187 2152
2188 2153
@@ -2207,7 +2172,7 @@ yyabortlab:
2207 yyresult = 1; 2172 yyresult = 1;
2208 goto yyreturn; 2173 goto yyreturn;
2209 2174
2210#ifndef yyoverflow 2175#if !defined(yyoverflow) || YYERROR_VERBOSE
2211/*-------------------------------------------------. 2176/*-------------------------------------------------.
2212| yyexhaustedlab -- memory exhaustion comes here. | 2177| yyexhaustedlab -- memory exhaustion comes here. |
2213`-------------------------------------------------*/ 2178`-------------------------------------------------*/
@@ -2218,7 +2183,7 @@ yyexhaustedlab:
2218#endif 2183#endif
2219 2184
2220yyreturn: 2185yyreturn:
2221 if (yychar != YYEOF && yychar != YYEMPTY) 2186 if (yychar != YYEMPTY)
2222 yydestruct ("Cleanup: discarding lookahead", 2187 yydestruct ("Cleanup: discarding lookahead",
2223 yytoken, &yylval); 2188 yytoken, &yylval);
2224 /* Do not reclaim the symbols of the rule which action triggered 2189 /* Do not reclaim the symbols of the rule which action triggered
@@ -2284,7 +2249,7 @@ void conf_parse(const char *name)
2284 sym_set_change_count(1); 2249 sym_set_change_count(1);
2285} 2250}
2286 2251
2287const char *zconf_tokenname(int token) 2252static const char *zconf_tokenname(int token)
2288{ 2253{
2289 switch (token) { 2254 switch (token) {
2290 case T_MENU: return "menu"; 2255 case T_MENU: return "menu";
@@ -2348,7 +2313,7 @@ static void zconferror(const char *err)
2348#endif 2313#endif
2349} 2314}
2350 2315
2351void print_quoted_string(FILE *out, const char *str) 2316static void print_quoted_string(FILE *out, const char *str)
2352{ 2317{
2353 const char *p; 2318 const char *p;
2354 int len; 2319 int len;
@@ -2365,7 +2330,7 @@ void print_quoted_string(FILE *out, const char *str)
2365 putc('"', out); 2330 putc('"', out);
2366} 2331}
2367 2332
2368void print_symbol(FILE *out, struct menu *menu) 2333static void print_symbol(FILE *out, struct menu *menu)
2369{ 2334{
2370 struct symbol *sym = menu->sym; 2335 struct symbol *sym = menu->sym;
2371 struct property *prop; 2336 struct property *prop;
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 9710b82466f2..8c43491f8cc9 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -14,8 +14,6 @@
14#define LKC_DIRECT_LINK 14#define LKC_DIRECT_LINK
15#include "lkc.h" 15#include "lkc.h"
16 16
17#include "zconf.hash.c"
18
19#define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt) 17#define printd(mask, fmt...) if (cdebug & (mask)) printf(fmt)
20 18
21#define PRINTD 0x0001 19#define PRINTD 0x0001
@@ -100,6 +98,11 @@ static struct menu *current_menu, *current_entry;
100 menu_end_menu(); 98 menu_end_menu();
101} if_entry menu_entry choice_entry 99} if_entry menu_entry choice_entry
102 100
101%{
102/* Include zconf.hash.c here so it can see the token constants. */
103#include "zconf.hash.c"
104%}
105
103%% 106%%
104input: stmt_list; 107input: stmt_list;
105 108
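
Moving the #include of the gperf output out of the first %{ %} block and below the token declarations matters because bison 2.4 (see the regenerated zconf.tab.c_shipped above) emits the yytokentype enum after the first block of user declarations; the keyword table refers to those constants, so it has to be pasted in after them. A stripped-down C sketch of the ordering constraint, with a hand-written stand-in for the gperf wordlist; names and values are illustrative:

#include <stdio.h>

/* Stand-ins for what bison and gperf generate. */
enum yytokentype { T_MAINMENU = 258, T_MENU = 259, T_ENDMENU = 260 };

struct kconf_id { const char *name; int token; };

/* This table compiles only because the enum above is already visible --
 * the same reason zconf.hash.c is now included after the token section. */
static const struct kconf_id wordlist[] = {
	{ "mainmenu", T_MAINMENU },
	{ "menu",     T_MENU },
	{ "endmenu",  T_ENDMENU },
};

int main(void)
{
	printf("%s -> %d\n", wordlist[0].name, wordlist[0].token);
	return 0;
}
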
@@ -501,7 +504,7 @@ void conf_parse(const char *name)
501 sym_set_change_count(1); 504 sym_set_change_count(1);
502} 505}
503 506
504const char *zconf_tokenname(int token) 507static const char *zconf_tokenname(int token)
505{ 508{
506 switch (token) { 509 switch (token) {
507 case T_MENU: return "menu"; 510 case T_MENU: return "menu";
@@ -565,7 +568,7 @@ static void zconferror(const char *err)
565#endif 568#endif
566} 569}
567 570
568void print_quoted_string(FILE *out, const char *str) 571static void print_quoted_string(FILE *out, const char *str)
569{ 572{
570 const char *p; 573 const char *p;
571 int len; 574 int len;
@@ -582,7 +585,7 @@ void print_quoted_string(FILE *out, const char *str)
582 putc('"', out); 585 putc('"', out);
583} 586}
584 587
585void print_symbol(FILE *out, struct menu *menu) 588static void print_symbol(FILE *out, struct menu *menu)
586{ 589{
587 struct symbol *sym = menu->sym; 590 struct symbol *sym = menu->sym;
588 struct property *prop; 591 struct property *prop;
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index bfb8b2cdd92a..f0d14452632b 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -6,77 +6,93 @@
6# all the offsets to the calls to mcount. 6# all the offsets to the calls to mcount.
7# 7#
8# 8#
9# What we want to end up with is a section in vmlinux called 9# What we want to end up with this is that each object file will have a
10# __mcount_loc that contains a list of pointers to all the 10# section called __mcount_loc that will hold the list of pointers to mcount
11# call sites in the kernel that call mcount. Later on boot up, the kernel 11# callers. After final linking, the vmlinux will have within .init.data the
12# will read this list, save the locations and turn them into nops. 12# list of all callers to mcount between __start_mcount_loc and __stop_mcount_loc.
13# When tracing or profiling is later enabled, these locations will then 13# Later on boot up, the kernel will read this list, save the locations and turn
14# be converted back to pointers to some function. 14# them into nops. When tracing or profiling is later enabled, these locations
15# will then be converted back to pointers to some function.
15# 16#
16# This is no easy feat. This script is called just after the original 17# This is no easy feat. This script is called just after the original
17# object is compiled and before it is linked. 18# object is compiled and before it is linked.
18# 19#
19# The references to the call sites are offsets from the section of text 20# When parse this object file using 'objdump', the references to the call
20# that the call site is in. Hence, all functions in a section that 21# sites are offsets from the section that the call site is in. Hence, all
21# has a call site to mcount, will have the offset from the beginning of 22# functions in a section that has a call site to mcount, will have the
22# the section and not the beginning of the function. 23# offset from the beginning of the section and not the beginning of the
24# function.
25#
26# But where this section will reside finally in vmlinx is undetermined at
27# this point. So we can't use this kind of offsets to record the final
28# address of this call site.
29#
30# The trick is to change the call offset referring the start of a section to
31# referring a function symbol in this section. During the link step, 'ld' will
32# compute the final address according to the information we record.
23# 33#
24# The trick is to find a way to record the beginning of the section.
25# The way we do this is to look at the first function in the section
26# which will also be the location of that section after final link.
27# e.g. 34# e.g.
28# 35#
29# .section ".sched.text", "ax" 36# .section ".sched.text", "ax"
30# .globl my_func
31# my_func:
32# [...] 37# [...]
33# call mcount (offset: 0x5) 38# func1:
39# [...]
40# call mcount (offset: 0x10)
34# [...] 41# [...]
35# ret 42# ret
36# other_func: 43# .globl fun2
44# func2: (offset: 0x20)
37# [...] 45# [...]
38# call mcount (offset: 0x1b) 46# [...]
47# ret
48# func3:
49# [...]
50# call mcount (offset: 0x30)
39# [...] 51# [...]
40# 52#
41# Both relocation offsets for the mcounts in the above example will be 53# Both relocation offsets for the mcounts in the above example will be
42# offset from .sched.text. If we make another file called tmp.s with: 54# offset from .sched.text. If we choose global symbol func2 as a reference and
55# make another file called tmp.s with the new offsets:
43# 56#
44# .section __mcount_loc 57# .section __mcount_loc
45# .quad my_func + 0x5 58# .quad func2 - 0x10
46# .quad my_func + 0x1b 59# .quad func2 + 0x10
47# 60#
48# We can then compile this tmp.s into tmp.o, and link it to the original 61# We can then compile this tmp.s into tmp.o, and link it back to the original
49# object. 62# object.
50# 63#
51# But this gets hard if my_func is not globl (a static function). 64# In our algorithm, we will choose the first global function we meet in this
52# In such a case we have: 65# section as the reference. But this gets hard if there is no global functions
66# in this section. In such a case we have to select a local one. E.g. func1:
53# 67#
54# .section ".sched.text", "ax" 68# .section ".sched.text", "ax"
55# my_func: 69# func1:
56# [...] 70# [...]
57# call mcount (offset: 0x5) 71# call mcount (offset: 0x10)
58# [...] 72# [...]
59# ret 73# ret
60# other_func: 74# func2:
61# [...] 75# [...]
62# call mcount (offset: 0x1b) 76# call mcount (offset: 0x20)
63# [...] 77# [...]
78# .section "other.section"
64# 79#
65# If we make the tmp.s the same as above, when we link together with 80# If we make the tmp.s the same as above, when we link together with
66# the original object, we will end up with two symbols for my_func: 81# the original object, we will end up with two symbols for func1:
67# one local, one global. After final compile, we will end up with 82# one local, one global. After final compile, we will end up with
68# an undefined reference to my_func. 83# an undefined reference to func1 or a wrong reference to another global
84# func1 in other files.
69# 85#
70# Since local objects can reference local variables, we need to find 86# Since local objects can reference local variables, we need to find
71# a way to make tmp.o reference the local objects of the original object 87# a way to make tmp.o reference the local objects of the original object
72# file after it is linked together. To do this, we convert the my_func 88# file after it is linked together. To do this, we convert func1
73# into a global symbol before linking tmp.o. Then after we link tmp.o 89# into a global symbol before linking tmp.o. Then after we link tmp.o
74# we will only have a single symbol for my_func that is global. 90# we will only have a single symbol for func1 that is global.
75# We can convert my_func back into a local symbol and we are done. 91# We can convert func1 back into a local symbol and we are done.
76# 92#
77# Here are the steps we take: 93# Here are the steps we take:
78# 94#
79# 1) Record all the local symbols by using 'nm' 95# 1) Record all the local and weak symbols by using 'nm'
80# 2) Use objdump to find all the call site offsets and sections for 96# 2) Use objdump to find all the call site offsets and sections for
81# mcount. 97# mcount.
82# 3) Compile the list into its own object. 98# 3) Compile the list into its own object.
@@ -86,10 +102,8 @@
86# 6) Link together this new object with the list object. 102# 6) Link together this new object with the list object.
87# 7) Convert the local functions back to local symbols and rename 103# 7) Convert the local functions back to local symbols and rename
88# the result as the original object. 104# the result as the original object.
89# End.
90# 8) Link the object with the list object. 105# 8) Link the object with the list object.
91# 9) Move the result back to the original object. 106# 9) Move the result back to the original object.
92# End.
93# 107#
94 108
95use strict; 109use strict;
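
The rewritten comment leaves the arithmetic implicit: with the reference symbol func2 at offset 0x20 in .sched.text and mcount call sites at 0x10 and 0x30, the tmp.s entries are func2 - 0x10 and func2 + 0x10, i.e. call-site offset minus reference-symbol offset. A small C sketch that reproduces those two lines from the numbers in the comment:

#include <stdio.h>

int main(void)
{
	unsigned long ref_offset = 0x20;		/* func2: in the example */
	unsigned long call_sites[] = { 0x10, 0x30 };	/* mcount call offsets */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		long rel = (long)(call_sites[i] - ref_offset);

		printf(".quad func2 %c 0x%lx\n", rel < 0 ? '-' : '+',
		       (unsigned long)(rel < 0 ? -rel : rel));
	}
	return 0;
}
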
@@ -99,7 +113,7 @@ $P =~ s@.*/@@g;
99 113
100my $V = '0.1'; 114my $V = '0.1';
101 115
102if ($#ARGV < 7) { 116if ($#ARGV != 10) {
103 print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n"; 117 print "usage: $P arch bits objdump objcopy cc ld nm rm mv is_module inputfile\n";
104 print "version: $V\n"; 118 print "version: $V\n";
105 exit(1); 119 exit(1);
@@ -109,7 +123,7 @@ my ($arch, $bits, $objdump, $objcopy, $cc,
109 $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV; 123 $ld, $nm, $rm, $mv, $is_module, $inputfile) = @ARGV;
110 124
111# This file refers to mcount and shouldn't be ftraced, so lets' ignore it 125# This file refers to mcount and shouldn't be ftraced, so lets' ignore it
112if ($inputfile eq "kernel/trace/ftrace.o") { 126if ($inputfile =~ m,kernel/trace/ftrace\.o$,) {
113 exit(0); 127 exit(0);
114} 128}
115 129
@@ -138,13 +152,47 @@ my %weak; # List of weak functions
138my %convert; # List of local functions used that needs conversion 152my %convert; # List of local functions used that needs conversion
139 153
140my $type; 154my $type;
141my $nm_regex; # Find the local functions (return function) 155my $local_regex; # Match a local function (return function)
156my $weak_regex; # Match a weak function (return function)
142my $section_regex; # Find the start of a section 157my $section_regex; # Find the start of a section
143my $function_regex; # Find the name of a function 158my $function_regex; # Find the name of a function
144 # (return offset and func name) 159 # (return offset and func name)
145my $mcount_regex; # Find the call site to mcount (return offset) 160my $mcount_regex; # Find the call site to mcount (return offset)
146my $alignment; # The .align value to use for $mcount_section 161my $alignment; # The .align value to use for $mcount_section
147my $section_type; # Section header plus possible alignment command 162my $section_type; # Section header plus possible alignment command
163my $can_use_local = 0; # If we can use local function references
164
165# Shut up recordmcount if user has older objcopy
166my $quiet_recordmcount = ".tmp_quiet_recordmcount";
167my $print_warning = 1;
168$print_warning = 0 if ( -f $quiet_recordmcount);
169
170##
171# check_objcopy - whether objcopy supports --globalize-symbols
172#
173# --globalize-symbols came out in 2.17, we must test the version
174# of objcopy, and if it is less than 2.17, then we can not
175# record local functions.
176sub check_objcopy
177{
178 open (IN, "$objcopy --version |") or die "error running $objcopy";
179 while (<IN>) {
180 if (/objcopy.*\s(\d+)\.(\d+)/) {
181 $can_use_local = 1 if ($1 > 2 || ($1 == 2 && $2 >= 17));
182 last;
183 }
184 }
185 close (IN);
186
187 if (!$can_use_local && $print_warning) {
188 print STDERR "WARNING: could not find objcopy version or version " .
189 "is less than 2.17.\n" .
190 "\tLocal function references are disabled.\n";
191 open (QUIET, ">$quiet_recordmcount");
192 printf QUIET "Disables the warning from recordmcount.pl\n";
193 close QUIET;
194 }
195}
148 196
149if ($arch eq "x86") { 197if ($arch eq "x86") {
150 if ($bits == 64) { 198 if ($bits == 64) {
@@ -158,7 +206,8 @@ if ($arch eq "x86") {
158# We base the defaults off of i386, the other archs may 206# We base the defaults off of i386, the other archs may
159# feel free to change them in the below if statements. 207# feel free to change them in the below if statements.
160# 208#
161$nm_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)"; 209$local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)";
210$weak_regex = "^[0-9a-fA-F]+\\s+([wW])\\s+(\\S+)";
162$section_regex = "Disassembly of section\\s+(\\S+):"; 211$section_regex = "Disassembly of section\\s+(\\S+):";
163$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:"; 212$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
164$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$"; 213$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
@@ -207,7 +256,7 @@ if ($arch eq "x86_64") {
207 $cc .= " -m32"; 256 $cc .= " -m32";
208 257
209} elsif ($arch eq "powerpc") { 258} elsif ($arch eq "powerpc") {
210 $nm_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)"; 259 $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
211 $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:"; 260 $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
212 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$"; 261 $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
213 262
@@ -279,44 +328,17 @@ if ($filename =~ m,^(.*)(\.\S),) {
279my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s"; 328my $mcount_s = $dirname . "/.tmp_mc_" . $prefix . ".s";
280my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o"; 329my $mcount_o = $dirname . "/.tmp_mc_" . $prefix . ".o";
281 330
282# 331check_objcopy();
283# --globalize-symbols came out in 2.17, we must test the version
284# of objcopy, and if it is less than 2.17, then we can not
285# record local functions.
286my $use_locals = 01;
287my $local_warn_once = 0;
288my $found_version = 0;
289
290open (IN, "$objcopy --version |") || die "error running $objcopy";
291while (<IN>) {
292 if (/objcopy.*\s(\d+)\.(\d+)/) {
293 my $major = $1;
294 my $minor = $2;
295
296 $found_version = 1;
297 if ($major < 2 ||
298 ($major == 2 && $minor < 17)) {
299 $use_locals = 0;
300 }
301 last;
302 }
303}
304close (IN);
305
306if (!$found_version) {
307 print STDERR "WARNING: could not find objcopy version.\n" .
308 "\tDisabling local function references.\n";
309}
310 332
311# 333#
312# Step 1: find all the local (static functions) and weak symbols. 334# Step 1: find all the local (static functions) and weak symbols.
313# 't' is local, 'w/W' is weak (we never use a weak function) 335# 't' is local, 'w/W' is weak
314# 336#
315open (IN, "$nm $inputfile|") || die "error running $nm"; 337open (IN, "$nm $inputfile|") || die "error running $nm";
316while (<IN>) { 338while (<IN>) {
317 if (/$nm_regex/) { 339 if (/$local_regex/) {
318 $locals{$1} = 1; 340 $locals{$1} = 1;
319 } elsif (/^[0-9a-fA-F]+\s+([wW])\s+(\S+)/) { 341 } elsif (/$weak_regex/) {
320 $weak{$2} = $1; 342 $weak{$2} = $1;
321 } 343 }
322} 344}
@@ -334,26 +356,20 @@ my $offset = 0; # offset of ref_func to section beginning
334# 356#
335sub update_funcs 357sub update_funcs
336{ 358{
337 return if ($#offsets < 0); 359 return unless ($ref_func and @offsets);
338
339 defined($ref_func) || die "No function to reference";
340 360
341 # A section only had a weak function, to represent it. 361 # Sanity check on weak function. A weak function may be overwritten by
342 # Unfortunately, a weak function may be overwritten by another 362 # another function of the same name, making all these offsets incorrect.
343 # function of the same name, making all these offsets incorrect.
344 # To be safe, we simply print a warning and bail.
345 if (defined $weak{$ref_func}) { 363 if (defined $weak{$ref_func}) {
346 print STDERR 364 die "$inputfile: ERROR: referencing weak function" .
347 "$inputfile: WARNING: referencing weak function" .
348 " $ref_func for mcount\n"; 365 " $ref_func for mcount\n";
349 return;
350 } 366 }
351 367
352 # is this function static? If so, note this fact. 368 # is this function static? If so, note this fact.
353 if (defined $locals{$ref_func}) { 369 if (defined $locals{$ref_func}) {
354 370
355 # only use locals if objcopy supports globalize-symbols 371 # only use locals if objcopy supports globalize-symbols
356 if (!$use_locals) { 372 if (!$can_use_local) {
357 return; 373 return;
358 } 374 }
359 $convert{$ref_func} = 1; 375 $convert{$ref_func} = 1;
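
The hunk above turns the "weak function used as mcount reference" case from a skipped warning into a hard die: if another object provides a strong definition of the same name, the weak one is replaced at link time and every recorded offset would silently point into the wrong function. A tiny C illustration of a weak symbol being overridable; the function name is made up:

#include <stdio.h>

/* If a strong definition of func() exists in another object, the linker
 * picks that one, so "func + 0x10" in tmp.s would resolve against the
 * overriding function's address and the recorded offsets become garbage. */
__attribute__((weak)) void func(void)
{
	puts("weak default - may be replaced at link time");
}

int main(void)
{
	func();
	return 0;
}
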
@@ -379,9 +395,27 @@ open(IN, "$objdump -hdr $inputfile|") || die "error running $objdump";
379 395
380my $text; 396my $text;
381 397
398
399# read headers first
382my $read_headers = 1; 400my $read_headers = 1;
383 401
384while (<IN>) { 402while (<IN>) {
403
404 if ($read_headers && /$mcount_section/) {
405 #
406 # Somehow the make process can execute this script on an
407 # object twice. If it does, we would duplicate the mcount
408 # section and it will cause the function tracer self test
409 # to fail. Check if the mcount section exists, and if it does,
410 # warn and exit.
411 #
412 print STDERR "ERROR: $mcount_section already in $inputfile\n" .
413 "\tThis may be an indication that your build is corrupted.\n" .
414 "\tDelete $inputfile and try again. If the same object file\n" .
415 "\tstill causes an issue, then disable CONFIG_DYNAMIC_FTRACE.\n";
416 exit(-1);
417 }
418
385 # is it a section? 419 # is it a section?
386 if (/$section_regex/) { 420 if (/$section_regex/) {
387 $read_headers = 0; 421 $read_headers = 0;
@@ -393,7 +427,7 @@ while (<IN>) {
393 $read_function = 0; 427 $read_function = 0;
394 } 428 }
395 # print out any recorded offsets 429 # print out any recorded offsets
396 update_funcs() if (defined($ref_func)); 430 update_funcs();
397 431
398 # reset all markers and arrays 432 # reset all markers and arrays
399 $text_found = 0; 433 $text_found = 0;
@@ -422,21 +456,7 @@ while (<IN>) {
422 $offset = hex $1; 456 $offset = hex $1;
423 } 457 }
424 } 458 }
425 } elsif ($read_headers && /$mcount_section/) {
426 #
427 # Somehow the make process can execute this script on an
428 # object twice. If it does, we would duplicate the mcount
429 # section and it will cause the function tracer self test
430 # to fail. Check if the mcount section exists, and if it does,
431 # warn and exit.
432 #
433 print STDERR "ERROR: $mcount_section already in $inputfile\n" .
434 "\tThis may be an indication that your build is corrupted.\n" .
435 "\tDelete $inputfile and try again. If the same object file\n" .
436 "\tstill causes an issue, then disable CONFIG_DYNAMIC_FTRACE.\n";
437 exit(-1);
438 } 459 }
439
440 # is this a call site to mcount? If so, record it to print later 460 # is this a call site to mcount? If so, record it to print later
441 if ($text_found && /$mcount_regex/) { 461 if ($text_found && /$mcount_regex/) {
442 $offsets[$#offsets + 1] = hex $1; 462 $offsets[$#offsets + 1] = hex $1;
@@ -444,7 +464,7 @@ while (<IN>) {
444} 464}
445 465
446# dump out anymore offsets that may have been found 466# dump out anymore offsets that may have been found
447update_funcs() if (defined($ref_func)); 467update_funcs();
448 468
449# If we did not find any mcount callers, we are done (do nothing). 469# If we did not find any mcount callers, we are done (do nothing).
450if (!$opened) { 470if (!$opened) {
diff --git a/scripts/selinux/Makefile b/scripts/selinux/Makefile
index ca4b1ec01822..e8049da1831f 100644
--- a/scripts/selinux/Makefile
+++ b/scripts/selinux/Makefile
@@ -1,2 +1,2 @@
1subdir-y := mdp 1subdir-y := mdp genheaders
2subdir- += mdp 2subdir- += mdp genheaders
diff --git a/scripts/selinux/genheaders/.gitignore b/scripts/selinux/genheaders/.gitignore
new file mode 100644
index 000000000000..4c0b646ff8d5
--- /dev/null
+++ b/scripts/selinux/genheaders/.gitignore
@@ -0,0 +1 @@
genheaders
diff --git a/scripts/selinux/genheaders/Makefile b/scripts/selinux/genheaders/Makefile
new file mode 100644
index 000000000000..417b165008ee
--- /dev/null
+++ b/scripts/selinux/genheaders/Makefile
@@ -0,0 +1,5 @@
1hostprogs-y := genheaders
2HOST_EXTRACFLAGS += -Isecurity/selinux/include
3
4always := $(hostprogs-y)
5clean-files := $(hostprogs-y)
diff --git a/scripts/selinux/genheaders/genheaders.c b/scripts/selinux/genheaders/genheaders.c
new file mode 100644
index 000000000000..24626968055d
--- /dev/null
+++ b/scripts/selinux/genheaders/genheaders.c
@@ -0,0 +1,118 @@
1#include <stdio.h>
2#include <stdlib.h>
3#include <unistd.h>
4#include <string.h>
5#include <errno.h>
6#include <ctype.h>
7
8struct security_class_mapping {
9 const char *name;
10 const char *perms[sizeof(unsigned) * 8 + 1];
11};
12
13#include "classmap.h"
14#include "initial_sid_to_string.h"
15
16#define max(x, y) (((int)(x) > (int)(y)) ? x : y)
17
18const char *progname;
19
20static void usage(void)
21{
22 printf("usage: %s flask.h av_permissions.h\n", progname);
23 exit(1);
24}
25
26static char *stoupperx(const char *s)
27{
28 char *s2 = strdup(s);
29 char *p;
30
31 if (!s2) {
32 fprintf(stderr, "%s: out of memory\n", progname);
33 exit(3);
34 }
35
36 for (p = s2; *p; p++)
37 *p = toupper(*p);
38 return s2;
39}
40
41int main(int argc, char *argv[])
42{
43 int i, j, k;
44 int isids_len;
45 FILE *fout;
46
47 progname = argv[0];
48
49 if (argc < 3)
50 usage();
51
52 fout = fopen(argv[1], "w");
53 if (!fout) {
54 fprintf(stderr, "Could not open %s for writing: %s\n",
55 argv[1], strerror(errno));
56 exit(2);
57 }
58
59 for (i = 0; secclass_map[i].name; i++) {
60 struct security_class_mapping *map = &secclass_map[i];
61 map->name = stoupperx(map->name);
62 for (j = 0; map->perms[j]; j++)
63 map->perms[j] = stoupperx(map->perms[j]);
64 }
65
66 isids_len = sizeof(initial_sid_to_string) / sizeof (char *);
67 for (i = 1; i < isids_len; i++)
68 initial_sid_to_string[i] = stoupperx(initial_sid_to_string[i]);
69
70 fprintf(fout, "/* This file is automatically generated. Do not edit. */\n");
71 fprintf(fout, "#ifndef _SELINUX_FLASK_H_\n#define _SELINUX_FLASK_H_\n\n");
72
73 for (i = 0; secclass_map[i].name; i++) {
74 struct security_class_mapping *map = &secclass_map[i];
75 fprintf(fout, "#define SECCLASS_%s", map->name);
76 for (j = 0; j < max(1, 40 - strlen(map->name)); j++)
77 fprintf(fout, " ");
78 fprintf(fout, "%2d\n", i+1);
79 }
80
81 fprintf(fout, "\n");
82
83 for (i = 1; i < isids_len; i++) {
84 char *s = initial_sid_to_string[i];
85 fprintf(fout, "#define SECINITSID_%s", s);
86 for (j = 0; j < max(1, 40 - strlen(s)); j++)
87 fprintf(fout, " ");
88 fprintf(fout, "%2d\n", i);
89 }
90 fprintf(fout, "\n#define SECINITSID_NUM %d\n", i-1);
91 fprintf(fout, "\n#endif\n");
92 fclose(fout);
93
94 fout = fopen(argv[2], "w");
95 if (!fout) {
96 fprintf(stderr, "Could not open %s for writing: %s\n",
97 argv[2], strerror(errno));
98 exit(4);
99 }
100
101 fprintf(fout, "/* This file is automatically generated. Do not edit. */\n");
102 fprintf(fout, "#ifndef _SELINUX_AV_PERMISSIONS_H_\n#define _SELINUX_AV_PERMISSIONS_H_\n\n");
103
104 for (i = 0; secclass_map[i].name; i++) {
105 struct security_class_mapping *map = &secclass_map[i];
106 for (j = 0; map->perms[j]; j++) {
107 fprintf(fout, "#define %s__%s", map->name,
108 map->perms[j]);
109 for (k = 0; k < max(1, 40 - strlen(map->name) - strlen(map->perms[j])); k++)
110 fprintf(fout, " ");
111 fprintf(fout, "0x%08xUL\n", (1<<j));
112 }
113 }
114
115 fprintf(fout, "\n#endif\n");
116 fclose(fout);
117 exit(0);
118}
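
The generator above rebuilds, at compile time, the headers that used to be maintained by hand: for entry i of secclass_map (counted from 1) it emits SECCLASS_<NAME> into flask.h, and for permission j of that class it emits <NAME>__<PERM> with the value 1 << j into av_permissions.h. A minimal user-space sketch of that scheme, assuming a shortened one-class map (the real map lives in security/selinux/include/classmap.h):

#include <stdio.h>

/* hypothetical, shortened stand-in for one classmap.h entry */
static const char *class_name = "SECURITY";
static const char *class_perms[] = { "COMPUTE_AV", "LOAD_POLICY", NULL };

int main(void)
{
	int j;

	/* flask.h style: class index, counted from 1 */
	printf("#define SECCLASS_%-40s %2d\n", class_name, 1);

	/* av_permissions.h style: permission j becomes bit 1 << j */
	for (j = 0; class_perms[j]; j++)
		printf("#define %s__%-33s 0x%08xUL\n",
		       class_name, class_perms[j], 1 << j);
	return 0;
}
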
diff --git a/scripts/selinux/mdp/mdp.c b/scripts/selinux/mdp/mdp.c
index b4ced8562587..62b34ce1f50d 100644
--- a/scripts/selinux/mdp/mdp.c
+++ b/scripts/selinux/mdp/mdp.c
@@ -29,86 +29,27 @@
29#include <unistd.h> 29#include <unistd.h>
30#include <string.h> 30#include <string.h>
31 31
32#include "flask.h"
33
34static void usage(char *name) 32static void usage(char *name)
35{ 33{
36 printf("usage: %s [-m] policy_file context_file\n", name); 34 printf("usage: %s [-m] policy_file context_file\n", name);
37 exit(1); 35 exit(1);
38} 36}
39 37
40static void find_common_name(char *cname, char *dest, int len) 38/* Class/perm mapping support */
41{ 39struct security_class_mapping {
42 char *start, *end; 40 const char *name;
43 41 const char *perms[sizeof(unsigned) * 8 + 1];
44 start = strchr(cname, '_')+1;
45 end = strchr(start, '_');
46 if (!start || !end || start-cname > len || end-start > len) {
47 printf("Error with commons defines\n");
48 exit(1);
49 }
50 strncpy(dest, start, end-start);
51 dest[end-start] = '\0';
52}
53
54#define S_(x) x,
55static char *classlist[] = {
56#include "class_to_string.h"
57 NULL
58}; 42};
59#undef S_
60 43
44#include "classmap.h"
61#include "initial_sid_to_string.h" 45#include "initial_sid_to_string.h"
62 46
63#define TB_(x) char *x[] = {
64#define TE_(x) NULL };
65#define S_(x) x,
66#include "common_perm_to_string.h"
67#undef TB_
68#undef TE_
69#undef S_
70
71struct common {
72 char *cname;
73 char **perms;
74};
75struct common common[] = {
76#define TB_(x) { #x, x },
77#define S_(x)
78#define TE_(x)
79#include "common_perm_to_string.h"
80#undef TB_
81#undef TE_
82#undef S_
83};
84
85#define S_(x, y, z) {x, #y},
86struct av_inherit {
87 int class;
88 char *common;
89};
90struct av_inherit av_inherit[] = {
91#include "av_inherit.h"
92};
93#undef S_
94
95#include "av_permissions.h"
96#define S_(x, y, z) {x, y, z},
97struct av_perms {
98 int class;
99 int perm_i;
100 char *perm_s;
101};
102struct av_perms av_perms[] = {
103#include "av_perm_to_string.h"
104};
105#undef S_
106
107int main(int argc, char *argv[]) 47int main(int argc, char *argv[])
108{ 48{
109 int i, j, mls = 0; 49 int i, j, mls = 0;
50 int initial_sid_to_string_len;
110 char **arg, *polout, *ctxout; 51 char **arg, *polout, *ctxout;
111 int classlist_len, initial_sid_to_string_len; 52
112 FILE *fout; 53 FILE *fout;
113 54
114 if (argc < 3) 55 if (argc < 3)
@@ -127,64 +68,25 @@ int main(int argc, char *argv[])
127 usage(argv[0]); 68 usage(argv[0]);
128 } 69 }
129 70
130 classlist_len = sizeof(classlist) / sizeof(char *);
131 /* print out the classes */ 71 /* print out the classes */
132 for (i=1; i < classlist_len; i++) { 72 for (i = 0; secclass_map[i].name; i++)
133 if(classlist[i]) 73 fprintf(fout, "class %s\n", secclass_map[i].name);
134 fprintf(fout, "class %s\n", classlist[i]);
135 else
136 fprintf(fout, "class user%d\n", i);
137 }
138 fprintf(fout, "\n"); 74 fprintf(fout, "\n");
139 75
140 initial_sid_to_string_len = sizeof(initial_sid_to_string) / sizeof (char *); 76 initial_sid_to_string_len = sizeof(initial_sid_to_string) / sizeof (char *);
141 /* print out the sids */ 77 /* print out the sids */
142 for (i=1; i < initial_sid_to_string_len; i++) 78 for (i = 1; i < initial_sid_to_string_len; i++)
143 fprintf(fout, "sid %s\n", initial_sid_to_string[i]); 79 fprintf(fout, "sid %s\n", initial_sid_to_string[i]);
144 fprintf(fout, "\n"); 80 fprintf(fout, "\n");
145 81
146 /* print out the commons */
147 for (i=0; i< sizeof(common)/sizeof(struct common); i++) {
148 char cname[101];
149 find_common_name(common[i].cname, cname, 100);
150 cname[100] = '\0';
151 fprintf(fout, "common %s\n{\n", cname);
152 for (j=0; common[i].perms[j]; j++)
153 fprintf(fout, "\t%s\n", common[i].perms[j]);
154 fprintf(fout, "}\n\n");
155 }
156 fprintf(fout, "\n");
157
158 /* print out the class permissions */ 82 /* print out the class permissions */
159 for (i=1; i < classlist_len; i++) { 83 for (i = 0; secclass_map[i].name; i++) {
160 if (classlist[i]) { 84 struct security_class_mapping *map = &secclass_map[i];
161 int firstperm = -1, numperms = 0; 85 fprintf(fout, "class %s\n", map->name);
162 86 fprintf(fout, "{\n");
163 fprintf(fout, "class %s\n", classlist[i]); 87 for (j = 0; map->perms[j]; j++)
164 /* does it inherit from a common? */ 88 fprintf(fout, "\t%s\n", map->perms[j]);
165 for (j=0; j < sizeof(av_inherit)/sizeof(struct av_inherit); j++) 89 fprintf(fout, "}\n\n");
166 if (av_inherit[j].class == i)
167 fprintf(fout, "inherits %s\n", av_inherit[j].common);
168
169 for (j=0; j < sizeof(av_perms)/sizeof(struct av_perms); j++) {
170 if (av_perms[j].class == i) {
171 if (firstperm == -1)
172 firstperm = j;
173 numperms++;
174 }
175 }
176 if (!numperms) {
177 fprintf(fout, "\n");
178 continue;
179 }
180
181 fprintf(fout, "{\n");
182 /* print out the av_perms */
183 for (j=0; j < numperms; j++) {
184 fprintf(fout, "\t%s\n", av_perms[firstperm+j].perm_s);
185 }
186 fprintf(fout, "}\n\n");
187 }
188 } 90 }
189 fprintf(fout, "\n"); 91 fprintf(fout, "\n");
190 92
@@ -197,31 +99,34 @@ int main(int argc, char *argv[])
197 /* types, roles, and allows */ 99 /* types, roles, and allows */
198 fprintf(fout, "type base_t;\n"); 100 fprintf(fout, "type base_t;\n");
199 fprintf(fout, "role base_r types { base_t };\n"); 101 fprintf(fout, "role base_r types { base_t };\n");
200 for (i=1; i < classlist_len; i++) { 102 for (i = 0; secclass_map[i].name; i++)
201 if (classlist[i]) 103 fprintf(fout, "allow base_t base_t:%s *;\n",
202 fprintf(fout, "allow base_t base_t:%s *;\n", classlist[i]); 104 secclass_map[i].name);
203 else
204 fprintf(fout, "allow base_t base_t:user%d *;\n", i);
205 }
206 fprintf(fout, "user user_u roles { base_r };\n"); 105 fprintf(fout, "user user_u roles { base_r };\n");
207 fprintf(fout, "\n"); 106 fprintf(fout, "\n");
208 107
209 /* default sids */ 108 /* default sids */
210 for (i=1; i < initial_sid_to_string_len; i++) 109 for (i = 1; i < initial_sid_to_string_len; i++)
211 fprintf(fout, "sid %s user_u:base_r:base_t\n", initial_sid_to_string[i]); 110 fprintf(fout, "sid %s user_u:base_r:base_t\n", initial_sid_to_string[i]);
212 fprintf(fout, "\n"); 111 fprintf(fout, "\n");
213 112
214
215 fprintf(fout, "fs_use_xattr ext2 user_u:base_r:base_t;\n"); 113 fprintf(fout, "fs_use_xattr ext2 user_u:base_r:base_t;\n");
216 fprintf(fout, "fs_use_xattr ext3 user_u:base_r:base_t;\n"); 114 fprintf(fout, "fs_use_xattr ext3 user_u:base_r:base_t;\n");
115 fprintf(fout, "fs_use_xattr ext4 user_u:base_r:base_t;\n");
217 fprintf(fout, "fs_use_xattr jfs user_u:base_r:base_t;\n"); 116 fprintf(fout, "fs_use_xattr jfs user_u:base_r:base_t;\n");
218 fprintf(fout, "fs_use_xattr xfs user_u:base_r:base_t;\n"); 117 fprintf(fout, "fs_use_xattr xfs user_u:base_r:base_t;\n");
219 fprintf(fout, "fs_use_xattr reiserfs user_u:base_r:base_t;\n"); 118 fprintf(fout, "fs_use_xattr reiserfs user_u:base_r:base_t;\n");
119 fprintf(fout, "fs_use_xattr jffs2 user_u:base_r:base_t;\n");
120 fprintf(fout, "fs_use_xattr gfs2 user_u:base_r:base_t;\n");
121 fprintf(fout, "fs_use_xattr lustre user_u:base_r:base_t;\n");
220 122
123 fprintf(fout, "fs_use_task eventpollfs user_u:base_r:base_t;\n");
221 fprintf(fout, "fs_use_task pipefs user_u:base_r:base_t;\n"); 124 fprintf(fout, "fs_use_task pipefs user_u:base_r:base_t;\n");
222 fprintf(fout, "fs_use_task sockfs user_u:base_r:base_t;\n"); 125 fprintf(fout, "fs_use_task sockfs user_u:base_r:base_t;\n");
223 126
127 fprintf(fout, "fs_use_trans mqueue user_u:base_r:base_t;\n");
224 fprintf(fout, "fs_use_trans devpts user_u:base_r:base_t;\n"); 128 fprintf(fout, "fs_use_trans devpts user_u:base_r:base_t;\n");
129 fprintf(fout, "fs_use_trans hugetlbfs user_u:base_r:base_t;\n");
225 fprintf(fout, "fs_use_trans tmpfs user_u:base_r:base_t;\n"); 130 fprintf(fout, "fs_use_trans tmpfs user_u:base_r:base_t;\n");
226 fprintf(fout, "fs_use_trans shm user_u:base_r:base_t;\n"); 131 fprintf(fout, "fs_use_trans shm user_u:base_r:base_t;\n");
227 132
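
With the common/av_inherit tables gone, mdp now derives every class stanza and allow rule it prints from secclass_map alone, as the loops above show. A minimal user-space sketch of that emission, again assuming a hypothetical one-class map:

#include <stdio.h>

struct security_class_mapping {
	const char *name;
	const char *perms[sizeof(unsigned) * 8 + 1];
};

/* hypothetical, shortened stand-in for classmap.h */
static struct security_class_mapping secclass_map[] = {
	{ "security", { "compute_av", "load_policy", NULL } },
	{ NULL }
};

int main(void)
{
	int i, j;

	for (i = 0; secclass_map[i].name; i++) {
		struct security_class_mapping *map = &secclass_map[i];

		/* class declaration with its permission set */
		printf("class %s\n{\n", map->name);
		for (j = 0; map->perms[j]; j++)
			printf("\t%s\n", map->perms[j]);
		printf("}\n\n");

		/* matching allow rule for the dummy policy */
		printf("allow base_t base_t:%s *;\n", map->name);
	}
	return 0;
}
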
diff --git a/security/Kconfig b/security/Kconfig
index fb363cd81cf6..226b9556b25f 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -91,28 +91,6 @@ config SECURITY_PATH
91 implement pathname based access controls. 91 implement pathname based access controls.
92 If you are unsure how to answer this question, answer N. 92 If you are unsure how to answer this question, answer N.
93 93
94config SECURITY_FILE_CAPABILITIES
95 bool "File POSIX Capabilities"
96 default n
97 help
98 This enables filesystem capabilities, allowing you to give
99 binaries a subset of root's powers without using setuid 0.
100
101 If in doubt, answer N.
102
103config SECURITY_ROOTPLUG
104 bool "Root Plug Support"
105 depends on USB=y && SECURITY
106 help
107 This is a sample LSM module that should only be used as such.
108 It prevents any programs running with egid == 0 if a specific
109 USB device is not present in the system.
110
111 See <http://www.linuxjournal.com/article.php?sid=6279> for
112 more information about this module.
113
114 If you are unsure how to answer this question, answer N.
115
116config INTEL_TXT 94config INTEL_TXT
117 bool "Enable Intel(R) Trusted Execution Technology (Intel(R) TXT)" 95 bool "Enable Intel(R) Trusted Execution Technology (Intel(R) TXT)"
118 depends on HAVE_INTEL_TXT 96 depends on HAVE_INTEL_TXT
@@ -165,5 +143,37 @@ source security/tomoyo/Kconfig
165 143
166source security/integrity/ima/Kconfig 144source security/integrity/ima/Kconfig
167 145
146choice
147 prompt "Default security module"
148 default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
149 default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
150 default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
151 default DEFAULT_SECURITY_DAC
152
153 help
154 Select the security module that will be used by default if the
155 kernel parameter security= is not specified.
156
157 config DEFAULT_SECURITY_SELINUX
158 bool "SELinux" if SECURITY_SELINUX=y
159
160 config DEFAULT_SECURITY_SMACK
161 bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
162
163 config DEFAULT_SECURITY_TOMOYO
164 bool "TOMOYO" if SECURITY_TOMOYO=y
165
166 config DEFAULT_SECURITY_DAC
167 bool "Unix Discretionary Access Controls"
168
169endchoice
170
171config DEFAULT_SECURITY
172 string
173 default "selinux" if DEFAULT_SECURITY_SELINUX
174 default "smack" if DEFAULT_SECURITY_SMACK
175 default "tomoyo" if DEFAULT_SECURITY_TOMOYO
176 default "" if DEFAULT_SECURITY_DAC
177
168endmenu 178endmenu
169 179
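
The new choice block only selects which string DEFAULT_SECURITY carries; security.c (further down in this patch) compiles that string in as the boot-time default for chosen_lsm when no security= parameter is given. Purely as an illustration, assuming SELinux is picked, the generated autoconf.h would contain something like:

/* illustrative kconfig output, assuming DEFAULT_SECURITY_SELINUX=y */
#define CONFIG_DEFAULT_SECURITY_SELINUX 1
#define CONFIG_DEFAULT_SECURITY "selinux"
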
diff --git a/security/Makefile b/security/Makefile
index 95ecc06392d7..bb44e350c618 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
18obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o 18obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
19obj-$(CONFIG_AUDIT) += lsm_audit.o 19obj-$(CONFIG_AUDIT) += lsm_audit.o
20obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o 20obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o
21obj-$(CONFIG_SECURITY_ROOTPLUG) += root_plug.o
22obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o 21obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
23 22
24# Object integrity file lists 23# Object integrity file lists
diff --git a/security/capability.c b/security/capability.c
index fce07a7bc825..5c700e1a4fd3 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -308,6 +308,22 @@ static int cap_path_truncate(struct path *path, loff_t length,
308{ 308{
309 return 0; 309 return 0;
310} 310}
311
312static int cap_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
313 mode_t mode)
314{
315 return 0;
316}
317
318static int cap_path_chown(struct path *path, uid_t uid, gid_t gid)
319{
320 return 0;
321}
322
323static int cap_path_chroot(struct path *root)
324{
325 return 0;
326}
311#endif 327#endif
312 328
313static int cap_file_permission(struct file *file, int mask) 329static int cap_file_permission(struct file *file, int mask)
@@ -405,7 +421,7 @@ static int cap_kernel_create_files_as(struct cred *new, struct inode *inode)
405 return 0; 421 return 0;
406} 422}
407 423
408static int cap_kernel_module_request(void) 424static int cap_kernel_module_request(char *kmod_name)
409{ 425{
410 return 0; 426 return 0;
411} 427}
@@ -977,6 +993,9 @@ void security_fixup_ops(struct security_operations *ops)
977 set_to_cap_if_null(ops, path_link); 993 set_to_cap_if_null(ops, path_link);
978 set_to_cap_if_null(ops, path_rename); 994 set_to_cap_if_null(ops, path_rename);
979 set_to_cap_if_null(ops, path_truncate); 995 set_to_cap_if_null(ops, path_truncate);
996 set_to_cap_if_null(ops, path_chmod);
997 set_to_cap_if_null(ops, path_chown);
998 set_to_cap_if_null(ops, path_chroot);
980#endif 999#endif
981 set_to_cap_if_null(ops, file_permission); 1000 set_to_cap_if_null(ops, file_permission);
982 set_to_cap_if_null(ops, file_alloc_security); 1001 set_to_cap_if_null(ops, file_alloc_security);
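
The three added set_to_cap_if_null() lines make the new path_chmod/path_chown/path_chroot hooks fall back to the capability stubs above when a registered LSM leaves them NULL. The macro itself is defined earlier in security/capability.c and is not part of this hunk; a minimal sketch of such a default-filling macro (the real one may also log when it substitutes a default):

/* sketch: use the cap_ default whenever an LSM left a hook unset */
#define set_to_cap_if_null(ops, function)			\
	do {							\
		if (!(ops)->function)				\
			(ops)->function = cap_##function;	\
	} while (0)
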
diff --git a/security/commoncap.c b/security/commoncap.c
index fe30751a6cd9..f800fdb3de94 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -1,4 +1,4 @@
1/* Common capabilities, needed by capability.o and root_plug.o 1/* Common capabilities, needed by capability.o.
2 * 2 *
3 * This program is free software; you can redistribute it and/or modify 3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by 4 * it under the terms of the GNU General Public License as published by
@@ -173,7 +173,6 @@ int cap_capget(struct task_struct *target, kernel_cap_t *effective,
173 */ 173 */
174static inline int cap_inh_is_capped(void) 174static inline int cap_inh_is_capped(void)
175{ 175{
176#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
177 176
178 /* they are so limited unless the current task has the CAP_SETPCAP 177 /* they are so limited unless the current task has the CAP_SETPCAP
179 * capability 178 * capability
@@ -181,7 +180,6 @@ static inline int cap_inh_is_capped(void)
181 if (cap_capable(current, current_cred(), CAP_SETPCAP, 180 if (cap_capable(current, current_cred(), CAP_SETPCAP,
182 SECURITY_CAP_AUDIT) == 0) 181 SECURITY_CAP_AUDIT) == 0)
183 return 0; 182 return 0;
184#endif
185 return 1; 183 return 1;
186} 184}
187 185
@@ -239,8 +237,6 @@ static inline void bprm_clear_caps(struct linux_binprm *bprm)
239 bprm->cap_effective = false; 237 bprm->cap_effective = false;
240} 238}
241 239
242#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
243
244/** 240/**
245 * cap_inode_need_killpriv - Determine if inode change affects privileges 241 * cap_inode_need_killpriv - Determine if inode change affects privileges
246 * @dentry: The inode/dentry in being changed with change marked ATTR_KILL_PRIV 242 * @dentry: The inode/dentry in being changed with change marked ATTR_KILL_PRIV
@@ -421,49 +417,6 @@ out:
421 return rc; 417 return rc;
422} 418}
423 419
424#else
425int cap_inode_need_killpriv(struct dentry *dentry)
426{
427 return 0;
428}
429
430int cap_inode_killpriv(struct dentry *dentry)
431{
432 return 0;
433}
434
435int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps)
436{
437 memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
438 return -ENODATA;
439}
440
441static inline int get_file_caps(struct linux_binprm *bprm, bool *effective)
442{
443 bprm_clear_caps(bprm);
444 return 0;
445}
446#endif
447
448/*
449 * Determine whether a exec'ing process's new permitted capabilities should be
450 * limited to just what it already has.
451 *
452 * This prevents processes that are being ptraced from gaining access to
453 * CAP_SETPCAP, unless the process they're tracing already has it, and the
454 * binary they're executing has filecaps that elevate it.
455 *
456 * Returns 1 if they should be limited, 0 if they are not.
457 */
458static inline int cap_limit_ptraced_target(void)
459{
460#ifndef CONFIG_SECURITY_FILE_CAPABILITIES
461 if (capable(CAP_SETPCAP))
462 return 0;
463#endif
464 return 1;
465}
466
467/** 420/**
468 * cap_bprm_set_creds - Set up the proposed credentials for execve(). 421 * cap_bprm_set_creds - Set up the proposed credentials for execve().
469 * @bprm: The execution parameters, including the proposed creds 422 * @bprm: The execution parameters, including the proposed creds
@@ -523,9 +476,8 @@ skip:
523 new->euid = new->uid; 476 new->euid = new->uid;
524 new->egid = new->gid; 477 new->egid = new->gid;
525 } 478 }
526 if (cap_limit_ptraced_target()) 479 new->cap_permitted = cap_intersect(new->cap_permitted,
527 new->cap_permitted = cap_intersect(new->cap_permitted, 480 old->cap_permitted);
528 old->cap_permitted);
529 } 481 }
530 482
531 new->suid = new->fsuid = new->euid; 483 new->suid = new->fsuid = new->euid;
@@ -739,7 +691,6 @@ int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
739 return 0; 691 return 0;
740} 692}
741 693
742#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
743/* 694/*
744 * Rationale: code calling task_setscheduler, task_setioprio, and 695 * Rationale: code calling task_setscheduler, task_setioprio, and
745 * task_setnice, assumes that 696 * task_setnice, assumes that
@@ -820,22 +771,6 @@ static long cap_prctl_drop(struct cred *new, unsigned long cap)
820 return 0; 771 return 0;
821} 772}
822 773
823#else
824int cap_task_setscheduler (struct task_struct *p, int policy,
825 struct sched_param *lp)
826{
827 return 0;
828}
829int cap_task_setioprio (struct task_struct *p, int ioprio)
830{
831 return 0;
832}
833int cap_task_setnice (struct task_struct *p, int nice)
834{
835 return 0;
836}
837#endif
838
839/** 774/**
840 * cap_task_prctl - Implement process control functions for this security module 775 * cap_task_prctl - Implement process control functions for this security module
841 * @option: The process control function requested 776 * @option: The process control function requested
@@ -866,7 +801,6 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
866 error = !!cap_raised(new->cap_bset, arg2); 801 error = !!cap_raised(new->cap_bset, arg2);
867 goto no_change; 802 goto no_change;
868 803
869#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
870 case PR_CAPBSET_DROP: 804 case PR_CAPBSET_DROP:
871 error = cap_prctl_drop(new, arg2); 805 error = cap_prctl_drop(new, arg2);
872 if (error < 0) 806 if (error < 0)
@@ -917,8 +851,6 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
917 error = new->securebits; 851 error = new->securebits;
918 goto no_change; 852 goto no_change;
919 853
920#endif /* def CONFIG_SECURITY_FILE_CAPABILITIES */
921
922 case PR_GET_KEEPCAPS: 854 case PR_GET_KEEPCAPS:
923 if (issecure(SECURE_KEEP_CAPS)) 855 if (issecure(SECURE_KEEP_CAPS))
924 error = 1; 856 error = 1;
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 53d9764e8f09..3d7846de8069 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -3,6 +3,7 @@
3config IMA 3config IMA
4 bool "Integrity Measurement Architecture(IMA)" 4 bool "Integrity Measurement Architecture(IMA)"
5 depends on ACPI 5 depends on ACPI
6 depends on SECURITY
6 select SECURITYFS 7 select SECURITYFS
7 select CRYPTO 8 select CRYPTO
8 select CRYPTO_HMAC 9 select CRYPTO_HMAC
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index b8dd693f8790..a4e2b1dac943 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -58,11 +58,11 @@ struct ima_iint_cache *ima_iint_insert(struct inode *inode)
58 58
59 if (!ima_initialized) 59 if (!ima_initialized)
60 return iint; 60 return iint;
61 iint = kmem_cache_alloc(iint_cache, GFP_KERNEL); 61 iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
62 if (!iint) 62 if (!iint)
63 return iint; 63 return iint;
64 64
65 rc = radix_tree_preload(GFP_KERNEL); 65 rc = radix_tree_preload(GFP_NOFS);
66 if (rc < 0) 66 if (rc < 0)
67 goto out; 67 goto out;
68 68
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 3bb90b6f1dd3..51bd0fd9c9f0 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -354,6 +354,10 @@ static void dump_common_audit_data(struct audit_buffer *ab,
354 } 354 }
355 break; 355 break;
356#endif 356#endif
357 case LSM_AUDIT_DATA_KMOD:
358 audit_log_format(ab, " kmod=");
359 audit_log_untrustedstring(ab, a->u.kmod_name);
360 break;
357 } /* switch (a->type) */ 361 } /* switch (a->type) */
358} 362}
359 363
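
The new LSM_AUDIT_DATA_KMOD case reads a->u.kmod_name, so the common_audit_data structure in include/linux/lsm_audit.h presumably gains a matching type value and union member in the same series; that header change is not shown in this hunk. A simplified sketch of what it implies:

/* sketch only; layout simplified and the numeric value is illustrative */
#define LSM_AUDIT_DATA_KMOD	8

struct common_audit_data {
	char type;
	union {
		/* ...existing members elided... */
		char *kmod_name;	/* module name being requested */
	} u;
};
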
diff --git a/security/min_addr.c b/security/min_addr.c
index c844eed7915d..fc43c9d37084 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -33,6 +33,9 @@ int mmap_min_addr_handler(struct ctl_table *table, int write,
33{ 33{
34 int ret; 34 int ret;
35 35
36 if (!capable(CAP_SYS_RAWIO))
37 return -EPERM;
38
36 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); 39 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
37 40
38 update_mmap_min_addr(); 41 update_mmap_min_addr();
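
mmap_min_addr_handler() now rejects writers without CAP_SYS_RAWIO before proc_doulongvec_minmax() is allowed to update the value. For context, a hedged sketch of the kind of ctl_table entry that routes /proc/sys/vm/mmap_min_addr through this handler; the real entry lives in kernel/sysctl.c and may differ in detail:

/* illustrative only; see kernel/sysctl.c for the real entry */
static struct ctl_table mmap_min_addr_entry[] = {
	{
		.procname	= "mmap_min_addr",
		.data		= &dac_mmap_min_addr,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= mmap_min_addr_handler,
	},
	{ }
};
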
diff --git a/security/root_plug.c b/security/root_plug.c
deleted file mode 100644
index 2f7ffa67c4d2..000000000000
--- a/security/root_plug.c
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * Root Plug sample LSM module
3 *
4 * Originally written for a Linux Journal.
5 *
6 * Copyright (C) 2002 Greg Kroah-Hartman <greg@kroah.com>
7 *
8 * Prevents any programs running with egid == 0 if a specific USB device
9 * is not present in the system. Yes, it can be gotten around, but is a
10 * nice starting point for people to play with, and learn the LSM
11 * interface.
12 *
13 * If you want to turn this into something with a semblance of security,
14 * you need to hook the task_* functions also.
15 *
16 * See http://www.linuxjournal.com/article.php?sid=6279 for more information
17 * about this code.
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License as
21 * published by the Free Software Foundation, version 2 of the
22 * License.
23 */
24
25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/security.h>
28#include <linux/usb.h>
29#include <linux/moduleparam.h>
30
31/* default is a generic type of usb to serial converter */
32static int vendor_id = 0x0557;
33static int product_id = 0x2008;
34
35module_param(vendor_id, uint, 0400);
36module_param(product_id, uint, 0400);
37
38/* should we print out debug messages */
39static int debug = 0;
40
41module_param(debug, bool, 0600);
42
43#define MY_NAME "root_plug"
44
45#define root_dbg(fmt, arg...) \
46 do { \
47 if (debug) \
48 printk(KERN_DEBUG "%s: %s: " fmt , \
49 MY_NAME , __func__ , \
50 ## arg); \
51 } while (0)
52
53static int rootplug_bprm_check_security (struct linux_binprm *bprm)
54{
55 struct usb_device *dev;
56
57 root_dbg("file %s, e_uid = %d, e_gid = %d\n",
58 bprm->filename, bprm->cred->euid, bprm->cred->egid);
59
60 if (bprm->cred->egid == 0) {
61 dev = usb_find_device(vendor_id, product_id);
62 if (!dev) {
63 root_dbg("e_gid = 0, and device not found, "
64 "task not allowed to run...\n");
65 return -EPERM;
66 }
67 usb_put_dev(dev);
68 }
69
70 return 0;
71}
72
73static struct security_operations rootplug_security_ops = {
74 .bprm_check_security = rootplug_bprm_check_security,
75};
76
77static int __init rootplug_init (void)
78{
79 /* register ourselves with the security framework */
80 if (register_security (&rootplug_security_ops)) {
81 printk (KERN_INFO
82 "Failure registering Root Plug module with the kernel\n");
83 return -EINVAL;
84 }
85 printk (KERN_INFO "Root Plug module initialized, "
86 "vendor_id = %4.4x, product id = %4.4x\n", vendor_id, product_id);
87 return 0;
88}
89
90security_initcall (rootplug_init);
diff --git a/security/security.c b/security/security.c
index c4c673240c1c..24e060be9fa5 100644
--- a/security/security.c
+++ b/security/security.c
@@ -16,9 +16,11 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/security.h> 18#include <linux/security.h>
19#include <linux/ima.h>
19 20
20/* Boot-time LSM user choice */ 21/* Boot-time LSM user choice */
21static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1]; 22static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
23 CONFIG_DEFAULT_SECURITY;
22 24
23/* things that live in capability.c */ 25/* things that live in capability.c */
24extern struct security_operations default_security_ops; 26extern struct security_operations default_security_ops;
@@ -79,8 +81,10 @@ __setup("security=", choose_lsm);
79 * 81 *
80 * Return true if: 82 * Return true if:
81 * -The passed LSM is the one chosen by user at boot time, 83 * -The passed LSM is the one chosen by user at boot time,
82 * -or user didn't specify a specific LSM and we're the first to ask 84 * -or the passed LSM is configured as the default and the user did not
83 * for registration permission, 85 * choose an alternate LSM at boot time,
86 * -or there is no default LSM set and the user didn't specify a
87 * specific LSM and we're the first to ask for registration permission,
84 * -or the passed LSM is currently loaded. 88 * -or the passed LSM is currently loaded.
85 * Otherwise, return false. 89 * Otherwise, return false.
86 */ 90 */
@@ -235,7 +239,12 @@ int security_bprm_set_creds(struct linux_binprm *bprm)
235 239
236int security_bprm_check(struct linux_binprm *bprm) 240int security_bprm_check(struct linux_binprm *bprm)
237{ 241{
238 return security_ops->bprm_check_security(bprm); 242 int ret;
243
244 ret = security_ops->bprm_check_security(bprm);
245 if (ret)
246 return ret;
247 return ima_bprm_check(bprm);
239} 248}
240 249
241void security_bprm_committing_creds(struct linux_binprm *bprm) 250void security_bprm_committing_creds(struct linux_binprm *bprm)
@@ -352,12 +361,21 @@ EXPORT_SYMBOL(security_sb_parse_opts_str);
352 361
353int security_inode_alloc(struct inode *inode) 362int security_inode_alloc(struct inode *inode)
354{ 363{
364 int ret;
365
355 inode->i_security = NULL; 366 inode->i_security = NULL;
356 return security_ops->inode_alloc_security(inode); 367 ret = security_ops->inode_alloc_security(inode);
368 if (ret)
369 return ret;
370 ret = ima_inode_alloc(inode);
371 if (ret)
372 security_inode_free(inode);
373 return ret;
357} 374}
358 375
359void security_inode_free(struct inode *inode) 376void security_inode_free(struct inode *inode)
360{ 377{
378 ima_inode_free(inode);
361 security_ops->inode_free_security(inode); 379 security_ops->inode_free_security(inode);
362} 380}
363 381
@@ -434,6 +452,26 @@ int security_path_truncate(struct path *path, loff_t length,
434 return 0; 452 return 0;
435 return security_ops->path_truncate(path, length, time_attrs); 453 return security_ops->path_truncate(path, length, time_attrs);
436} 454}
455
456int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
457 mode_t mode)
458{
459 if (unlikely(IS_PRIVATE(dentry->d_inode)))
460 return 0;
461 return security_ops->path_chmod(dentry, mnt, mode);
462}
463
464int security_path_chown(struct path *path, uid_t uid, gid_t gid)
465{
466 if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
467 return 0;
468 return security_ops->path_chown(path, uid, gid);
469}
470
471int security_path_chroot(struct path *path)
472{
473 return security_ops->path_chroot(path);
474}
437#endif 475#endif
438 476
439int security_inode_create(struct inode *dir, struct dentry *dentry, int mode) 477int security_inode_create(struct inode *dir, struct dentry *dentry, int mode)
@@ -628,6 +666,8 @@ int security_file_alloc(struct file *file)
628void security_file_free(struct file *file) 666void security_file_free(struct file *file)
629{ 667{
630 security_ops->file_free_security(file); 668 security_ops->file_free_security(file);
669 if (file->f_dentry)
670 ima_file_free(file);
631} 671}
632 672
633int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 673int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -639,7 +679,12 @@ int security_file_mmap(struct file *file, unsigned long reqprot,
639 unsigned long prot, unsigned long flags, 679 unsigned long prot, unsigned long flags,
640 unsigned long addr, unsigned long addr_only) 680 unsigned long addr, unsigned long addr_only)
641{ 681{
642 return security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only); 682 int ret;
683
684 ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only);
685 if (ret)
686 return ret;
687 return ima_file_mmap(file, prot);
643} 688}
644 689
645int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, 690int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
@@ -719,9 +764,9 @@ int security_kernel_create_files_as(struct cred *new, struct inode *inode)
719 return security_ops->kernel_create_files_as(new, inode); 764 return security_ops->kernel_create_files_as(new, inode);
720} 765}
721 766
722int security_kernel_module_request(void) 767int security_kernel_module_request(char *kmod_name)
723{ 768{
724 return security_ops->kernel_module_request(); 769 return security_ops->kernel_module_request(kmod_name);
725} 770}
726 771
727int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) 772int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
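
security_kernel_module_request() now takes the module name so the LSM can record which module was asked for (SELinux uses it for the audit record in the hooks.c hunk below). A hypothetical sketch of the caller side, loosely modelled on the request_module() path in kernel/kmod.c; the wrapper name and error handling here are illustrative:

/* hypothetical wrapper showing where the name is handed to the LSM */
int request_module_checked(char *module_name)
{
	int ret;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	/* ...invoke the usermode helper with module_name as before... */
	return 0;
}
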
diff --git a/security/selinux/.gitignore b/security/selinux/.gitignore
new file mode 100644
index 000000000000..2e5040a3d48b
--- /dev/null
+++ b/security/selinux/.gitignore
@@ -0,0 +1,2 @@
1av_permissions.h
2flask.h
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index d47fc5e545e0..f013982df417 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -18,5 +18,13 @@ selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
18 18
19selinux-$(CONFIG_NETLABEL) += netlabel.o 19selinux-$(CONFIG_NETLABEL) += netlabel.o
20 20
21EXTRA_CFLAGS += -Isecurity/selinux/include 21EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
22 22
23$(obj)/avc.o: $(obj)/flask.h
24
25quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
26 cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
27
28targets += flask.h
29$(obj)/flask.h: $(src)/include/classmap.h FORCE
30 $(call if_changed,flask)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index b4b5da1c0a42..f2dde268165a 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -31,43 +31,7 @@
31#include <net/ipv6.h> 31#include <net/ipv6.h>
32#include "avc.h" 32#include "avc.h"
33#include "avc_ss.h" 33#include "avc_ss.h"
34 34#include "classmap.h"
35static const struct av_perm_to_string av_perm_to_string[] = {
36#define S_(c, v, s) { c, v, s },
37#include "av_perm_to_string.h"
38#undef S_
39};
40
41static const char *class_to_string[] = {
42#define S_(s) s,
43#include "class_to_string.h"
44#undef S_
45};
46
47#define TB_(s) static const char *s[] = {
48#define TE_(s) };
49#define S_(s) s,
50#include "common_perm_to_string.h"
51#undef TB_
52#undef TE_
53#undef S_
54
55static const struct av_inherit av_inherit[] = {
56#define S_(c, i, b) { .tclass = c,\
57 .common_pts = common_##i##_perm_to_string,\
58 .common_base = b },
59#include "av_inherit.h"
60#undef S_
61};
62
63const struct selinux_class_perm selinux_class_perm = {
64 .av_perm_to_string = av_perm_to_string,
65 .av_pts_len = ARRAY_SIZE(av_perm_to_string),
66 .class_to_string = class_to_string,
67 .cts_len = ARRAY_SIZE(class_to_string),
68 .av_inherit = av_inherit,
69 .av_inherit_len = ARRAY_SIZE(av_inherit)
70};
71 35
72#define AVC_CACHE_SLOTS 512 36#define AVC_CACHE_SLOTS 512
73#define AVC_DEF_CACHE_THRESHOLD 512 37#define AVC_DEF_CACHE_THRESHOLD 512
@@ -139,52 +103,28 @@ static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
139 */ 103 */
140static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av) 104static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
141{ 105{
142 const char **common_pts = NULL; 106 const char **perms;
143 u32 common_base = 0; 107 int i, perm;
144 int i, i2, perm;
145 108
146 if (av == 0) { 109 if (av == 0) {
147 audit_log_format(ab, " null"); 110 audit_log_format(ab, " null");
148 return; 111 return;
149 } 112 }
150 113
151 for (i = 0; i < ARRAY_SIZE(av_inherit); i++) { 114 perms = secclass_map[tclass-1].perms;
152 if (av_inherit[i].tclass == tclass) {
153 common_pts = av_inherit[i].common_pts;
154 common_base = av_inherit[i].common_base;
155 break;
156 }
157 }
158 115
159 audit_log_format(ab, " {"); 116 audit_log_format(ab, " {");
160 i = 0; 117 i = 0;
161 perm = 1; 118 perm = 1;
162 while (perm < common_base) { 119 while (i < (sizeof(av) * 8)) {
163 if (perm & av) { 120 if ((perm & av) && perms[i]) {
164 audit_log_format(ab, " %s", common_pts[i]); 121 audit_log_format(ab, " %s", perms[i]);
165 av &= ~perm; 122 av &= ~perm;
166 } 123 }
167 i++; 124 i++;
168 perm <<= 1; 125 perm <<= 1;
169 } 126 }
170 127
171 while (i < sizeof(av) * 8) {
172 if (perm & av) {
173 for (i2 = 0; i2 < ARRAY_SIZE(av_perm_to_string); i2++) {
174 if ((av_perm_to_string[i2].tclass == tclass) &&
175 (av_perm_to_string[i2].value == perm))
176 break;
177 }
178 if (i2 < ARRAY_SIZE(av_perm_to_string)) {
179 audit_log_format(ab, " %s",
180 av_perm_to_string[i2].name);
181 av &= ~perm;
182 }
183 }
184 i++;
185 perm <<= 1;
186 }
187
188 if (av) 128 if (av)
189 audit_log_format(ab, " 0x%x", av); 129 audit_log_format(ab, " 0x%x", av);
190 130
@@ -219,8 +159,8 @@ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tcla
219 kfree(scontext); 159 kfree(scontext);
220 } 160 }
221 161
222 BUG_ON(tclass >= ARRAY_SIZE(class_to_string) || !class_to_string[tclass]); 162 BUG_ON(tclass >= ARRAY_SIZE(secclass_map));
223 audit_log_format(ab, " tclass=%s", class_to_string[tclass]); 163 audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name);
224} 164}
225 165
226/** 166/**
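
The rewritten avc_dump_av() no longer needs the common/inherit tables: bit i of the access vector is simply named by perms[i] of the class's secclass_map entry, and any bit without a name is printed raw. A small user-space sketch of that decoding, using a hypothetical two-permission class sized like a classmap.h entry:

#include <stdio.h>

/* hypothetical perms table for one class; unused slots stay NULL */
static const char *perms[sizeof(unsigned) * 8 + 1] = {
	"compute_av", "load_policy",
};

static void dump_av(unsigned int av)
{
	unsigned int perm = 1;
	unsigned int i = 0;

	printf(" {");
	while (i < sizeof(av) * 8) {
		if ((perm & av) && perms[i]) {
			printf(" %s", perms[i]);
			av &= ~perm;
		}
		i++;
		perm <<= 1;
	}
	if (av)
		printf(" 0x%x", av);	/* unnamed bits are shown raw */
	printf(" }\n");
}

int main(void)
{
	dump_av(0x3);	/* -> { compute_av load_policy } */
	dump_av(0x5);	/* -> { compute_av 0x4 } */
	return 0;
}
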
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index bb230d5d7085..c96d63ec4753 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -91,7 +91,6 @@
91 91
92#define NUM_SEL_MNT_OPTS 5 92#define NUM_SEL_MNT_OPTS 5
93 93
94extern unsigned int policydb_loaded_version;
95extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); 94extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
96extern struct security_operations *security_ops; 95extern struct security_operations *security_ops;
97 96
@@ -3338,9 +3337,18 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
3338 return 0; 3337 return 0;
3339} 3338}
3340 3339
3341static int selinux_kernel_module_request(void) 3340static int selinux_kernel_module_request(char *kmod_name)
3342{ 3341{
3343 return task_has_system(current, SYSTEM__MODULE_REQUEST); 3342 u32 sid;
3343 struct common_audit_data ad;
3344
3345 sid = task_sid(current);
3346
3347 COMMON_AUDIT_DATA_INIT(&ad, KMOD);
3348 ad.u.kmod_name = kmod_name;
3349
3350 return avc_has_perm(sid, SECINITSID_KERNEL, SECCLASS_SYSTEM,
3351 SYSTEM__MODULE_REQUEST, &ad);
3344} 3352}
3345 3353
3346static int selinux_task_setpgid(struct task_struct *p, pid_t pgid) 3354static int selinux_task_setpgid(struct task_struct *p, pid_t pgid)
@@ -4714,10 +4722,7 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
4714 if (err) 4722 if (err)
4715 return err; 4723 return err;
4716 4724
4717 if (policydb_loaded_version >= POLICYDB_VERSION_NLCLASS) 4725 return selinux_nlmsg_perm(sk, skb);
4718 err = selinux_nlmsg_perm(sk, skb);
4719
4720 return err;
4721} 4726}
4722 4727
4723static int selinux_netlink_recv(struct sk_buff *skb, int capability) 4728static int selinux_netlink_recv(struct sk_buff *skb, int capability)
@@ -5830,12 +5835,12 @@ int selinux_disable(void)
5830 selinux_disabled = 1; 5835 selinux_disabled = 1;
5831 selinux_enabled = 0; 5836 selinux_enabled = 0;
5832 5837
5833 /* Try to destroy the avc node cache */
5834 avc_disable();
5835
5836 /* Reset security_ops to the secondary module, dummy or capability. */ 5838 /* Reset security_ops to the secondary module, dummy or capability. */
5837 security_ops = secondary_ops; 5839 security_ops = secondary_ops;
5838 5840
5841 /* Try to destroy the avc node cache */
5842 avc_disable();
5843
5839 /* Unregister netfilter hooks. */ 5844 /* Unregister netfilter hooks. */
5840 selinux_nf_ip_exit(); 5845 selinux_nf_ip_exit();
5841 5846
diff --git a/security/selinux/include/av_inherit.h b/security/selinux/include/av_inherit.h
deleted file mode 100644
index abedcd704dae..000000000000
--- a/security/selinux/include/av_inherit.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2 S_(SECCLASS_DIR, file, 0x00020000UL)
3 S_(SECCLASS_FILE, file, 0x00020000UL)
4 S_(SECCLASS_LNK_FILE, file, 0x00020000UL)
5 S_(SECCLASS_CHR_FILE, file, 0x00020000UL)
6 S_(SECCLASS_BLK_FILE, file, 0x00020000UL)
7 S_(SECCLASS_SOCK_FILE, file, 0x00020000UL)
8 S_(SECCLASS_FIFO_FILE, file, 0x00020000UL)
9 S_(SECCLASS_SOCKET, socket, 0x00400000UL)
10 S_(SECCLASS_TCP_SOCKET, socket, 0x00400000UL)
11 S_(SECCLASS_UDP_SOCKET, socket, 0x00400000UL)
12 S_(SECCLASS_RAWIP_SOCKET, socket, 0x00400000UL)
13 S_(SECCLASS_NETLINK_SOCKET, socket, 0x00400000UL)
14 S_(SECCLASS_PACKET_SOCKET, socket, 0x00400000UL)
15 S_(SECCLASS_KEY_SOCKET, socket, 0x00400000UL)
16 S_(SECCLASS_UNIX_STREAM_SOCKET, socket, 0x00400000UL)
17 S_(SECCLASS_UNIX_DGRAM_SOCKET, socket, 0x00400000UL)
18 S_(SECCLASS_TUN_SOCKET, socket, 0x00400000UL)
19 S_(SECCLASS_IPC, ipc, 0x00000200UL)
20 S_(SECCLASS_SEM, ipc, 0x00000200UL)
21 S_(SECCLASS_MSGQ, ipc, 0x00000200UL)
22 S_(SECCLASS_SHM, ipc, 0x00000200UL)
23 S_(SECCLASS_NETLINK_ROUTE_SOCKET, socket, 0x00400000UL)
24 S_(SECCLASS_NETLINK_FIREWALL_SOCKET, socket, 0x00400000UL)
25 S_(SECCLASS_NETLINK_TCPDIAG_SOCKET, socket, 0x00400000UL)
26 S_(SECCLASS_NETLINK_NFLOG_SOCKET, socket, 0x00400000UL)
27 S_(SECCLASS_NETLINK_XFRM_SOCKET, socket, 0x00400000UL)
28 S_(SECCLASS_NETLINK_SELINUX_SOCKET, socket, 0x00400000UL)
29 S_(SECCLASS_NETLINK_AUDIT_SOCKET, socket, 0x00400000UL)
30 S_(SECCLASS_NETLINK_IP6FW_SOCKET, socket, 0x00400000UL)
31 S_(SECCLASS_NETLINK_DNRT_SOCKET, socket, 0x00400000UL)
32 S_(SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET, socket, 0x00400000UL)
33 S_(SECCLASS_APPLETALK_SOCKET, socket, 0x00400000UL)
34 S_(SECCLASS_DCCP_SOCKET, socket, 0x00400000UL)
diff --git a/security/selinux/include/av_perm_to_string.h b/security/selinux/include/av_perm_to_string.h
deleted file mode 100644
index 2b683ad83d21..000000000000
--- a/security/selinux/include/av_perm_to_string.h
+++ /dev/null
@@ -1,183 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2 S_(SECCLASS_FILESYSTEM, FILESYSTEM__MOUNT, "mount")
3 S_(SECCLASS_FILESYSTEM, FILESYSTEM__REMOUNT, "remount")
4 S_(SECCLASS_FILESYSTEM, FILESYSTEM__UNMOUNT, "unmount")
5 S_(SECCLASS_FILESYSTEM, FILESYSTEM__GETATTR, "getattr")
6 S_(SECCLASS_FILESYSTEM, FILESYSTEM__RELABELFROM, "relabelfrom")
7 S_(SECCLASS_FILESYSTEM, FILESYSTEM__RELABELTO, "relabelto")
8 S_(SECCLASS_FILESYSTEM, FILESYSTEM__TRANSITION, "transition")
9 S_(SECCLASS_FILESYSTEM, FILESYSTEM__ASSOCIATE, "associate")
10 S_(SECCLASS_FILESYSTEM, FILESYSTEM__QUOTAMOD, "quotamod")
11 S_(SECCLASS_FILESYSTEM, FILESYSTEM__QUOTAGET, "quotaget")
12 S_(SECCLASS_DIR, DIR__ADD_NAME, "add_name")
13 S_(SECCLASS_DIR, DIR__REMOVE_NAME, "remove_name")
14 S_(SECCLASS_DIR, DIR__REPARENT, "reparent")
15 S_(SECCLASS_DIR, DIR__SEARCH, "search")
16 S_(SECCLASS_DIR, DIR__RMDIR, "rmdir")
17 S_(SECCLASS_DIR, DIR__OPEN, "open")
18 S_(SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, "execute_no_trans")
19 S_(SECCLASS_FILE, FILE__ENTRYPOINT, "entrypoint")
20 S_(SECCLASS_FILE, FILE__EXECMOD, "execmod")
21 S_(SECCLASS_FILE, FILE__OPEN, "open")
22 S_(SECCLASS_CHR_FILE, CHR_FILE__EXECUTE_NO_TRANS, "execute_no_trans")
23 S_(SECCLASS_CHR_FILE, CHR_FILE__ENTRYPOINT, "entrypoint")
24 S_(SECCLASS_CHR_FILE, CHR_FILE__EXECMOD, "execmod")
25 S_(SECCLASS_CHR_FILE, CHR_FILE__OPEN, "open")
26 S_(SECCLASS_BLK_FILE, BLK_FILE__OPEN, "open")
27 S_(SECCLASS_SOCK_FILE, SOCK_FILE__OPEN, "open")
28 S_(SECCLASS_FIFO_FILE, FIFO_FILE__OPEN, "open")
29 S_(SECCLASS_FD, FD__USE, "use")
30 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__CONNECTTO, "connectto")
31 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NEWCONN, "newconn")
32 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__ACCEPTFROM, "acceptfrom")
33 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NODE_BIND, "node_bind")
34 S_(SECCLASS_TCP_SOCKET, TCP_SOCKET__NAME_CONNECT, "name_connect")
35 S_(SECCLASS_UDP_SOCKET, UDP_SOCKET__NODE_BIND, "node_bind")
36 S_(SECCLASS_RAWIP_SOCKET, RAWIP_SOCKET__NODE_BIND, "node_bind")
37 S_(SECCLASS_NODE, NODE__TCP_RECV, "tcp_recv")
38 S_(SECCLASS_NODE, NODE__TCP_SEND, "tcp_send")
39 S_(SECCLASS_NODE, NODE__UDP_RECV, "udp_recv")
40 S_(SECCLASS_NODE, NODE__UDP_SEND, "udp_send")
41 S_(SECCLASS_NODE, NODE__RAWIP_RECV, "rawip_recv")
42 S_(SECCLASS_NODE, NODE__RAWIP_SEND, "rawip_send")
43 S_(SECCLASS_NODE, NODE__ENFORCE_DEST, "enforce_dest")
44 S_(SECCLASS_NODE, NODE__DCCP_RECV, "dccp_recv")
45 S_(SECCLASS_NODE, NODE__DCCP_SEND, "dccp_send")
46 S_(SECCLASS_NODE, NODE__RECVFROM, "recvfrom")
47 S_(SECCLASS_NODE, NODE__SENDTO, "sendto")
48 S_(SECCLASS_NETIF, NETIF__TCP_RECV, "tcp_recv")
49 S_(SECCLASS_NETIF, NETIF__TCP_SEND, "tcp_send")
50 S_(SECCLASS_NETIF, NETIF__UDP_RECV, "udp_recv")
51 S_(SECCLASS_NETIF, NETIF__UDP_SEND, "udp_send")
52 S_(SECCLASS_NETIF, NETIF__RAWIP_RECV, "rawip_recv")
53 S_(SECCLASS_NETIF, NETIF__RAWIP_SEND, "rawip_send")
54 S_(SECCLASS_NETIF, NETIF__DCCP_RECV, "dccp_recv")
55 S_(SECCLASS_NETIF, NETIF__DCCP_SEND, "dccp_send")
56 S_(SECCLASS_NETIF, NETIF__INGRESS, "ingress")
57 S_(SECCLASS_NETIF, NETIF__EGRESS, "egress")
58 S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__CONNECTTO, "connectto")
59 S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__NEWCONN, "newconn")
60 S_(SECCLASS_UNIX_STREAM_SOCKET, UNIX_STREAM_SOCKET__ACCEPTFROM, "acceptfrom")
61 S_(SECCLASS_PROCESS, PROCESS__FORK, "fork")
62 S_(SECCLASS_PROCESS, PROCESS__TRANSITION, "transition")
63 S_(SECCLASS_PROCESS, PROCESS__SIGCHLD, "sigchld")
64 S_(SECCLASS_PROCESS, PROCESS__SIGKILL, "sigkill")
65 S_(SECCLASS_PROCESS, PROCESS__SIGSTOP, "sigstop")
66 S_(SECCLASS_PROCESS, PROCESS__SIGNULL, "signull")
67 S_(SECCLASS_PROCESS, PROCESS__SIGNAL, "signal")
68 S_(SECCLASS_PROCESS, PROCESS__PTRACE, "ptrace")
69 S_(SECCLASS_PROCESS, PROCESS__GETSCHED, "getsched")
70 S_(SECCLASS_PROCESS, PROCESS__SETSCHED, "setsched")
71 S_(SECCLASS_PROCESS, PROCESS__GETSESSION, "getsession")
72 S_(SECCLASS_PROCESS, PROCESS__GETPGID, "getpgid")
73 S_(SECCLASS_PROCESS, PROCESS__SETPGID, "setpgid")
74 S_(SECCLASS_PROCESS, PROCESS__GETCAP, "getcap")
75 S_(SECCLASS_PROCESS, PROCESS__SETCAP, "setcap")
76 S_(SECCLASS_PROCESS, PROCESS__SHARE, "share")
77 S_(SECCLASS_PROCESS, PROCESS__GETATTR, "getattr")
78 S_(SECCLASS_PROCESS, PROCESS__SETEXEC, "setexec")
79 S_(SECCLASS_PROCESS, PROCESS__SETFSCREATE, "setfscreate")
80 S_(SECCLASS_PROCESS, PROCESS__NOATSECURE, "noatsecure")
81 S_(SECCLASS_PROCESS, PROCESS__SIGINH, "siginh")
82 S_(SECCLASS_PROCESS, PROCESS__SETRLIMIT, "setrlimit")
83 S_(SECCLASS_PROCESS, PROCESS__RLIMITINH, "rlimitinh")
84 S_(SECCLASS_PROCESS, PROCESS__DYNTRANSITION, "dyntransition")
85 S_(SECCLASS_PROCESS, PROCESS__SETCURRENT, "setcurrent")
86 S_(SECCLASS_PROCESS, PROCESS__EXECMEM, "execmem")
87 S_(SECCLASS_PROCESS, PROCESS__EXECSTACK, "execstack")
88 S_(SECCLASS_PROCESS, PROCESS__EXECHEAP, "execheap")
89 S_(SECCLASS_PROCESS, PROCESS__SETKEYCREATE, "setkeycreate")
90 S_(SECCLASS_PROCESS, PROCESS__SETSOCKCREATE, "setsockcreate")
91 S_(SECCLASS_MSGQ, MSGQ__ENQUEUE, "enqueue")
92 S_(SECCLASS_MSG, MSG__SEND, "send")
93 S_(SECCLASS_MSG, MSG__RECEIVE, "receive")
94 S_(SECCLASS_SHM, SHM__LOCK, "lock")
95 S_(SECCLASS_SECURITY, SECURITY__COMPUTE_AV, "compute_av")
96 S_(SECCLASS_SECURITY, SECURITY__COMPUTE_CREATE, "compute_create")
97 S_(SECCLASS_SECURITY, SECURITY__COMPUTE_MEMBER, "compute_member")
98 S_(SECCLASS_SECURITY, SECURITY__CHECK_CONTEXT, "check_context")
99 S_(SECCLASS_SECURITY, SECURITY__LOAD_POLICY, "load_policy")
100 S_(SECCLASS_SECURITY, SECURITY__COMPUTE_RELABEL, "compute_relabel")
101 S_(SECCLASS_SECURITY, SECURITY__COMPUTE_USER, "compute_user")
102 S_(SECCLASS_SECURITY, SECURITY__SETENFORCE, "setenforce")
103 S_(SECCLASS_SECURITY, SECURITY__SETBOOL, "setbool")
104 S_(SECCLASS_SECURITY, SECURITY__SETSECPARAM, "setsecparam")
105 S_(SECCLASS_SECURITY, SECURITY__SETCHECKREQPROT, "setcheckreqprot")
106 S_(SECCLASS_SYSTEM, SYSTEM__IPC_INFO, "ipc_info")
107 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_READ, "syslog_read")
108 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_MOD, "syslog_mod")
109 S_(SECCLASS_SYSTEM, SYSTEM__SYSLOG_CONSOLE, "syslog_console")
110 S_(SECCLASS_SYSTEM, SYSTEM__MODULE_REQUEST, "module_request")
111 S_(SECCLASS_CAPABILITY, CAPABILITY__CHOWN, "chown")
112 S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_OVERRIDE, "dac_override")
113 S_(SECCLASS_CAPABILITY, CAPABILITY__DAC_READ_SEARCH, "dac_read_search")
114 S_(SECCLASS_CAPABILITY, CAPABILITY__FOWNER, "fowner")
115 S_(SECCLASS_CAPABILITY, CAPABILITY__FSETID, "fsetid")
116 S_(SECCLASS_CAPABILITY, CAPABILITY__KILL, "kill")
117 S_(SECCLASS_CAPABILITY, CAPABILITY__SETGID, "setgid")
118 S_(SECCLASS_CAPABILITY, CAPABILITY__SETUID, "setuid")
119 S_(SECCLASS_CAPABILITY, CAPABILITY__SETPCAP, "setpcap")
120 S_(SECCLASS_CAPABILITY, CAPABILITY__LINUX_IMMUTABLE, "linux_immutable")
121 S_(SECCLASS_CAPABILITY, CAPABILITY__NET_BIND_SERVICE, "net_bind_service")
122 S_(SECCLASS_CAPABILITY, CAPABILITY__NET_BROADCAST, "net_broadcast")
123 S_(SECCLASS_CAPABILITY, CAPABILITY__NET_ADMIN, "net_admin")
124 S_(SECCLASS_CAPABILITY, CAPABILITY__NET_RAW, "net_raw")
125 S_(SECCLASS_CAPABILITY, CAPABILITY__IPC_LOCK, "ipc_lock")
126 S_(SECCLASS_CAPABILITY, CAPABILITY__IPC_OWNER, "ipc_owner")
127 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_MODULE, "sys_module")
128 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_RAWIO, "sys_rawio")
129 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_CHROOT, "sys_chroot")
130 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_PTRACE, "sys_ptrace")
131 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_PACCT, "sys_pacct")
132 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_ADMIN, "sys_admin")
133 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_BOOT, "sys_boot")
134 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_NICE, "sys_nice")
135 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_RESOURCE, "sys_resource")
136 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_TIME, "sys_time")
137 S_(SECCLASS_CAPABILITY, CAPABILITY__SYS_TTY_CONFIG, "sys_tty_config")
138 S_(SECCLASS_CAPABILITY, CAPABILITY__MKNOD, "mknod")
139 S_(SECCLASS_CAPABILITY, CAPABILITY__LEASE, "lease")
140 S_(SECCLASS_CAPABILITY, CAPABILITY__AUDIT_WRITE, "audit_write")
141 S_(SECCLASS_CAPABILITY, CAPABILITY__AUDIT_CONTROL, "audit_control")
142 S_(SECCLASS_CAPABILITY, CAPABILITY__SETFCAP, "setfcap")
143 S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_OVERRIDE, "mac_override")
144 S_(SECCLASS_CAPABILITY2, CAPABILITY2__MAC_ADMIN, "mac_admin")
145 S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_READ, "nlmsg_read")
146 S_(SECCLASS_NETLINK_ROUTE_SOCKET, NETLINK_ROUTE_SOCKET__NLMSG_WRITE, "nlmsg_write")
147 S_(SECCLASS_NETLINK_FIREWALL_SOCKET, NETLINK_FIREWALL_SOCKET__NLMSG_READ, "nlmsg_read")
148 S_(SECCLASS_NETLINK_FIREWALL_SOCKET, NETLINK_FIREWALL_SOCKET__NLMSG_WRITE, "nlmsg_write")
149 S_(SECCLASS_NETLINK_TCPDIAG_SOCKET, NETLINK_TCPDIAG_SOCKET__NLMSG_READ, "nlmsg_read")
150 S_(SECCLASS_NETLINK_TCPDIAG_SOCKET, NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE, "nlmsg_write")
151 S_(SECCLASS_NETLINK_XFRM_SOCKET, NETLINK_XFRM_SOCKET__NLMSG_READ, "nlmsg_read")
152 S_(SECCLASS_NETLINK_XFRM_SOCKET, NETLINK_XFRM_SOCKET__NLMSG_WRITE, "nlmsg_write")
153 S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_READ, "nlmsg_read")
154 S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_WRITE, "nlmsg_write")
155 S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_RELAY, "nlmsg_relay")
156 S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_READPRIV, "nlmsg_readpriv")
157 S_(SECCLASS_NETLINK_AUDIT_SOCKET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT, "nlmsg_tty_audit")
158 S_(SECCLASS_NETLINK_IP6FW_SOCKET, NETLINK_IP6FW_SOCKET__NLMSG_READ, "nlmsg_read")
159 S_(SECCLASS_NETLINK_IP6FW_SOCKET, NETLINK_IP6FW_SOCKET__NLMSG_WRITE, "nlmsg_write")
160 S_(SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, "sendto")
161 S_(SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, "recvfrom")
162 S_(SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, "setcontext")
163 S_(SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, "polmatch")
164 S_(SECCLASS_PACKET, PACKET__SEND, "send")
165 S_(SECCLASS_PACKET, PACKET__RECV, "recv")
166 S_(SECCLASS_PACKET, PACKET__RELABELTO, "relabelto")
167 S_(SECCLASS_PACKET, PACKET__FLOW_IN, "flow_in")
168 S_(SECCLASS_PACKET, PACKET__FLOW_OUT, "flow_out")
169 S_(SECCLASS_PACKET, PACKET__FORWARD_IN, "forward_in")
170 S_(SECCLASS_PACKET, PACKET__FORWARD_OUT, "forward_out")
171 S_(SECCLASS_KEY, KEY__VIEW, "view")
172 S_(SECCLASS_KEY, KEY__READ, "read")
173 S_(SECCLASS_KEY, KEY__WRITE, "write")
174 S_(SECCLASS_KEY, KEY__SEARCH, "search")
175 S_(SECCLASS_KEY, KEY__LINK, "link")
176 S_(SECCLASS_KEY, KEY__SETATTR, "setattr")
177 S_(SECCLASS_KEY, KEY__CREATE, "create")
178 S_(SECCLASS_DCCP_SOCKET, DCCP_SOCKET__NODE_BIND, "node_bind")
179 S_(SECCLASS_DCCP_SOCKET, DCCP_SOCKET__NAME_CONNECT, "name_connect")
180 S_(SECCLASS_MEMPROTECT, MEMPROTECT__MMAP_ZERO, "mmap_zero")
181 S_(SECCLASS_PEER, PEER__RECV, "recv")
182 S_(SECCLASS_KERNEL_SERVICE, KERNEL_SERVICE__USE_AS_OVERRIDE, "use_as_override")
183 S_(SECCLASS_KERNEL_SERVICE, KERNEL_SERVICE__CREATE_FILES_AS, "create_files_as")
diff --git a/security/selinux/include/av_permissions.h b/security/selinux/include/av_permissions.h
deleted file mode 100644
index 0546d616ccac..000000000000
--- a/security/selinux/include/av_permissions.h
+++ /dev/null
@@ -1,870 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2#define COMMON_FILE__IOCTL 0x00000001UL
3#define COMMON_FILE__READ 0x00000002UL
4#define COMMON_FILE__WRITE 0x00000004UL
5#define COMMON_FILE__CREATE 0x00000008UL
6#define COMMON_FILE__GETATTR 0x00000010UL
7#define COMMON_FILE__SETATTR 0x00000020UL
8#define COMMON_FILE__LOCK 0x00000040UL
9#define COMMON_FILE__RELABELFROM 0x00000080UL
10#define COMMON_FILE__RELABELTO 0x00000100UL
11#define COMMON_FILE__APPEND 0x00000200UL
12#define COMMON_FILE__UNLINK 0x00000400UL
13#define COMMON_FILE__LINK 0x00000800UL
14#define COMMON_FILE__RENAME 0x00001000UL
15#define COMMON_FILE__EXECUTE 0x00002000UL
16#define COMMON_FILE__SWAPON 0x00004000UL
17#define COMMON_FILE__QUOTAON 0x00008000UL
18#define COMMON_FILE__MOUNTON 0x00010000UL
19#define COMMON_SOCKET__IOCTL 0x00000001UL
20#define COMMON_SOCKET__READ 0x00000002UL
21#define COMMON_SOCKET__WRITE 0x00000004UL
22#define COMMON_SOCKET__CREATE 0x00000008UL
23#define COMMON_SOCKET__GETATTR 0x00000010UL
24#define COMMON_SOCKET__SETATTR 0x00000020UL
25#define COMMON_SOCKET__LOCK 0x00000040UL
26#define COMMON_SOCKET__RELABELFROM 0x00000080UL
27#define COMMON_SOCKET__RELABELTO 0x00000100UL
28#define COMMON_SOCKET__APPEND 0x00000200UL
29#define COMMON_SOCKET__BIND 0x00000400UL
30#define COMMON_SOCKET__CONNECT 0x00000800UL
31#define COMMON_SOCKET__LISTEN 0x00001000UL
32#define COMMON_SOCKET__ACCEPT 0x00002000UL
33#define COMMON_SOCKET__GETOPT 0x00004000UL
34#define COMMON_SOCKET__SETOPT 0x00008000UL
35#define COMMON_SOCKET__SHUTDOWN 0x00010000UL
36#define COMMON_SOCKET__RECVFROM 0x00020000UL
37#define COMMON_SOCKET__SENDTO 0x00040000UL
38#define COMMON_SOCKET__RECV_MSG 0x00080000UL
39#define COMMON_SOCKET__SEND_MSG 0x00100000UL
40#define COMMON_SOCKET__NAME_BIND 0x00200000UL
41#define COMMON_IPC__CREATE 0x00000001UL
42#define COMMON_IPC__DESTROY 0x00000002UL
43#define COMMON_IPC__GETATTR 0x00000004UL
44#define COMMON_IPC__SETATTR 0x00000008UL
45#define COMMON_IPC__READ 0x00000010UL
46#define COMMON_IPC__WRITE 0x00000020UL
47#define COMMON_IPC__ASSOCIATE 0x00000040UL
48#define COMMON_IPC__UNIX_READ 0x00000080UL
49#define COMMON_IPC__UNIX_WRITE 0x00000100UL
50#define FILESYSTEM__MOUNT 0x00000001UL
51#define FILESYSTEM__REMOUNT 0x00000002UL
52#define FILESYSTEM__UNMOUNT 0x00000004UL
53#define FILESYSTEM__GETATTR 0x00000008UL
54#define FILESYSTEM__RELABELFROM 0x00000010UL
55#define FILESYSTEM__RELABELTO 0x00000020UL
56#define FILESYSTEM__TRANSITION 0x00000040UL
57#define FILESYSTEM__ASSOCIATE 0x00000080UL
58#define FILESYSTEM__QUOTAMOD 0x00000100UL
59#define FILESYSTEM__QUOTAGET 0x00000200UL
60#define DIR__IOCTL 0x00000001UL
61#define DIR__READ 0x00000002UL
62#define DIR__WRITE 0x00000004UL
63#define DIR__CREATE 0x00000008UL
64#define DIR__GETATTR 0x00000010UL
65#define DIR__SETATTR 0x00000020UL
66#define DIR__LOCK 0x00000040UL
67#define DIR__RELABELFROM 0x00000080UL
68#define DIR__RELABELTO 0x00000100UL
69#define DIR__APPEND 0x00000200UL
70#define DIR__UNLINK 0x00000400UL
71#define DIR__LINK 0x00000800UL
72#define DIR__RENAME 0x00001000UL
73#define DIR__EXECUTE 0x00002000UL
74#define DIR__SWAPON 0x00004000UL
75#define DIR__QUOTAON 0x00008000UL
76#define DIR__MOUNTON 0x00010000UL
77#define DIR__ADD_NAME 0x00020000UL
78#define DIR__REMOVE_NAME 0x00040000UL
79#define DIR__REPARENT 0x00080000UL
80#define DIR__SEARCH 0x00100000UL
81#define DIR__RMDIR 0x00200000UL
82#define DIR__OPEN 0x00400000UL
83#define FILE__IOCTL 0x00000001UL
84#define FILE__READ 0x00000002UL
85#define FILE__WRITE 0x00000004UL
86#define FILE__CREATE 0x00000008UL
87#define FILE__GETATTR 0x00000010UL
88#define FILE__SETATTR 0x00000020UL
89#define FILE__LOCK 0x00000040UL
90#define FILE__RELABELFROM 0x00000080UL
91#define FILE__RELABELTO 0x00000100UL
92#define FILE__APPEND 0x00000200UL
93#define FILE__UNLINK 0x00000400UL
94#define FILE__LINK 0x00000800UL
95#define FILE__RENAME 0x00001000UL
96#define FILE__EXECUTE 0x00002000UL
97#define FILE__SWAPON 0x00004000UL
98#define FILE__QUOTAON 0x00008000UL
99#define FILE__MOUNTON 0x00010000UL
100#define FILE__EXECUTE_NO_TRANS 0x00020000UL
101#define FILE__ENTRYPOINT 0x00040000UL
102#define FILE__EXECMOD 0x00080000UL
103#define FILE__OPEN 0x00100000UL
104#define LNK_FILE__IOCTL 0x00000001UL
105#define LNK_FILE__READ 0x00000002UL
106#define LNK_FILE__WRITE 0x00000004UL
107#define LNK_FILE__CREATE 0x00000008UL
108#define LNK_FILE__GETATTR 0x00000010UL
109#define LNK_FILE__SETATTR 0x00000020UL
110#define LNK_FILE__LOCK 0x00000040UL
111#define LNK_FILE__RELABELFROM 0x00000080UL
112#define LNK_FILE__RELABELTO 0x00000100UL
113#define LNK_FILE__APPEND 0x00000200UL
114#define LNK_FILE__UNLINK 0x00000400UL
115#define LNK_FILE__LINK 0x00000800UL
116#define LNK_FILE__RENAME 0x00001000UL
117#define LNK_FILE__EXECUTE 0x00002000UL
118#define LNK_FILE__SWAPON 0x00004000UL
119#define LNK_FILE__QUOTAON 0x00008000UL
120#define LNK_FILE__MOUNTON 0x00010000UL
121#define CHR_FILE__IOCTL 0x00000001UL
122#define CHR_FILE__READ 0x00000002UL
123#define CHR_FILE__WRITE 0x00000004UL
124#define CHR_FILE__CREATE 0x00000008UL
125#define CHR_FILE__GETATTR 0x00000010UL
126#define CHR_FILE__SETATTR 0x00000020UL
127#define CHR_FILE__LOCK 0x00000040UL
128#define CHR_FILE__RELABELFROM 0x00000080UL
129#define CHR_FILE__RELABELTO 0x00000100UL
130#define CHR_FILE__APPEND 0x00000200UL
131#define CHR_FILE__UNLINK 0x00000400UL
132#define CHR_FILE__LINK 0x00000800UL
133#define CHR_FILE__RENAME 0x00001000UL
134#define CHR_FILE__EXECUTE 0x00002000UL
135#define CHR_FILE__SWAPON 0x00004000UL
136#define CHR_FILE__QUOTAON 0x00008000UL
137#define CHR_FILE__MOUNTON 0x00010000UL
138#define CHR_FILE__EXECUTE_NO_TRANS 0x00020000UL
139#define CHR_FILE__ENTRYPOINT 0x00040000UL
140#define CHR_FILE__EXECMOD 0x00080000UL
141#define CHR_FILE__OPEN 0x00100000UL
142#define BLK_FILE__IOCTL 0x00000001UL
143#define BLK_FILE__READ 0x00000002UL
144#define BLK_FILE__WRITE 0x00000004UL
145#define BLK_FILE__CREATE 0x00000008UL
146#define BLK_FILE__GETATTR 0x00000010UL
147#define BLK_FILE__SETATTR 0x00000020UL
148#define BLK_FILE__LOCK 0x00000040UL
149#define BLK_FILE__RELABELFROM 0x00000080UL
150#define BLK_FILE__RELABELTO 0x00000100UL
151#define BLK_FILE__APPEND 0x00000200UL
152#define BLK_FILE__UNLINK 0x00000400UL
153#define BLK_FILE__LINK 0x00000800UL
154#define BLK_FILE__RENAME 0x00001000UL
155#define BLK_FILE__EXECUTE 0x00002000UL
156#define BLK_FILE__SWAPON 0x00004000UL
157#define BLK_FILE__QUOTAON 0x00008000UL
158#define BLK_FILE__MOUNTON 0x00010000UL
159#define BLK_FILE__OPEN 0x00020000UL
160#define SOCK_FILE__IOCTL 0x00000001UL
161#define SOCK_FILE__READ 0x00000002UL
162#define SOCK_FILE__WRITE 0x00000004UL
163#define SOCK_FILE__CREATE 0x00000008UL
164#define SOCK_FILE__GETATTR 0x00000010UL
165#define SOCK_FILE__SETATTR 0x00000020UL
166#define SOCK_FILE__LOCK 0x00000040UL
167#define SOCK_FILE__RELABELFROM 0x00000080UL
168#define SOCK_FILE__RELABELTO 0x00000100UL
169#define SOCK_FILE__APPEND 0x00000200UL
170#define SOCK_FILE__UNLINK 0x00000400UL
171#define SOCK_FILE__LINK 0x00000800UL
172#define SOCK_FILE__RENAME 0x00001000UL
173#define SOCK_FILE__EXECUTE 0x00002000UL
174#define SOCK_FILE__SWAPON 0x00004000UL
175#define SOCK_FILE__QUOTAON 0x00008000UL
176#define SOCK_FILE__MOUNTON 0x00010000UL
177#define SOCK_FILE__OPEN 0x00020000UL
178#define FIFO_FILE__IOCTL 0x00000001UL
179#define FIFO_FILE__READ 0x00000002UL
180#define FIFO_FILE__WRITE 0x00000004UL
181#define FIFO_FILE__CREATE 0x00000008UL
182#define FIFO_FILE__GETATTR 0x00000010UL
183#define FIFO_FILE__SETATTR 0x00000020UL
184#define FIFO_FILE__LOCK 0x00000040UL
185#define FIFO_FILE__RELABELFROM 0x00000080UL
186#define FIFO_FILE__RELABELTO 0x00000100UL
187#define FIFO_FILE__APPEND 0x00000200UL
188#define FIFO_FILE__UNLINK 0x00000400UL
189#define FIFO_FILE__LINK 0x00000800UL
190#define FIFO_FILE__RENAME 0x00001000UL
191#define FIFO_FILE__EXECUTE 0x00002000UL
192#define FIFO_FILE__SWAPON 0x00004000UL
193#define FIFO_FILE__QUOTAON 0x00008000UL
194#define FIFO_FILE__MOUNTON 0x00010000UL
195#define FIFO_FILE__OPEN 0x00020000UL
196#define FD__USE 0x00000001UL
197#define SOCKET__IOCTL 0x00000001UL
198#define SOCKET__READ 0x00000002UL
199#define SOCKET__WRITE 0x00000004UL
200#define SOCKET__CREATE 0x00000008UL
201#define SOCKET__GETATTR 0x00000010UL
202#define SOCKET__SETATTR 0x00000020UL
203#define SOCKET__LOCK 0x00000040UL
204#define SOCKET__RELABELFROM 0x00000080UL
205#define SOCKET__RELABELTO 0x00000100UL
206#define SOCKET__APPEND 0x00000200UL
207#define SOCKET__BIND 0x00000400UL
208#define SOCKET__CONNECT 0x00000800UL
209#define SOCKET__LISTEN 0x00001000UL
210#define SOCKET__ACCEPT 0x00002000UL
211#define SOCKET__GETOPT 0x00004000UL
212#define SOCKET__SETOPT 0x00008000UL
213#define SOCKET__SHUTDOWN 0x00010000UL
214#define SOCKET__RECVFROM 0x00020000UL
215#define SOCKET__SENDTO 0x00040000UL
216#define SOCKET__RECV_MSG 0x00080000UL
217#define SOCKET__SEND_MSG 0x00100000UL
218#define SOCKET__NAME_BIND 0x00200000UL
219#define TCP_SOCKET__IOCTL 0x00000001UL
220#define TCP_SOCKET__READ 0x00000002UL
221#define TCP_SOCKET__WRITE 0x00000004UL
222#define TCP_SOCKET__CREATE 0x00000008UL
223#define TCP_SOCKET__GETATTR 0x00000010UL
224#define TCP_SOCKET__SETATTR 0x00000020UL
225#define TCP_SOCKET__LOCK 0x00000040UL
226#define TCP_SOCKET__RELABELFROM 0x00000080UL
227#define TCP_SOCKET__RELABELTO 0x00000100UL
228#define TCP_SOCKET__APPEND 0x00000200UL
229#define TCP_SOCKET__BIND 0x00000400UL
230#define TCP_SOCKET__CONNECT 0x00000800UL
231#define TCP_SOCKET__LISTEN 0x00001000UL
232#define TCP_SOCKET__ACCEPT 0x00002000UL
233#define TCP_SOCKET__GETOPT 0x00004000UL
234#define TCP_SOCKET__SETOPT 0x00008000UL
235#define TCP_SOCKET__SHUTDOWN 0x00010000UL
236#define TCP_SOCKET__RECVFROM 0x00020000UL
237#define TCP_SOCKET__SENDTO 0x00040000UL
238#define TCP_SOCKET__RECV_MSG 0x00080000UL
239#define TCP_SOCKET__SEND_MSG 0x00100000UL
240#define TCP_SOCKET__NAME_BIND 0x00200000UL
241#define TCP_SOCKET__CONNECTTO 0x00400000UL
242#define TCP_SOCKET__NEWCONN 0x00800000UL
243#define TCP_SOCKET__ACCEPTFROM 0x01000000UL
244#define TCP_SOCKET__NODE_BIND 0x02000000UL
245#define TCP_SOCKET__NAME_CONNECT 0x04000000UL
246#define UDP_SOCKET__IOCTL 0x00000001UL
247#define UDP_SOCKET__READ 0x00000002UL
248#define UDP_SOCKET__WRITE 0x00000004UL
249#define UDP_SOCKET__CREATE 0x00000008UL
250#define UDP_SOCKET__GETATTR 0x00000010UL
251#define UDP_SOCKET__SETATTR 0x00000020UL
252#define UDP_SOCKET__LOCK 0x00000040UL
253#define UDP_SOCKET__RELABELFROM 0x00000080UL
254#define UDP_SOCKET__RELABELTO 0x00000100UL
255#define UDP_SOCKET__APPEND 0x00000200UL
256#define UDP_SOCKET__BIND 0x00000400UL
257#define UDP_SOCKET__CONNECT 0x00000800UL
258#define UDP_SOCKET__LISTEN 0x00001000UL
259#define UDP_SOCKET__ACCEPT 0x00002000UL
260#define UDP_SOCKET__GETOPT 0x00004000UL
261#define UDP_SOCKET__SETOPT 0x00008000UL
262#define UDP_SOCKET__SHUTDOWN 0x00010000UL
263#define UDP_SOCKET__RECVFROM 0x00020000UL
264#define UDP_SOCKET__SENDTO 0x00040000UL
265#define UDP_SOCKET__RECV_MSG 0x00080000UL
266#define UDP_SOCKET__SEND_MSG 0x00100000UL
267#define UDP_SOCKET__NAME_BIND 0x00200000UL
268#define UDP_SOCKET__NODE_BIND 0x00400000UL
269#define RAWIP_SOCKET__IOCTL 0x00000001UL
270#define RAWIP_SOCKET__READ 0x00000002UL
271#define RAWIP_SOCKET__WRITE 0x00000004UL
272#define RAWIP_SOCKET__CREATE 0x00000008UL
273#define RAWIP_SOCKET__GETATTR 0x00000010UL
274#define RAWIP_SOCKET__SETATTR 0x00000020UL
275#define RAWIP_SOCKET__LOCK 0x00000040UL
276#define RAWIP_SOCKET__RELABELFROM 0x00000080UL
277#define RAWIP_SOCKET__RELABELTO 0x00000100UL
278#define RAWIP_SOCKET__APPEND 0x00000200UL
279#define RAWIP_SOCKET__BIND 0x00000400UL
280#define RAWIP_SOCKET__CONNECT 0x00000800UL
281#define RAWIP_SOCKET__LISTEN 0x00001000UL
282#define RAWIP_SOCKET__ACCEPT 0x00002000UL
283#define RAWIP_SOCKET__GETOPT 0x00004000UL
284#define RAWIP_SOCKET__SETOPT 0x00008000UL
285#define RAWIP_SOCKET__SHUTDOWN 0x00010000UL
286#define RAWIP_SOCKET__RECVFROM 0x00020000UL
287#define RAWIP_SOCKET__SENDTO 0x00040000UL
288#define RAWIP_SOCKET__RECV_MSG 0x00080000UL
289#define RAWIP_SOCKET__SEND_MSG 0x00100000UL
290#define RAWIP_SOCKET__NAME_BIND 0x00200000UL
291#define RAWIP_SOCKET__NODE_BIND 0x00400000UL
292#define NODE__TCP_RECV 0x00000001UL
293#define NODE__TCP_SEND 0x00000002UL
294#define NODE__UDP_RECV 0x00000004UL
295#define NODE__UDP_SEND 0x00000008UL
296#define NODE__RAWIP_RECV 0x00000010UL
297#define NODE__RAWIP_SEND 0x00000020UL
298#define NODE__ENFORCE_DEST 0x00000040UL
299#define NODE__DCCP_RECV 0x00000080UL
300#define NODE__DCCP_SEND 0x00000100UL
301#define NODE__RECVFROM 0x00000200UL
302#define NODE__SENDTO 0x00000400UL
303#define NETIF__TCP_RECV 0x00000001UL
304#define NETIF__TCP_SEND 0x00000002UL
305#define NETIF__UDP_RECV 0x00000004UL
306#define NETIF__UDP_SEND 0x00000008UL
307#define NETIF__RAWIP_RECV 0x00000010UL
308#define NETIF__RAWIP_SEND 0x00000020UL
309#define NETIF__DCCP_RECV 0x00000040UL
310#define NETIF__DCCP_SEND 0x00000080UL
311#define NETIF__INGRESS 0x00000100UL
312#define NETIF__EGRESS 0x00000200UL
313#define NETLINK_SOCKET__IOCTL 0x00000001UL
314#define NETLINK_SOCKET__READ 0x00000002UL
315#define NETLINK_SOCKET__WRITE 0x00000004UL
316#define NETLINK_SOCKET__CREATE 0x00000008UL
317#define NETLINK_SOCKET__GETATTR 0x00000010UL
318#define NETLINK_SOCKET__SETATTR 0x00000020UL
319#define NETLINK_SOCKET__LOCK 0x00000040UL
320#define NETLINK_SOCKET__RELABELFROM 0x00000080UL
321#define NETLINK_SOCKET__RELABELTO 0x00000100UL
322#define NETLINK_SOCKET__APPEND 0x00000200UL
323#define NETLINK_SOCKET__BIND 0x00000400UL
324#define NETLINK_SOCKET__CONNECT 0x00000800UL
325#define NETLINK_SOCKET__LISTEN 0x00001000UL
326#define NETLINK_SOCKET__ACCEPT 0x00002000UL
327#define NETLINK_SOCKET__GETOPT 0x00004000UL
328#define NETLINK_SOCKET__SETOPT 0x00008000UL
329#define NETLINK_SOCKET__SHUTDOWN 0x00010000UL
330#define NETLINK_SOCKET__RECVFROM 0x00020000UL
331#define NETLINK_SOCKET__SENDTO 0x00040000UL
332#define NETLINK_SOCKET__RECV_MSG 0x00080000UL
333#define NETLINK_SOCKET__SEND_MSG 0x00100000UL
334#define NETLINK_SOCKET__NAME_BIND 0x00200000UL
335#define PACKET_SOCKET__IOCTL 0x00000001UL
336#define PACKET_SOCKET__READ 0x00000002UL
337#define PACKET_SOCKET__WRITE 0x00000004UL
338#define PACKET_SOCKET__CREATE 0x00000008UL
339#define PACKET_SOCKET__GETATTR 0x00000010UL
340#define PACKET_SOCKET__SETATTR 0x00000020UL
341#define PACKET_SOCKET__LOCK 0x00000040UL
342#define PACKET_SOCKET__RELABELFROM 0x00000080UL
343#define PACKET_SOCKET__RELABELTO 0x00000100UL
344#define PACKET_SOCKET__APPEND 0x00000200UL
345#define PACKET_SOCKET__BIND 0x00000400UL
346#define PACKET_SOCKET__CONNECT 0x00000800UL
347#define PACKET_SOCKET__LISTEN 0x00001000UL
348#define PACKET_SOCKET__ACCEPT 0x00002000UL
349#define PACKET_SOCKET__GETOPT 0x00004000UL
350#define PACKET_SOCKET__SETOPT 0x00008000UL
351#define PACKET_SOCKET__SHUTDOWN 0x00010000UL
352#define PACKET_SOCKET__RECVFROM 0x00020000UL
353#define PACKET_SOCKET__SENDTO 0x00040000UL
354#define PACKET_SOCKET__RECV_MSG 0x00080000UL
355#define PACKET_SOCKET__SEND_MSG 0x00100000UL
356#define PACKET_SOCKET__NAME_BIND 0x00200000UL
357#define KEY_SOCKET__IOCTL 0x00000001UL
358#define KEY_SOCKET__READ 0x00000002UL
359#define KEY_SOCKET__WRITE 0x00000004UL
360#define KEY_SOCKET__CREATE 0x00000008UL
361#define KEY_SOCKET__GETATTR 0x00000010UL
362#define KEY_SOCKET__SETATTR 0x00000020UL
363#define KEY_SOCKET__LOCK 0x00000040UL
364#define KEY_SOCKET__RELABELFROM 0x00000080UL
365#define KEY_SOCKET__RELABELTO 0x00000100UL
366#define KEY_SOCKET__APPEND 0x00000200UL
367#define KEY_SOCKET__BIND 0x00000400UL
368#define KEY_SOCKET__CONNECT 0x00000800UL
369#define KEY_SOCKET__LISTEN 0x00001000UL
370#define KEY_SOCKET__ACCEPT 0x00002000UL
371#define KEY_SOCKET__GETOPT 0x00004000UL
372#define KEY_SOCKET__SETOPT 0x00008000UL
373#define KEY_SOCKET__SHUTDOWN 0x00010000UL
374#define KEY_SOCKET__RECVFROM 0x00020000UL
375#define KEY_SOCKET__SENDTO 0x00040000UL
376#define KEY_SOCKET__RECV_MSG 0x00080000UL
377#define KEY_SOCKET__SEND_MSG 0x00100000UL
378#define KEY_SOCKET__NAME_BIND 0x00200000UL
379#define UNIX_STREAM_SOCKET__IOCTL 0x00000001UL
380#define UNIX_STREAM_SOCKET__READ 0x00000002UL
381#define UNIX_STREAM_SOCKET__WRITE 0x00000004UL
382#define UNIX_STREAM_SOCKET__CREATE 0x00000008UL
383#define UNIX_STREAM_SOCKET__GETATTR 0x00000010UL
384#define UNIX_STREAM_SOCKET__SETATTR 0x00000020UL
385#define UNIX_STREAM_SOCKET__LOCK 0x00000040UL
386#define UNIX_STREAM_SOCKET__RELABELFROM 0x00000080UL
387#define UNIX_STREAM_SOCKET__RELABELTO 0x00000100UL
388#define UNIX_STREAM_SOCKET__APPEND 0x00000200UL
389#define UNIX_STREAM_SOCKET__BIND 0x00000400UL
390#define UNIX_STREAM_SOCKET__CONNECT 0x00000800UL
391#define UNIX_STREAM_SOCKET__LISTEN 0x00001000UL
392#define UNIX_STREAM_SOCKET__ACCEPT 0x00002000UL
393#define UNIX_STREAM_SOCKET__GETOPT 0x00004000UL
394#define UNIX_STREAM_SOCKET__SETOPT 0x00008000UL
395#define UNIX_STREAM_SOCKET__SHUTDOWN 0x00010000UL
396#define UNIX_STREAM_SOCKET__RECVFROM 0x00020000UL
397#define UNIX_STREAM_SOCKET__SENDTO 0x00040000UL
398#define UNIX_STREAM_SOCKET__RECV_MSG 0x00080000UL
399#define UNIX_STREAM_SOCKET__SEND_MSG 0x00100000UL
400#define UNIX_STREAM_SOCKET__NAME_BIND 0x00200000UL
401#define UNIX_STREAM_SOCKET__CONNECTTO 0x00400000UL
402#define UNIX_STREAM_SOCKET__NEWCONN 0x00800000UL
403#define UNIX_STREAM_SOCKET__ACCEPTFROM 0x01000000UL
404#define UNIX_DGRAM_SOCKET__IOCTL 0x00000001UL
405#define UNIX_DGRAM_SOCKET__READ 0x00000002UL
406#define UNIX_DGRAM_SOCKET__WRITE 0x00000004UL
407#define UNIX_DGRAM_SOCKET__CREATE 0x00000008UL
408#define UNIX_DGRAM_SOCKET__GETATTR 0x00000010UL
409#define UNIX_DGRAM_SOCKET__SETATTR 0x00000020UL
410#define UNIX_DGRAM_SOCKET__LOCK 0x00000040UL
411#define UNIX_DGRAM_SOCKET__RELABELFROM 0x00000080UL
412#define UNIX_DGRAM_SOCKET__RELABELTO 0x00000100UL
413#define UNIX_DGRAM_SOCKET__APPEND 0x00000200UL
414#define UNIX_DGRAM_SOCKET__BIND 0x00000400UL
415#define UNIX_DGRAM_SOCKET__CONNECT 0x00000800UL
416#define UNIX_DGRAM_SOCKET__LISTEN 0x00001000UL
417#define UNIX_DGRAM_SOCKET__ACCEPT 0x00002000UL
418#define UNIX_DGRAM_SOCKET__GETOPT 0x00004000UL
419#define UNIX_DGRAM_SOCKET__SETOPT 0x00008000UL
420#define UNIX_DGRAM_SOCKET__SHUTDOWN 0x00010000UL
421#define UNIX_DGRAM_SOCKET__RECVFROM 0x00020000UL
422#define UNIX_DGRAM_SOCKET__SENDTO 0x00040000UL
423#define UNIX_DGRAM_SOCKET__RECV_MSG 0x00080000UL
424#define UNIX_DGRAM_SOCKET__SEND_MSG 0x00100000UL
425#define UNIX_DGRAM_SOCKET__NAME_BIND 0x00200000UL
426#define TUN_SOCKET__IOCTL 0x00000001UL
427#define TUN_SOCKET__READ 0x00000002UL
428#define TUN_SOCKET__WRITE 0x00000004UL
429#define TUN_SOCKET__CREATE 0x00000008UL
430#define TUN_SOCKET__GETATTR 0x00000010UL
431#define TUN_SOCKET__SETATTR 0x00000020UL
432#define TUN_SOCKET__LOCK 0x00000040UL
433#define TUN_SOCKET__RELABELFROM 0x00000080UL
434#define TUN_SOCKET__RELABELTO 0x00000100UL
435#define TUN_SOCKET__APPEND 0x00000200UL
436#define TUN_SOCKET__BIND 0x00000400UL
437#define TUN_SOCKET__CONNECT 0x00000800UL
438#define TUN_SOCKET__LISTEN 0x00001000UL
439#define TUN_SOCKET__ACCEPT 0x00002000UL
440#define TUN_SOCKET__GETOPT 0x00004000UL
441#define TUN_SOCKET__SETOPT 0x00008000UL
442#define TUN_SOCKET__SHUTDOWN 0x00010000UL
443#define TUN_SOCKET__RECVFROM 0x00020000UL
444#define TUN_SOCKET__SENDTO 0x00040000UL
445#define TUN_SOCKET__RECV_MSG 0x00080000UL
446#define TUN_SOCKET__SEND_MSG 0x00100000UL
447#define TUN_SOCKET__NAME_BIND 0x00200000UL
448#define PROCESS__FORK 0x00000001UL
449#define PROCESS__TRANSITION 0x00000002UL
450#define PROCESS__SIGCHLD 0x00000004UL
451#define PROCESS__SIGKILL 0x00000008UL
452#define PROCESS__SIGSTOP 0x00000010UL
453#define PROCESS__SIGNULL 0x00000020UL
454#define PROCESS__SIGNAL 0x00000040UL
455#define PROCESS__PTRACE 0x00000080UL
456#define PROCESS__GETSCHED 0x00000100UL
457#define PROCESS__SETSCHED 0x00000200UL
458#define PROCESS__GETSESSION 0x00000400UL
459#define PROCESS__GETPGID 0x00000800UL
460#define PROCESS__SETPGID 0x00001000UL
461#define PROCESS__GETCAP 0x00002000UL
462#define PROCESS__SETCAP 0x00004000UL
463#define PROCESS__SHARE 0x00008000UL
464#define PROCESS__GETATTR 0x00010000UL
465#define PROCESS__SETEXEC 0x00020000UL
466#define PROCESS__SETFSCREATE 0x00040000UL
467#define PROCESS__NOATSECURE 0x00080000UL
468#define PROCESS__SIGINH 0x00100000UL
469#define PROCESS__SETRLIMIT 0x00200000UL
470#define PROCESS__RLIMITINH 0x00400000UL
471#define PROCESS__DYNTRANSITION 0x00800000UL
472#define PROCESS__SETCURRENT 0x01000000UL
473#define PROCESS__EXECMEM 0x02000000UL
474#define PROCESS__EXECSTACK 0x04000000UL
475#define PROCESS__EXECHEAP 0x08000000UL
476#define PROCESS__SETKEYCREATE 0x10000000UL
477#define PROCESS__SETSOCKCREATE 0x20000000UL
478#define IPC__CREATE 0x00000001UL
479#define IPC__DESTROY 0x00000002UL
480#define IPC__GETATTR 0x00000004UL
481#define IPC__SETATTR 0x00000008UL
482#define IPC__READ 0x00000010UL
483#define IPC__WRITE 0x00000020UL
484#define IPC__ASSOCIATE 0x00000040UL
485#define IPC__UNIX_READ 0x00000080UL
486#define IPC__UNIX_WRITE 0x00000100UL
487#define SEM__CREATE 0x00000001UL
488#define SEM__DESTROY 0x00000002UL
489#define SEM__GETATTR 0x00000004UL
490#define SEM__SETATTR 0x00000008UL
491#define SEM__READ 0x00000010UL
492#define SEM__WRITE 0x00000020UL
493#define SEM__ASSOCIATE 0x00000040UL
494#define SEM__UNIX_READ 0x00000080UL
495#define SEM__UNIX_WRITE 0x00000100UL
496#define MSGQ__CREATE 0x00000001UL
497#define MSGQ__DESTROY 0x00000002UL
498#define MSGQ__GETATTR 0x00000004UL
499#define MSGQ__SETATTR 0x00000008UL
500#define MSGQ__READ 0x00000010UL
501#define MSGQ__WRITE 0x00000020UL
502#define MSGQ__ASSOCIATE 0x00000040UL
503#define MSGQ__UNIX_READ 0x00000080UL
504#define MSGQ__UNIX_WRITE 0x00000100UL
505#define MSGQ__ENQUEUE 0x00000200UL
506#define MSG__SEND 0x00000001UL
507#define MSG__RECEIVE 0x00000002UL
508#define SHM__CREATE 0x00000001UL
509#define SHM__DESTROY 0x00000002UL
510#define SHM__GETATTR 0x00000004UL
511#define SHM__SETATTR 0x00000008UL
512#define SHM__READ 0x00000010UL
513#define SHM__WRITE 0x00000020UL
514#define SHM__ASSOCIATE 0x00000040UL
515#define SHM__UNIX_READ 0x00000080UL
516#define SHM__UNIX_WRITE 0x00000100UL
517#define SHM__LOCK 0x00000200UL
518#define SECURITY__COMPUTE_AV 0x00000001UL
519#define SECURITY__COMPUTE_CREATE 0x00000002UL
520#define SECURITY__COMPUTE_MEMBER 0x00000004UL
521#define SECURITY__CHECK_CONTEXT 0x00000008UL
522#define SECURITY__LOAD_POLICY 0x00000010UL
523#define SECURITY__COMPUTE_RELABEL 0x00000020UL
524#define SECURITY__COMPUTE_USER 0x00000040UL
525#define SECURITY__SETENFORCE 0x00000080UL
526#define SECURITY__SETBOOL 0x00000100UL
527#define SECURITY__SETSECPARAM 0x00000200UL
528#define SECURITY__SETCHECKREQPROT 0x00000400UL
529#define SYSTEM__IPC_INFO 0x00000001UL
530#define SYSTEM__SYSLOG_READ 0x00000002UL
531#define SYSTEM__SYSLOG_MOD 0x00000004UL
532#define SYSTEM__SYSLOG_CONSOLE 0x00000008UL
533#define SYSTEM__MODULE_REQUEST 0x00000010UL
534#define CAPABILITY__CHOWN 0x00000001UL
535#define CAPABILITY__DAC_OVERRIDE 0x00000002UL
536#define CAPABILITY__DAC_READ_SEARCH 0x00000004UL
537#define CAPABILITY__FOWNER 0x00000008UL
538#define CAPABILITY__FSETID 0x00000010UL
539#define CAPABILITY__KILL 0x00000020UL
540#define CAPABILITY__SETGID 0x00000040UL
541#define CAPABILITY__SETUID 0x00000080UL
542#define CAPABILITY__SETPCAP 0x00000100UL
543#define CAPABILITY__LINUX_IMMUTABLE 0x00000200UL
544#define CAPABILITY__NET_BIND_SERVICE 0x00000400UL
545#define CAPABILITY__NET_BROADCAST 0x00000800UL
546#define CAPABILITY__NET_ADMIN 0x00001000UL
547#define CAPABILITY__NET_RAW 0x00002000UL
548#define CAPABILITY__IPC_LOCK 0x00004000UL
549#define CAPABILITY__IPC_OWNER 0x00008000UL
550#define CAPABILITY__SYS_MODULE 0x00010000UL
551#define CAPABILITY__SYS_RAWIO 0x00020000UL
552#define CAPABILITY__SYS_CHROOT 0x00040000UL
553#define CAPABILITY__SYS_PTRACE 0x00080000UL
554#define CAPABILITY__SYS_PACCT 0x00100000UL
555#define CAPABILITY__SYS_ADMIN 0x00200000UL
556#define CAPABILITY__SYS_BOOT 0x00400000UL
557#define CAPABILITY__SYS_NICE 0x00800000UL
558#define CAPABILITY__SYS_RESOURCE 0x01000000UL
559#define CAPABILITY__SYS_TIME 0x02000000UL
560#define CAPABILITY__SYS_TTY_CONFIG 0x04000000UL
561#define CAPABILITY__MKNOD 0x08000000UL
562#define CAPABILITY__LEASE 0x10000000UL
563#define CAPABILITY__AUDIT_WRITE 0x20000000UL
564#define CAPABILITY__AUDIT_CONTROL 0x40000000UL
565#define CAPABILITY__SETFCAP 0x80000000UL
566#define CAPABILITY2__MAC_OVERRIDE 0x00000001UL
567#define CAPABILITY2__MAC_ADMIN 0x00000002UL
568#define NETLINK_ROUTE_SOCKET__IOCTL 0x00000001UL
569#define NETLINK_ROUTE_SOCKET__READ 0x00000002UL
570#define NETLINK_ROUTE_SOCKET__WRITE 0x00000004UL
571#define NETLINK_ROUTE_SOCKET__CREATE 0x00000008UL
572#define NETLINK_ROUTE_SOCKET__GETATTR 0x00000010UL
573#define NETLINK_ROUTE_SOCKET__SETATTR 0x00000020UL
574#define NETLINK_ROUTE_SOCKET__LOCK 0x00000040UL
575#define NETLINK_ROUTE_SOCKET__RELABELFROM 0x00000080UL
576#define NETLINK_ROUTE_SOCKET__RELABELTO 0x00000100UL
577#define NETLINK_ROUTE_SOCKET__APPEND 0x00000200UL
578#define NETLINK_ROUTE_SOCKET__BIND 0x00000400UL
579#define NETLINK_ROUTE_SOCKET__CONNECT 0x00000800UL
580#define NETLINK_ROUTE_SOCKET__LISTEN 0x00001000UL
581#define NETLINK_ROUTE_SOCKET__ACCEPT 0x00002000UL
582#define NETLINK_ROUTE_SOCKET__GETOPT 0x00004000UL
583#define NETLINK_ROUTE_SOCKET__SETOPT 0x00008000UL
584#define NETLINK_ROUTE_SOCKET__SHUTDOWN 0x00010000UL
585#define NETLINK_ROUTE_SOCKET__RECVFROM 0x00020000UL
586#define NETLINK_ROUTE_SOCKET__SENDTO 0x00040000UL
587#define NETLINK_ROUTE_SOCKET__RECV_MSG 0x00080000UL
588#define NETLINK_ROUTE_SOCKET__SEND_MSG 0x00100000UL
589#define NETLINK_ROUTE_SOCKET__NAME_BIND 0x00200000UL
590#define NETLINK_ROUTE_SOCKET__NLMSG_READ 0x00400000UL
591#define NETLINK_ROUTE_SOCKET__NLMSG_WRITE 0x00800000UL
592#define NETLINK_FIREWALL_SOCKET__IOCTL 0x00000001UL
593#define NETLINK_FIREWALL_SOCKET__READ 0x00000002UL
594#define NETLINK_FIREWALL_SOCKET__WRITE 0x00000004UL
595#define NETLINK_FIREWALL_SOCKET__CREATE 0x00000008UL
596#define NETLINK_FIREWALL_SOCKET__GETATTR 0x00000010UL
597#define NETLINK_FIREWALL_SOCKET__SETATTR 0x00000020UL
598#define NETLINK_FIREWALL_SOCKET__LOCK 0x00000040UL
599#define NETLINK_FIREWALL_SOCKET__RELABELFROM 0x00000080UL
600#define NETLINK_FIREWALL_SOCKET__RELABELTO 0x00000100UL
601#define NETLINK_FIREWALL_SOCKET__APPEND 0x00000200UL
602#define NETLINK_FIREWALL_SOCKET__BIND 0x00000400UL
603#define NETLINK_FIREWALL_SOCKET__CONNECT 0x00000800UL
604#define NETLINK_FIREWALL_SOCKET__LISTEN 0x00001000UL
605#define NETLINK_FIREWALL_SOCKET__ACCEPT 0x00002000UL
606#define NETLINK_FIREWALL_SOCKET__GETOPT 0x00004000UL
607#define NETLINK_FIREWALL_SOCKET__SETOPT 0x00008000UL
608#define NETLINK_FIREWALL_SOCKET__SHUTDOWN 0x00010000UL
609#define NETLINK_FIREWALL_SOCKET__RECVFROM 0x00020000UL
610#define NETLINK_FIREWALL_SOCKET__SENDTO 0x00040000UL
611#define NETLINK_FIREWALL_SOCKET__RECV_MSG 0x00080000UL
612#define NETLINK_FIREWALL_SOCKET__SEND_MSG 0x00100000UL
613#define NETLINK_FIREWALL_SOCKET__NAME_BIND 0x00200000UL
614#define NETLINK_FIREWALL_SOCKET__NLMSG_READ 0x00400000UL
615#define NETLINK_FIREWALL_SOCKET__NLMSG_WRITE 0x00800000UL
616#define NETLINK_TCPDIAG_SOCKET__IOCTL 0x00000001UL
617#define NETLINK_TCPDIAG_SOCKET__READ 0x00000002UL
618#define NETLINK_TCPDIAG_SOCKET__WRITE 0x00000004UL
619#define NETLINK_TCPDIAG_SOCKET__CREATE 0x00000008UL
620#define NETLINK_TCPDIAG_SOCKET__GETATTR 0x00000010UL
621#define NETLINK_TCPDIAG_SOCKET__SETATTR 0x00000020UL
622#define NETLINK_TCPDIAG_SOCKET__LOCK 0x00000040UL
623#define NETLINK_TCPDIAG_SOCKET__RELABELFROM 0x00000080UL
624#define NETLINK_TCPDIAG_SOCKET__RELABELTO 0x00000100UL
625#define NETLINK_TCPDIAG_SOCKET__APPEND 0x00000200UL
626#define NETLINK_TCPDIAG_SOCKET__BIND 0x00000400UL
627#define NETLINK_TCPDIAG_SOCKET__CONNECT 0x00000800UL
628#define NETLINK_TCPDIAG_SOCKET__LISTEN 0x00001000UL
629#define NETLINK_TCPDIAG_SOCKET__ACCEPT 0x00002000UL
630#define NETLINK_TCPDIAG_SOCKET__GETOPT 0x00004000UL
631#define NETLINK_TCPDIAG_SOCKET__SETOPT 0x00008000UL
632#define NETLINK_TCPDIAG_SOCKET__SHUTDOWN 0x00010000UL
633#define NETLINK_TCPDIAG_SOCKET__RECVFROM 0x00020000UL
634#define NETLINK_TCPDIAG_SOCKET__SENDTO 0x00040000UL
635#define NETLINK_TCPDIAG_SOCKET__RECV_MSG 0x00080000UL
636#define NETLINK_TCPDIAG_SOCKET__SEND_MSG 0x00100000UL
637#define NETLINK_TCPDIAG_SOCKET__NAME_BIND 0x00200000UL
638#define NETLINK_TCPDIAG_SOCKET__NLMSG_READ 0x00400000UL
639#define NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE 0x00800000UL
640#define NETLINK_NFLOG_SOCKET__IOCTL 0x00000001UL
641#define NETLINK_NFLOG_SOCKET__READ 0x00000002UL
642#define NETLINK_NFLOG_SOCKET__WRITE 0x00000004UL
643#define NETLINK_NFLOG_SOCKET__CREATE 0x00000008UL
644#define NETLINK_NFLOG_SOCKET__GETATTR 0x00000010UL
645#define NETLINK_NFLOG_SOCKET__SETATTR 0x00000020UL
646#define NETLINK_NFLOG_SOCKET__LOCK 0x00000040UL
647#define NETLINK_NFLOG_SOCKET__RELABELFROM 0x00000080UL
648#define NETLINK_NFLOG_SOCKET__RELABELTO 0x00000100UL
649#define NETLINK_NFLOG_SOCKET__APPEND 0x00000200UL
650#define NETLINK_NFLOG_SOCKET__BIND 0x00000400UL
651#define NETLINK_NFLOG_SOCKET__CONNECT 0x00000800UL
652#define NETLINK_NFLOG_SOCKET__LISTEN 0x00001000UL
653#define NETLINK_NFLOG_SOCKET__ACCEPT 0x00002000UL
654#define NETLINK_NFLOG_SOCKET__GETOPT 0x00004000UL
655#define NETLINK_NFLOG_SOCKET__SETOPT 0x00008000UL
656#define NETLINK_NFLOG_SOCKET__SHUTDOWN 0x00010000UL
657#define NETLINK_NFLOG_SOCKET__RECVFROM 0x00020000UL
658#define NETLINK_NFLOG_SOCKET__SENDTO 0x00040000UL
659#define NETLINK_NFLOG_SOCKET__RECV_MSG 0x00080000UL
660#define NETLINK_NFLOG_SOCKET__SEND_MSG 0x00100000UL
661#define NETLINK_NFLOG_SOCKET__NAME_BIND 0x00200000UL
662#define NETLINK_XFRM_SOCKET__IOCTL 0x00000001UL
663#define NETLINK_XFRM_SOCKET__READ 0x00000002UL
664#define NETLINK_XFRM_SOCKET__WRITE 0x00000004UL
665#define NETLINK_XFRM_SOCKET__CREATE 0x00000008UL
666#define NETLINK_XFRM_SOCKET__GETATTR 0x00000010UL
667#define NETLINK_XFRM_SOCKET__SETATTR 0x00000020UL
668#define NETLINK_XFRM_SOCKET__LOCK 0x00000040UL
669#define NETLINK_XFRM_SOCKET__RELABELFROM 0x00000080UL
670#define NETLINK_XFRM_SOCKET__RELABELTO 0x00000100UL
671#define NETLINK_XFRM_SOCKET__APPEND 0x00000200UL
672#define NETLINK_XFRM_SOCKET__BIND 0x00000400UL
673#define NETLINK_XFRM_SOCKET__CONNECT 0x00000800UL
674#define NETLINK_XFRM_SOCKET__LISTEN 0x00001000UL
675#define NETLINK_XFRM_SOCKET__ACCEPT 0x00002000UL
676#define NETLINK_XFRM_SOCKET__GETOPT 0x00004000UL
677#define NETLINK_XFRM_SOCKET__SETOPT 0x00008000UL
678#define NETLINK_XFRM_SOCKET__SHUTDOWN 0x00010000UL
679#define NETLINK_XFRM_SOCKET__RECVFROM 0x00020000UL
680#define NETLINK_XFRM_SOCKET__SENDTO 0x00040000UL
681#define NETLINK_XFRM_SOCKET__RECV_MSG 0x00080000UL
682#define NETLINK_XFRM_SOCKET__SEND_MSG 0x00100000UL
683#define NETLINK_XFRM_SOCKET__NAME_BIND 0x00200000UL
684#define NETLINK_XFRM_SOCKET__NLMSG_READ 0x00400000UL
685#define NETLINK_XFRM_SOCKET__NLMSG_WRITE 0x00800000UL
686#define NETLINK_SELINUX_SOCKET__IOCTL 0x00000001UL
687#define NETLINK_SELINUX_SOCKET__READ 0x00000002UL
688#define NETLINK_SELINUX_SOCKET__WRITE 0x00000004UL
689#define NETLINK_SELINUX_SOCKET__CREATE 0x00000008UL
690#define NETLINK_SELINUX_SOCKET__GETATTR 0x00000010UL
691#define NETLINK_SELINUX_SOCKET__SETATTR 0x00000020UL
692#define NETLINK_SELINUX_SOCKET__LOCK 0x00000040UL
693#define NETLINK_SELINUX_SOCKET__RELABELFROM 0x00000080UL
694#define NETLINK_SELINUX_SOCKET__RELABELTO 0x00000100UL
695#define NETLINK_SELINUX_SOCKET__APPEND 0x00000200UL
696#define NETLINK_SELINUX_SOCKET__BIND 0x00000400UL
697#define NETLINK_SELINUX_SOCKET__CONNECT 0x00000800UL
698#define NETLINK_SELINUX_SOCKET__LISTEN 0x00001000UL
699#define NETLINK_SELINUX_SOCKET__ACCEPT 0x00002000UL
700#define NETLINK_SELINUX_SOCKET__GETOPT 0x00004000UL
701#define NETLINK_SELINUX_SOCKET__SETOPT 0x00008000UL
702#define NETLINK_SELINUX_SOCKET__SHUTDOWN 0x00010000UL
703#define NETLINK_SELINUX_SOCKET__RECVFROM 0x00020000UL
704#define NETLINK_SELINUX_SOCKET__SENDTO 0x00040000UL
705#define NETLINK_SELINUX_SOCKET__RECV_MSG 0x00080000UL
706#define NETLINK_SELINUX_SOCKET__SEND_MSG 0x00100000UL
707#define NETLINK_SELINUX_SOCKET__NAME_BIND 0x00200000UL
708#define NETLINK_AUDIT_SOCKET__IOCTL 0x00000001UL
709#define NETLINK_AUDIT_SOCKET__READ 0x00000002UL
710#define NETLINK_AUDIT_SOCKET__WRITE 0x00000004UL
711#define NETLINK_AUDIT_SOCKET__CREATE 0x00000008UL
712#define NETLINK_AUDIT_SOCKET__GETATTR 0x00000010UL
713#define NETLINK_AUDIT_SOCKET__SETATTR 0x00000020UL
714#define NETLINK_AUDIT_SOCKET__LOCK 0x00000040UL
715#define NETLINK_AUDIT_SOCKET__RELABELFROM 0x00000080UL
716#define NETLINK_AUDIT_SOCKET__RELABELTO 0x00000100UL
717#define NETLINK_AUDIT_SOCKET__APPEND 0x00000200UL
718#define NETLINK_AUDIT_SOCKET__BIND 0x00000400UL
719#define NETLINK_AUDIT_SOCKET__CONNECT 0x00000800UL
720#define NETLINK_AUDIT_SOCKET__LISTEN 0x00001000UL
721#define NETLINK_AUDIT_SOCKET__ACCEPT 0x00002000UL
722#define NETLINK_AUDIT_SOCKET__GETOPT 0x00004000UL
723#define NETLINK_AUDIT_SOCKET__SETOPT 0x00008000UL
724#define NETLINK_AUDIT_SOCKET__SHUTDOWN 0x00010000UL
725#define NETLINK_AUDIT_SOCKET__RECVFROM 0x00020000UL
726#define NETLINK_AUDIT_SOCKET__SENDTO 0x00040000UL
727#define NETLINK_AUDIT_SOCKET__RECV_MSG 0x00080000UL
728#define NETLINK_AUDIT_SOCKET__SEND_MSG 0x00100000UL
729#define NETLINK_AUDIT_SOCKET__NAME_BIND 0x00200000UL
730#define NETLINK_AUDIT_SOCKET__NLMSG_READ 0x00400000UL
731#define NETLINK_AUDIT_SOCKET__NLMSG_WRITE 0x00800000UL
732#define NETLINK_AUDIT_SOCKET__NLMSG_RELAY 0x01000000UL
733#define NETLINK_AUDIT_SOCKET__NLMSG_READPRIV 0x02000000UL
734#define NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT 0x04000000UL
735#define NETLINK_IP6FW_SOCKET__IOCTL 0x00000001UL
736#define NETLINK_IP6FW_SOCKET__READ 0x00000002UL
737#define NETLINK_IP6FW_SOCKET__WRITE 0x00000004UL
738#define NETLINK_IP6FW_SOCKET__CREATE 0x00000008UL
739#define NETLINK_IP6FW_SOCKET__GETATTR 0x00000010UL
740#define NETLINK_IP6FW_SOCKET__SETATTR 0x00000020UL
741#define NETLINK_IP6FW_SOCKET__LOCK 0x00000040UL
742#define NETLINK_IP6FW_SOCKET__RELABELFROM 0x00000080UL
743#define NETLINK_IP6FW_SOCKET__RELABELTO 0x00000100UL
744#define NETLINK_IP6FW_SOCKET__APPEND 0x00000200UL
745#define NETLINK_IP6FW_SOCKET__BIND 0x00000400UL
746#define NETLINK_IP6FW_SOCKET__CONNECT 0x00000800UL
747#define NETLINK_IP6FW_SOCKET__LISTEN 0x00001000UL
748#define NETLINK_IP6FW_SOCKET__ACCEPT 0x00002000UL
749#define NETLINK_IP6FW_SOCKET__GETOPT 0x00004000UL
750#define NETLINK_IP6FW_SOCKET__SETOPT 0x00008000UL
751#define NETLINK_IP6FW_SOCKET__SHUTDOWN 0x00010000UL
752#define NETLINK_IP6FW_SOCKET__RECVFROM 0x00020000UL
753#define NETLINK_IP6FW_SOCKET__SENDTO 0x00040000UL
754#define NETLINK_IP6FW_SOCKET__RECV_MSG 0x00080000UL
755#define NETLINK_IP6FW_SOCKET__SEND_MSG 0x00100000UL
756#define NETLINK_IP6FW_SOCKET__NAME_BIND 0x00200000UL
757#define NETLINK_IP6FW_SOCKET__NLMSG_READ 0x00400000UL
758#define NETLINK_IP6FW_SOCKET__NLMSG_WRITE 0x00800000UL
759#define NETLINK_DNRT_SOCKET__IOCTL 0x00000001UL
760#define NETLINK_DNRT_SOCKET__READ 0x00000002UL
761#define NETLINK_DNRT_SOCKET__WRITE 0x00000004UL
762#define NETLINK_DNRT_SOCKET__CREATE 0x00000008UL
763#define NETLINK_DNRT_SOCKET__GETATTR 0x00000010UL
764#define NETLINK_DNRT_SOCKET__SETATTR 0x00000020UL
765#define NETLINK_DNRT_SOCKET__LOCK 0x00000040UL
766#define NETLINK_DNRT_SOCKET__RELABELFROM 0x00000080UL
767#define NETLINK_DNRT_SOCKET__RELABELTO 0x00000100UL
768#define NETLINK_DNRT_SOCKET__APPEND 0x00000200UL
769#define NETLINK_DNRT_SOCKET__BIND 0x00000400UL
770#define NETLINK_DNRT_SOCKET__CONNECT 0x00000800UL
771#define NETLINK_DNRT_SOCKET__LISTEN 0x00001000UL
772#define NETLINK_DNRT_SOCKET__ACCEPT 0x00002000UL
773#define NETLINK_DNRT_SOCKET__GETOPT 0x00004000UL
774#define NETLINK_DNRT_SOCKET__SETOPT 0x00008000UL
775#define NETLINK_DNRT_SOCKET__SHUTDOWN 0x00010000UL
776#define NETLINK_DNRT_SOCKET__RECVFROM 0x00020000UL
777#define NETLINK_DNRT_SOCKET__SENDTO 0x00040000UL
778#define NETLINK_DNRT_SOCKET__RECV_MSG 0x00080000UL
779#define NETLINK_DNRT_SOCKET__SEND_MSG 0x00100000UL
780#define NETLINK_DNRT_SOCKET__NAME_BIND 0x00200000UL
781#define ASSOCIATION__SENDTO 0x00000001UL
782#define ASSOCIATION__RECVFROM 0x00000002UL
783#define ASSOCIATION__SETCONTEXT 0x00000004UL
784#define ASSOCIATION__POLMATCH 0x00000008UL
785#define NETLINK_KOBJECT_UEVENT_SOCKET__IOCTL 0x00000001UL
786#define NETLINK_KOBJECT_UEVENT_SOCKET__READ 0x00000002UL
787#define NETLINK_KOBJECT_UEVENT_SOCKET__WRITE 0x00000004UL
788#define NETLINK_KOBJECT_UEVENT_SOCKET__CREATE 0x00000008UL
789#define NETLINK_KOBJECT_UEVENT_SOCKET__GETATTR 0x00000010UL
790#define NETLINK_KOBJECT_UEVENT_SOCKET__SETATTR 0x00000020UL
791#define NETLINK_KOBJECT_UEVENT_SOCKET__LOCK 0x00000040UL
792#define NETLINK_KOBJECT_UEVENT_SOCKET__RELABELFROM 0x00000080UL
793#define NETLINK_KOBJECT_UEVENT_SOCKET__RELABELTO 0x00000100UL
794#define NETLINK_KOBJECT_UEVENT_SOCKET__APPEND 0x00000200UL
795#define NETLINK_KOBJECT_UEVENT_SOCKET__BIND 0x00000400UL
796#define NETLINK_KOBJECT_UEVENT_SOCKET__CONNECT 0x00000800UL
797#define NETLINK_KOBJECT_UEVENT_SOCKET__LISTEN 0x00001000UL
798#define NETLINK_KOBJECT_UEVENT_SOCKET__ACCEPT 0x00002000UL
799#define NETLINK_KOBJECT_UEVENT_SOCKET__GETOPT 0x00004000UL
800#define NETLINK_KOBJECT_UEVENT_SOCKET__SETOPT 0x00008000UL
801#define NETLINK_KOBJECT_UEVENT_SOCKET__SHUTDOWN 0x00010000UL
802#define NETLINK_KOBJECT_UEVENT_SOCKET__RECVFROM 0x00020000UL
803#define NETLINK_KOBJECT_UEVENT_SOCKET__SENDTO 0x00040000UL
804#define NETLINK_KOBJECT_UEVENT_SOCKET__RECV_MSG 0x00080000UL
805#define NETLINK_KOBJECT_UEVENT_SOCKET__SEND_MSG 0x00100000UL
806#define NETLINK_KOBJECT_UEVENT_SOCKET__NAME_BIND 0x00200000UL
807#define APPLETALK_SOCKET__IOCTL 0x00000001UL
808#define APPLETALK_SOCKET__READ 0x00000002UL
809#define APPLETALK_SOCKET__WRITE 0x00000004UL
810#define APPLETALK_SOCKET__CREATE 0x00000008UL
811#define APPLETALK_SOCKET__GETATTR 0x00000010UL
812#define APPLETALK_SOCKET__SETATTR 0x00000020UL
813#define APPLETALK_SOCKET__LOCK 0x00000040UL
814#define APPLETALK_SOCKET__RELABELFROM 0x00000080UL
815#define APPLETALK_SOCKET__RELABELTO 0x00000100UL
816#define APPLETALK_SOCKET__APPEND 0x00000200UL
817#define APPLETALK_SOCKET__BIND 0x00000400UL
818#define APPLETALK_SOCKET__CONNECT 0x00000800UL
819#define APPLETALK_SOCKET__LISTEN 0x00001000UL
820#define APPLETALK_SOCKET__ACCEPT 0x00002000UL
821#define APPLETALK_SOCKET__GETOPT 0x00004000UL
822#define APPLETALK_SOCKET__SETOPT 0x00008000UL
823#define APPLETALK_SOCKET__SHUTDOWN 0x00010000UL
824#define APPLETALK_SOCKET__RECVFROM 0x00020000UL
825#define APPLETALK_SOCKET__SENDTO 0x00040000UL
826#define APPLETALK_SOCKET__RECV_MSG 0x00080000UL
827#define APPLETALK_SOCKET__SEND_MSG 0x00100000UL
828#define APPLETALK_SOCKET__NAME_BIND 0x00200000UL
829#define PACKET__SEND 0x00000001UL
830#define PACKET__RECV 0x00000002UL
831#define PACKET__RELABELTO 0x00000004UL
832#define PACKET__FLOW_IN 0x00000008UL
833#define PACKET__FLOW_OUT 0x00000010UL
834#define PACKET__FORWARD_IN 0x00000020UL
835#define PACKET__FORWARD_OUT 0x00000040UL
836#define KEY__VIEW 0x00000001UL
837#define KEY__READ 0x00000002UL
838#define KEY__WRITE 0x00000004UL
839#define KEY__SEARCH 0x00000008UL
840#define KEY__LINK 0x00000010UL
841#define KEY__SETATTR 0x00000020UL
842#define KEY__CREATE 0x00000040UL
843#define DCCP_SOCKET__IOCTL 0x00000001UL
844#define DCCP_SOCKET__READ 0x00000002UL
845#define DCCP_SOCKET__WRITE 0x00000004UL
846#define DCCP_SOCKET__CREATE 0x00000008UL
847#define DCCP_SOCKET__GETATTR 0x00000010UL
848#define DCCP_SOCKET__SETATTR 0x00000020UL
849#define DCCP_SOCKET__LOCK 0x00000040UL
850#define DCCP_SOCKET__RELABELFROM 0x00000080UL
851#define DCCP_SOCKET__RELABELTO 0x00000100UL
852#define DCCP_SOCKET__APPEND 0x00000200UL
853#define DCCP_SOCKET__BIND 0x00000400UL
854#define DCCP_SOCKET__CONNECT 0x00000800UL
855#define DCCP_SOCKET__LISTEN 0x00001000UL
856#define DCCP_SOCKET__ACCEPT 0x00002000UL
857#define DCCP_SOCKET__GETOPT 0x00004000UL
858#define DCCP_SOCKET__SETOPT 0x00008000UL
859#define DCCP_SOCKET__SHUTDOWN 0x00010000UL
860#define DCCP_SOCKET__RECVFROM 0x00020000UL
861#define DCCP_SOCKET__SENDTO 0x00040000UL
862#define DCCP_SOCKET__RECV_MSG 0x00080000UL
863#define DCCP_SOCKET__SEND_MSG 0x00100000UL
864#define DCCP_SOCKET__NAME_BIND 0x00200000UL
865#define DCCP_SOCKET__NODE_BIND 0x00400000UL
866#define DCCP_SOCKET__NAME_CONNECT 0x00800000UL
867#define MEMPROTECT__MMAP_ZERO 0x00000001UL
868#define PEER__RECV 0x00000001UL
869#define KERNEL_SERVICE__USE_AS_OVERRIDE 0x00000001UL
870#define KERNEL_SERVICE__CREATE_FILES_AS 0x00000002UL
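Each generated constant above is a single bit in a 32-bit access vector for its class, so a permission check is just a bitwise test of the requested bits against the allowed bits. A minimal standalone sketch of that idea (the av_allows() helper is invented for illustration and is not a kernel function):

#include <stdio.h>

#define FILE__READ  0x00000002UL
#define FILE__WRITE 0x00000004UL
#define FILE__OPEN  0x00100000UL

/* Hypothetical helper: every requested bit must be present in the allowed vector. */
static int av_allows(unsigned long allowed, unsigned long requested)
{
	return (requested & ~allowed) == 0;
}

int main(void)
{
	unsigned long allowed = FILE__READ | FILE__OPEN;

	printf("read+open -> %d\n", av_allows(allowed, FILE__READ | FILE__OPEN)); /* prints 1 */
	printf("write     -> %d\n", av_allows(allowed, FILE__WRITE));             /* prints 0 */
	return 0;
}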
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
index bb1ec801bdfe..4677aa519b04 100644
--- a/security/selinux/include/avc_ss.h
+++ b/security/selinux/include/avc_ss.h
@@ -10,26 +10,13 @@
 
 int avc_ss_reset(u32 seqno);
 
-struct av_perm_to_string {
-	u16 tclass;
-	u32 value;
-	const char *name;
+/* Class/perm mapping support */
+struct security_class_mapping {
+	const char *name;
+	const char *perms[sizeof(u32) * 8 + 1];
 };
 
-struct av_inherit {
-	const char **common_pts;
-	u32 common_base;
-	u16 tclass;
-};
-
-struct selinux_class_perm {
-	const struct av_perm_to_string *av_perm_to_string;
-	u32 av_pts_len;
-	u32 cts_len;
-	const char **class_to_string;
-	const struct av_inherit *av_inherit;
-	u32 av_inherit_len;
-};
-
+extern struct security_class_mapping secclass_map[];
 
 #endif /* _SELINUX_AVC_SS_H_ */
 
diff --git a/security/selinux/include/class_to_string.h b/security/selinux/include/class_to_string.h
deleted file mode 100644
index 7ab9299bfb6b..000000000000
--- a/security/selinux/include/class_to_string.h
+++ /dev/null
@@ -1,80 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2/*
3 * Security object class definitions
4 */
5 S_(NULL)
6 S_("security")
7 S_("process")
8 S_("system")
9 S_("capability")
10 S_("filesystem")
11 S_("file")
12 S_("dir")
13 S_("fd")
14 S_("lnk_file")
15 S_("chr_file")
16 S_("blk_file")
17 S_("sock_file")
18 S_("fifo_file")
19 S_("socket")
20 S_("tcp_socket")
21 S_("udp_socket")
22 S_("rawip_socket")
23 S_("node")
24 S_("netif")
25 S_("netlink_socket")
26 S_("packet_socket")
27 S_("key_socket")
28 S_("unix_stream_socket")
29 S_("unix_dgram_socket")
30 S_("sem")
31 S_("msg")
32 S_("msgq")
33 S_("shm")
34 S_("ipc")
35 S_(NULL)
36 S_(NULL)
37 S_(NULL)
38 S_(NULL)
39 S_(NULL)
40 S_(NULL)
41 S_(NULL)
42 S_(NULL)
43 S_(NULL)
44 S_(NULL)
45 S_(NULL)
46 S_(NULL)
47 S_(NULL)
48 S_("netlink_route_socket")
49 S_("netlink_firewall_socket")
50 S_("netlink_tcpdiag_socket")
51 S_("netlink_nflog_socket")
52 S_("netlink_xfrm_socket")
53 S_("netlink_selinux_socket")
54 S_("netlink_audit_socket")
55 S_("netlink_ip6fw_socket")
56 S_("netlink_dnrt_socket")
57 S_(NULL)
58 S_(NULL)
59 S_("association")
60 S_("netlink_kobject_uevent_socket")
61 S_("appletalk_socket")
62 S_("packet")
63 S_("key")
64 S_(NULL)
65 S_("dccp_socket")
66 S_("memprotect")
67 S_(NULL)
68 S_(NULL)
69 S_(NULL)
70 S_(NULL)
71 S_(NULL)
72 S_(NULL)
73 S_("peer")
74 S_("capability2")
75 S_(NULL)
76 S_(NULL)
77 S_(NULL)
78 S_(NULL)
79 S_("kernel_service")
80 S_("tun_socket")
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
new file mode 100644
index 000000000000..8b32e959bb2e
--- /dev/null
+++ b/security/selinux/include/classmap.h
@@ -0,0 +1,150 @@
1#define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \
2 "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append"
3
4#define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \
5 "rename", "execute", "swapon", "quotaon", "mounton"
6
7#define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \
8 "listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \
9 "sendto", "recv_msg", "send_msg", "name_bind"
10
11#define COMMON_IPC_PERMS "create", "destroy", "getattr", "setattr", "read", \
12 "write", "associate", "unix_read", "unix_write"
13
14struct security_class_mapping secclass_map[] = {
15 { "security",
16 { "compute_av", "compute_create", "compute_member",
17 "check_context", "load_policy", "compute_relabel",
18 "compute_user", "setenforce", "setbool", "setsecparam",
19 "setcheckreqprot", NULL } },
20 { "process",
21 { "fork", "transition", "sigchld", "sigkill",
22 "sigstop", "signull", "signal", "ptrace", "getsched", "setsched",
23 "getsession", "getpgid", "setpgid", "getcap", "setcap", "share",
24 "getattr", "setexec", "setfscreate", "noatsecure", "siginh",
25 "setrlimit", "rlimitinh", "dyntransition", "setcurrent",
26 "execmem", "execstack", "execheap", "setkeycreate",
27 "setsockcreate", NULL } },
28 { "system",
29 { "ipc_info", "syslog_read", "syslog_mod",
30 "syslog_console", "module_request", NULL } },
31 { "capability",
32 { "chown", "dac_override", "dac_read_search",
33 "fowner", "fsetid", "kill", "setgid", "setuid", "setpcap",
34 "linux_immutable", "net_bind_service", "net_broadcast",
35 "net_admin", "net_raw", "ipc_lock", "ipc_owner", "sys_module",
36 "sys_rawio", "sys_chroot", "sys_ptrace", "sys_pacct", "sys_admin",
37 "sys_boot", "sys_nice", "sys_resource", "sys_time",
38 "sys_tty_config", "mknod", "lease", "audit_write",
39 "audit_control", "setfcap", NULL } },
40 { "filesystem",
41 { "mount", "remount", "unmount", "getattr",
42 "relabelfrom", "relabelto", "transition", "associate", "quotamod",
43 "quotaget", NULL } },
44 { "file",
45 { COMMON_FILE_PERMS,
46 "execute_no_trans", "entrypoint", "execmod", "open", NULL } },
47 { "dir",
48 { COMMON_FILE_PERMS, "add_name", "remove_name",
49 "reparent", "search", "rmdir", "open", NULL } },
50 { "fd", { "use", NULL } },
51 { "lnk_file",
52 { COMMON_FILE_PERMS, NULL } },
53 { "chr_file",
54 { COMMON_FILE_PERMS,
55 "execute_no_trans", "entrypoint", "execmod", "open", NULL } },
56 { "blk_file",
57 { COMMON_FILE_PERMS, "open", NULL } },
58 { "sock_file",
59 { COMMON_FILE_PERMS, "open", NULL } },
60 { "fifo_file",
61 { COMMON_FILE_PERMS, "open", NULL } },
62 { "socket",
63 { COMMON_SOCK_PERMS, NULL } },
64 { "tcp_socket",
65 { COMMON_SOCK_PERMS,
66 "connectto", "newconn", "acceptfrom", "node_bind", "name_connect",
67 NULL } },
68 { "udp_socket",
69 { COMMON_SOCK_PERMS,
70 "node_bind", NULL } },
71 { "rawip_socket",
72 { COMMON_SOCK_PERMS,
73 "node_bind", NULL } },
74 { "node",
75 { "tcp_recv", "tcp_send", "udp_recv", "udp_send",
76 "rawip_recv", "rawip_send", "enforce_dest",
77 "dccp_recv", "dccp_send", "recvfrom", "sendto", NULL } },
78 { "netif",
79 { "tcp_recv", "tcp_send", "udp_recv", "udp_send",
80 "rawip_recv", "rawip_send", "dccp_recv", "dccp_send",
81 "ingress", "egress", NULL } },
82 { "netlink_socket",
83 { COMMON_SOCK_PERMS, NULL } },
84 { "packet_socket",
85 { COMMON_SOCK_PERMS, NULL } },
86 { "key_socket",
87 { COMMON_SOCK_PERMS, NULL } },
88 { "unix_stream_socket",
89 { COMMON_SOCK_PERMS, "connectto", "newconn", "acceptfrom", NULL
90 } },
91 { "unix_dgram_socket",
92 { COMMON_SOCK_PERMS, NULL
93 } },
94 { "sem",
95 { COMMON_IPC_PERMS, NULL } },
96 { "msg", { "send", "receive", NULL } },
97 { "msgq",
98 { COMMON_IPC_PERMS, "enqueue", NULL } },
99 { "shm",
100 { COMMON_IPC_PERMS, "lock", NULL } },
101 { "ipc",
102 { COMMON_IPC_PERMS, NULL } },
103 { "netlink_route_socket",
104 { COMMON_SOCK_PERMS,
105 "nlmsg_read", "nlmsg_write", NULL } },
106 { "netlink_firewall_socket",
107 { COMMON_SOCK_PERMS,
108 "nlmsg_read", "nlmsg_write", NULL } },
109 { "netlink_tcpdiag_socket",
110 { COMMON_SOCK_PERMS,
111 "nlmsg_read", "nlmsg_write", NULL } },
112 { "netlink_nflog_socket",
113 { COMMON_SOCK_PERMS, NULL } },
114 { "netlink_xfrm_socket",
115 { COMMON_SOCK_PERMS,
116 "nlmsg_read", "nlmsg_write", NULL } },
117 { "netlink_selinux_socket",
118 { COMMON_SOCK_PERMS, NULL } },
119 { "netlink_audit_socket",
120 { COMMON_SOCK_PERMS,
121 "nlmsg_read", "nlmsg_write", "nlmsg_relay", "nlmsg_readpriv",
122 "nlmsg_tty_audit", NULL } },
123 { "netlink_ip6fw_socket",
124 { COMMON_SOCK_PERMS,
125 "nlmsg_read", "nlmsg_write", NULL } },
126 { "netlink_dnrt_socket",
127 { COMMON_SOCK_PERMS, NULL } },
128 { "association",
129 { "sendto", "recvfrom", "setcontext", "polmatch", NULL } },
130 { "netlink_kobject_uevent_socket",
131 { COMMON_SOCK_PERMS, NULL } },
132 { "appletalk_socket",
133 { COMMON_SOCK_PERMS, NULL } },
134 { "packet",
135 { "send", "recv", "relabelto", "flow_in", "flow_out",
136 "forward_in", "forward_out", NULL } },
137 { "key",
138 { "view", "read", "write", "search", "link", "setattr", "create",
139 NULL } },
140 { "dccp_socket",
141 { COMMON_SOCK_PERMS,
142 "node_bind", "name_connect", NULL } },
143 { "memprotect", { "mmap_zero", NULL } },
144 { "peer", { "recv", NULL } },
145 { "capability2", { "mac_override", "mac_admin", NULL } },
146 { "kernel_service", { "use_as_override", "create_files_as", NULL } },
147 { "tun_socket",
148 { COMMON_SOCK_PERMS, NULL } },
149 { NULL }
150 };
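Each entry above pairs a class name with a NULL-terminated list of permission names; the position of a name in that list is the kernel-private bit used for it (bit 0 for the first name, bit 1 for the second, and so on), which is what the mapping code added later in this patch relies on. A small userspace sketch of walking such a table (the two demo entries simply copy rows from secclass_map):

#include <stdio.h>

struct security_class_mapping {
	const char *name;
	const char *perms[sizeof(unsigned int) * 8 + 1];
};

static struct security_class_mapping demo_map[] = {
	{ "fd", { "use", NULL } },
	{ "msg", { "send", "receive", NULL } },
	{ NULL }
};

int main(void)
{
	int c, p;

	for (c = 0; demo_map[c].name; c++)
		for (p = 0; demo_map[c].perms[p]; p++)
			printf("%s:%-8s -> kernel-private bit 0x%08x\n",
			       demo_map[c].name, demo_map[c].perms[p], 1U << p);
	return 0;
}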
diff --git a/security/selinux/include/common_perm_to_string.h b/security/selinux/include/common_perm_to_string.h
deleted file mode 100644
index ce5b6e2fe9dd..000000000000
--- a/security/selinux/include/common_perm_to_string.h
+++ /dev/null
@@ -1,58 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2TB_(common_file_perm_to_string)
3 S_("ioctl")
4 S_("read")
5 S_("write")
6 S_("create")
7 S_("getattr")
8 S_("setattr")
9 S_("lock")
10 S_("relabelfrom")
11 S_("relabelto")
12 S_("append")
13 S_("unlink")
14 S_("link")
15 S_("rename")
16 S_("execute")
17 S_("swapon")
18 S_("quotaon")
19 S_("mounton")
20TE_(common_file_perm_to_string)
21
22TB_(common_socket_perm_to_string)
23 S_("ioctl")
24 S_("read")
25 S_("write")
26 S_("create")
27 S_("getattr")
28 S_("setattr")
29 S_("lock")
30 S_("relabelfrom")
31 S_("relabelto")
32 S_("append")
33 S_("bind")
34 S_("connect")
35 S_("listen")
36 S_("accept")
37 S_("getopt")
38 S_("setopt")
39 S_("shutdown")
40 S_("recvfrom")
41 S_("sendto")
42 S_("recv_msg")
43 S_("send_msg")
44 S_("name_bind")
45TE_(common_socket_perm_to_string)
46
47TB_(common_ipc_perm_to_string)
48 S_("create")
49 S_("destroy")
50 S_("getattr")
51 S_("setattr")
52 S_("read")
53 S_("write")
54 S_("associate")
55 S_("unix_read")
56 S_("unix_write")
57TE_(common_ipc_perm_to_string)
58
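Generated tables like the one above were consumed x-macro style: the including file defined TB_(), S_() and TE_() to expand the list into whatever data structure it needed. An illustrative expansion (the macro bodies below are an assumption for the example, not the kernel's exact definitions):

#include <stdio.h>

#define TB_(name) static const char *name[] = {
#define S_(s) s,
#define TE_(name) };

TB_(demo_perm_to_string)
	S_("ioctl")
	S_("read")
	S_("write")
TE_(demo_perm_to_string)

int main(void)
{
	/* The array index doubles as the permission's bit position. */
	printf("bit %d is \"%s\"\n", 1, demo_perm_to_string[1]); /* "read" */
	return 0;
}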
diff --git a/security/selinux/include/flask.h b/security/selinux/include/flask.h
deleted file mode 100644
index f248500a1e3c..000000000000
--- a/security/selinux/include/flask.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/* This file is automatically generated. Do not edit. */
2#ifndef _SELINUX_FLASK_H_
3#define _SELINUX_FLASK_H_
4
5/*
6 * Security object class definitions
7 */
8#define SECCLASS_SECURITY 1
9#define SECCLASS_PROCESS 2
10#define SECCLASS_SYSTEM 3
11#define SECCLASS_CAPABILITY 4
12#define SECCLASS_FILESYSTEM 5
13#define SECCLASS_FILE 6
14#define SECCLASS_DIR 7
15#define SECCLASS_FD 8
16#define SECCLASS_LNK_FILE 9
17#define SECCLASS_CHR_FILE 10
18#define SECCLASS_BLK_FILE 11
19#define SECCLASS_SOCK_FILE 12
20#define SECCLASS_FIFO_FILE 13
21#define SECCLASS_SOCKET 14
22#define SECCLASS_TCP_SOCKET 15
23#define SECCLASS_UDP_SOCKET 16
24#define SECCLASS_RAWIP_SOCKET 17
25#define SECCLASS_NODE 18
26#define SECCLASS_NETIF 19
27#define SECCLASS_NETLINK_SOCKET 20
28#define SECCLASS_PACKET_SOCKET 21
29#define SECCLASS_KEY_SOCKET 22
30#define SECCLASS_UNIX_STREAM_SOCKET 23
31#define SECCLASS_UNIX_DGRAM_SOCKET 24
32#define SECCLASS_SEM 25
33#define SECCLASS_MSG 26
34#define SECCLASS_MSGQ 27
35#define SECCLASS_SHM 28
36#define SECCLASS_IPC 29
37#define SECCLASS_NETLINK_ROUTE_SOCKET 43
38#define SECCLASS_NETLINK_FIREWALL_SOCKET 44
39#define SECCLASS_NETLINK_TCPDIAG_SOCKET 45
40#define SECCLASS_NETLINK_NFLOG_SOCKET 46
41#define SECCLASS_NETLINK_XFRM_SOCKET 47
42#define SECCLASS_NETLINK_SELINUX_SOCKET 48
43#define SECCLASS_NETLINK_AUDIT_SOCKET 49
44#define SECCLASS_NETLINK_IP6FW_SOCKET 50
45#define SECCLASS_NETLINK_DNRT_SOCKET 51
46#define SECCLASS_ASSOCIATION 54
47#define SECCLASS_NETLINK_KOBJECT_UEVENT_SOCKET 55
48#define SECCLASS_APPLETALK_SOCKET 56
49#define SECCLASS_PACKET 57
50#define SECCLASS_KEY 58
51#define SECCLASS_DCCP_SOCKET 60
52#define SECCLASS_MEMPROTECT 61
53#define SECCLASS_PEER 68
54#define SECCLASS_CAPABILITY2 69
55#define SECCLASS_KERNEL_SERVICE 74
56#define SECCLASS_TUN_SOCKET 75
57
58/*
59 * Security identifier indices for initial entities
60 */
61#define SECINITSID_KERNEL 1
62#define SECINITSID_SECURITY 2
63#define SECINITSID_UNLABELED 3
64#define SECINITSID_FS 4
65#define SECINITSID_FILE 5
66#define SECINITSID_FILE_LABELS 6
67#define SECINITSID_INIT 7
68#define SECINITSID_ANY_SOCKET 8
69#define SECINITSID_PORT 9
70#define SECINITSID_NETIF 10
71#define SECINITSID_NETMSG 11
72#define SECINITSID_NODE 12
73#define SECINITSID_IGMP_PACKET 13
74#define SECINITSID_ICMP_SOCKET 14
75#define SECINITSID_TCP_SOCKET 15
76#define SECINITSID_SYSCTL_MODPROBE 16
77#define SECINITSID_SYSCTL 17
78#define SECINITSID_SYSCTL_FS 18
79#define SECINITSID_SYSCTL_KERNEL 19
80#define SECINITSID_SYSCTL_NET 20
81#define SECINITSID_SYSCTL_NET_UNIX 21
82#define SECINITSID_SYSCTL_VM 22
83#define SECINITSID_SYSCTL_DEV 23
84#define SECINITSID_KMOD 24
85#define SECINITSID_POLICY 25
86#define SECINITSID_SCMP_PACKET 26
87#define SECINITSID_DEVNULL 27
88
89#define SECINITSID_NUM 27
90
91#endif
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index ca835795a8b3..2553266ad793 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -97,11 +97,18 @@ struct av_decision {
 #define AVD_FLAGS_PERMISSIVE	0x0001
 
 int security_compute_av(u32 ssid, u32 tsid,
			 u16 tclass, u32 requested,
			 struct av_decision *avd);
+
+int security_compute_av_user(u32 ssid, u32 tsid,
+			     u16 tclass, u32 requested,
+			     struct av_decision *avd);
 
 int security_transition_sid(u32 ssid, u32 tsid,
			     u16 tclass, u32 *out_sid);
+
+int security_transition_sid_user(u32 ssid, u32 tsid,
+				 u16 tclass, u32 *out_sid);
 
 int security_member_sid(u32 ssid, u32 tsid,
			 u16 tclass, u32 *out_sid);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index b4fc506e7a87..fab36fdf2769 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -522,7 +522,7 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
 	if (length < 0)
 		goto out2;
 
-	length = security_compute_av(ssid, tsid, tclass, req, &avd);
+	length = security_compute_av_user(ssid, tsid, tclass, req, &avd);
 	if (length < 0)
 		goto out2;
 
@@ -571,7 +571,7 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
 	if (length < 0)
 		goto out2;
 
-	length = security_transition_sid(ssid, tsid, tclass, &newsid);
+	length = security_transition_sid_user(ssid, tsid, tclass, &newsid);
 	if (length < 0)
 		goto out2;
 
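These two call sites implement the selinuxfs access and create nodes used by userspace object managers, which pass class and permission values in the loaded policy's own numbering; the new _user variants therefore bypass the kernel-internal remapping added elsewhere in this patch. From an application, the same query is normally issued through libselinux rather than by writing to selinuxfs directly; a hedged sketch (prototypes as in selinux/selinux.h, link with -lselinux; the contexts are placeholders that must be valid in the loaded policy):

#include <stdio.h>
#include <selinux/selinux.h>

int main(void)
{
	char scon[] = "user_u:object_r:user_home_t";
	security_class_t tclass = string_to_security_class("file");
	access_vector_t requested = string_to_av_perm(tclass, "read");
	struct av_decision avd;

	if (security_compute_av(scon, scon, tclass, requested, &avd) < 0) {
		perror("security_compute_av");
		return 1;
	}
	printf("file:read %s\n", (avd.allowed & requested) ? "allowed" : "denied");
	return 0;
}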
diff --git a/security/selinux/ss/Makefile b/security/selinux/ss/Makefile
index bad78779b9b0..15d4e62917de 100644
--- a/security/selinux/ss/Makefile
+++ b/security/selinux/ss/Makefile
@@ -2,7 +2,7 @@
 # Makefile for building the SELinux security server as part of the kernel tree.
 #
 
-EXTRA_CFLAGS += -Isecurity/selinux/include
+EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
 obj-y := ss.o
 
 ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index b5407f16c2a4..3f2b2706b5bb 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -532,7 +532,7 @@ int mls_compute_sid(struct context *scontext,
 		}
 		/* Fallthrough */
 	case AVTAB_CHANGE:
-		if (tclass == SECCLASS_PROCESS)
+		if (tclass == policydb.process_class)
 			/* Use the process MLS attributes. */
 			return mls_context_cpy(newcontext, scontext);
 		else
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 72e4a54973aa..f03667213ea8 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -713,7 +713,6 @@ void policydb_destroy(struct policydb *p)
 		ebitmap_destroy(&p->type_attr_map[i]);
 	}
 	kfree(p->type_attr_map);
-	kfree(p->undefined_perms);
 	ebitmap_destroy(&p->policycaps);
 	ebitmap_destroy(&p->permissive_map);
 
@@ -1640,6 +1639,40 @@ static int policydb_bounds_sanity_check(struct policydb *p)
 
 extern int ss_initialized;
 
+u16 string_to_security_class(struct policydb *p, const char *name)
+{
+	struct class_datum *cladatum;
+
+	cladatum = hashtab_search(p->p_classes.table, name);
+	if (!cladatum)
+		return 0;
+
+	return cladatum->value;
+}
+
+u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name)
+{
+	struct class_datum *cladatum;
+	struct perm_datum *perdatum = NULL;
+	struct common_datum *comdatum;
+
+	if (!tclass || tclass > p->p_classes.nprim)
+		return 0;
+
+	cladatum = p->class_val_to_struct[tclass-1];
+	comdatum = cladatum->comdatum;
+	if (comdatum)
+		perdatum = hashtab_search(comdatum->permissions.table,
+					  name);
+	if (!perdatum)
+		perdatum = hashtab_search(cladatum->permissions.table,
+					  name);
+	if (!perdatum)
+		return 0;
+
+	return 1U << (perdatum->value-1);
+}
+
 /*
  * Read the configuration data from a policy database binary
  * representation file into a policy database structure.
@@ -1861,6 +1894,16 @@ int policydb_read(struct policydb *p, void *fp)
 	if (rc)
 		goto bad;
 
+	p->process_class = string_to_security_class(p, "process");
+	if (!p->process_class)
+		goto bad;
+	p->process_trans_perms = string_to_av_perm(p, p->process_class,
+						   "transition");
+	p->process_trans_perms |= string_to_av_perm(p, p->process_class,
+						    "dyntransition");
+	if (!p->process_trans_perms)
+		goto bad;
+
 	for (i = 0; i < info->ocon_num; i++) {
 		rc = next_entry(buf, fp, sizeof(u32));
 		if (rc < 0)
@@ -2101,7 +2144,7 @@ int policydb_read(struct policydb *p, void *fp)
 				goto bad;
 			rt->target_class = le32_to_cpu(buf[0]);
 		} else
-			rt->target_class = SECCLASS_PROCESS;
+			rt->target_class = p->process_class;
 		if (!policydb_type_isvalid(p, rt->source_type) ||
 		    !policydb_type_isvalid(p, rt->target_type) ||
 		    !policydb_class_isvalid(p, rt->target_class)) {
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index 55152d498b53..cdcc5700946f 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -254,7 +254,9 @@ struct policydb {
 
 	unsigned int reject_unknown : 1;
 	unsigned int allow_unknown : 1;
-	u32 *undefined_perms;
+
+	u16 process_class;
+	u32 process_trans_perms;
 };
 
 extern void policydb_destroy(struct policydb *p);
@@ -295,5 +297,8 @@ static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
 	return 0;
 }
 
+extern u16 string_to_security_class(struct policydb *p, const char *name);
+extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
+
 #endif /* _SS_POLICYDB_H_ */
 
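With these two helpers exported, the services.c changes that follow can translate each secclass_map entry into the class value and permission bits of whatever policy was just loaded. A compact userspace model of that translation step, with the policy lookups stubbed out (the stub values below are invented, not taken from any real policy):

#include <stdio.h>
#include <string.h>

/* Stand-ins for string_to_security_class()/string_to_av_perm(); real callers
 * would query the loaded policydb instead of these invented values. */
static unsigned short stub_string_to_class(const char *name)
{
	return strcmp(name, "file") ? 0 : 6;
}

static unsigned int stub_string_to_av_perm(unsigned short tclass, const char *name)
{
	if (tclass != 6)
		return 0;
	if (!strcmp(name, "read"))
		return 0x00000002;
	if (!strcmp(name, "open"))
		return 0x00100000;
	return 0;	/* unknown permission in this policy */
}

int main(void)
{
	const char *perms[] = { "read", "open", NULL };
	unsigned short value = stub_string_to_class("file");
	unsigned int i;

	/* Kernel-private bit i is mapped to whatever bit the policy assigned. */
	for (i = 0; perms[i]; i++)
		printf("kernel bit 0x%08x (%s) -> policy bit 0x%08x\n",
		       1U << i, perms[i], stub_string_to_av_perm(value, perms[i]));
	return 0;
}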
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index ff17820d35ec..d6bb20cbad62 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -65,16 +65,10 @@
 #include "audit.h"
 
 extern void selnl_notify_policyload(u32 seqno);
-unsigned int policydb_loaded_version;
 
 int selinux_policycap_netpeer;
 int selinux_policycap_openperm;
 
-/*
- * This is declared in avc.c
- */
-extern const struct selinux_class_perm selinux_class_perm;
-
 static DEFINE_RWLOCK(policy_rwlock);
 
 static struct sidtab sidtab;
@@ -98,6 +92,165 @@ static int context_struct_compute_av(struct context *scontext,
 					  u16 tclass,
 					  u32 requested,
 					  struct av_decision *avd);
95
96struct selinux_mapping {
97 u16 value; /* policy value */
98 unsigned num_perms;
99 u32 perms[sizeof(u32) * 8];
100};
101
102static struct selinux_mapping *current_mapping;
103static u16 current_mapping_size;
104
105static int selinux_set_mapping(struct policydb *pol,
106 struct security_class_mapping *map,
107 struct selinux_mapping **out_map_p,
108 u16 *out_map_size)
109{
110 struct selinux_mapping *out_map = NULL;
111 size_t size = sizeof(struct selinux_mapping);
112 u16 i, j;
113 unsigned k;
114 bool print_unknown_handle = false;
115
116 /* Find number of classes in the input mapping */
117 if (!map)
118 return -EINVAL;
119 i = 0;
120 while (map[i].name)
121 i++;
122
123 /* Allocate space for the class records, plus one for class zero */
124 out_map = kcalloc(++i, size, GFP_ATOMIC);
125 if (!out_map)
126 return -ENOMEM;
127
128 /* Store the raw class and permission values */
129 j = 0;
130 while (map[j].name) {
131 struct security_class_mapping *p_in = map + (j++);
132 struct selinux_mapping *p_out = out_map + j;
133
134 /* An empty class string skips ahead */
135 if (!strcmp(p_in->name, "")) {
136 p_out->num_perms = 0;
137 continue;
138 }
139
140 p_out->value = string_to_security_class(pol, p_in->name);
141 if (!p_out->value) {
142 printk(KERN_INFO
143 "SELinux: Class %s not defined in policy.\n",
144 p_in->name);
145 if (pol->reject_unknown)
146 goto err;
147 p_out->num_perms = 0;
148 print_unknown_handle = true;
149 continue;
150 }
151
152 k = 0;
153 while (p_in->perms && p_in->perms[k]) {
154 /* An empty permission string skips ahead */
155 if (!*p_in->perms[k]) {
156 k++;
157 continue;
158 }
159 p_out->perms[k] = string_to_av_perm(pol, p_out->value,
160 p_in->perms[k]);
161 if (!p_out->perms[k]) {
162 printk(KERN_INFO
163 "SELinux: Permission %s in class %s not defined in policy.\n",
164 p_in->perms[k], p_in->name);
165 if (pol->reject_unknown)
166 goto err;
167 print_unknown_handle = true;
168 }
169
170 k++;
171 }
172 p_out->num_perms = k;
173 }
174
175 if (print_unknown_handle)
176 printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n",
177 pol->allow_unknown ? "allowed" : "denied");
178
179 *out_map_p = out_map;
180 *out_map_size = i;
181 return 0;
182err:
183 kfree(out_map);
184 return -EINVAL;
185}
186
187/*
188 * Get real, policy values from mapped values
189 */
190
191static u16 unmap_class(u16 tclass)
192{
193 if (tclass < current_mapping_size)
194 return current_mapping[tclass].value;
195
196 return tclass;
197}
198
199static u32 unmap_perm(u16 tclass, u32 tperm)
200{
201 if (tclass < current_mapping_size) {
202 unsigned i;
203 u32 kperm = 0;
204
205 for (i = 0; i < current_mapping[tclass].num_perms; i++)
206 if (tperm & (1<<i)) {
207 kperm |= current_mapping[tclass].perms[i];
208 tperm &= ~(1<<i);
209 }
210 return kperm;
211 }
212
213 return tperm;
214}
215
216static void map_decision(u16 tclass, struct av_decision *avd,
217 int allow_unknown)
218{
219 if (tclass < current_mapping_size) {
220 unsigned i, n = current_mapping[tclass].num_perms;
221 u32 result;
222
223 for (i = 0, result = 0; i < n; i++) {
224 if (avd->allowed & current_mapping[tclass].perms[i])
225 result |= 1<<i;
226 if (allow_unknown && !current_mapping[tclass].perms[i])
227 result |= 1<<i;
228 }
229 avd->allowed = result;
230
231 for (i = 0, result = 0; i < n; i++)
232 if (avd->auditallow & current_mapping[tclass].perms[i])
233 result |= 1<<i;
234 avd->auditallow = result;
235
236 for (i = 0, result = 0; i < n; i++) {
237 if (avd->auditdeny & current_mapping[tclass].perms[i])
238 result |= 1<<i;
239 if (!allow_unknown && !current_mapping[tclass].perms[i])
240 result |= 1<<i;
241 }
242 /*
243 * In case the kernel has a bug and requests a permission
244 * between num_perms and the maximum permission number, we
245 * should audit that denial
246 */
247 for (; i < (sizeof(u32)*8); i++)
248 result |= 1<<i;
249 avd->auditdeny = result;
250 }
251}
252
253
101/* 254/*
102 * Return the boolean value of a constraint expression 255 * Return the boolean value of a constraint expression
103 * when it is applied to the specified source and target 256 * when it is applied to the specified source and target
@@ -467,21 +620,9 @@ static int context_struct_compute_av(struct context *scontext,
467 struct class_datum *tclass_datum; 620 struct class_datum *tclass_datum;
468 struct ebitmap *sattr, *tattr; 621 struct ebitmap *sattr, *tattr;
469 struct ebitmap_node *snode, *tnode; 622 struct ebitmap_node *snode, *tnode;
470 const struct selinux_class_perm *kdefs = &selinux_class_perm;
471 unsigned int i, j; 623 unsigned int i, j;
472 624
473 /* 625 /*
474 * Remap extended Netlink classes for old policy versions.
475 * Do this here rather than socket_type_to_security_class()
476 * in case a newer policy version is loaded, allowing sockets
477 * to remain in the correct class.
478 */
479 if (policydb_loaded_version < POLICYDB_VERSION_NLCLASS)
480 if (tclass >= SECCLASS_NETLINK_ROUTE_SOCKET &&
481 tclass <= SECCLASS_NETLINK_DNRT_SOCKET)
482 tclass = SECCLASS_NETLINK_SOCKET;
483
484 /*
485 * Initialize the access vectors to the default values. 626 * Initialize the access vectors to the default values.
486 */ 627 */
487 avd->allowed = 0; 628 avd->allowed = 0;
@@ -490,33 +631,11 @@ static int context_struct_compute_av(struct context *scontext,
490 avd->seqno = latest_granting; 631 avd->seqno = latest_granting;
491 avd->flags = 0; 632 avd->flags = 0;
492 633
493 /* 634 if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
494 * Check for all the invalid cases. 635 if (printk_ratelimit())
495 * - tclass 0 636 printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass);
496 * - tclass > policy and > kernel 637 return -EINVAL;
497 * - tclass > policy but is a userspace class 638 }
498 * - tclass > policy but we do not allow unknowns
499 */
500 if (unlikely(!tclass))
501 goto inval_class;
502 if (unlikely(tclass > policydb.p_classes.nprim))
503 if (tclass > kdefs->cts_len ||
504 !kdefs->class_to_string[tclass] ||
505 !policydb.allow_unknown)
506 goto inval_class;
507
508 /*
509 * Kernel class and we allow unknown so pad the allow decision
510 * the pad will be all 1 for unknown classes.
511 */
512 if (tclass <= kdefs->cts_len && policydb.allow_unknown)
513 avd->allowed = policydb.undefined_perms[tclass - 1];
514
515 /*
516 * Not in policy. Since decision is completed (all 1 or all 0) return.
517 */
518 if (unlikely(tclass > policydb.p_classes.nprim))
519 return 0;
520 639
521 tclass_datum = policydb.class_val_to_struct[tclass - 1]; 640 tclass_datum = policydb.class_val_to_struct[tclass - 1];
522 641
@@ -568,8 +687,8 @@ static int context_struct_compute_av(struct context *scontext,
568 * role is changing, then check the (current_role, new_role) 687 * role is changing, then check the (current_role, new_role)
569 * pair. 688 * pair.
570 */ 689 */
571 if (tclass == SECCLASS_PROCESS && 690 if (tclass == policydb.process_class &&
572 (avd->allowed & (PROCESS__TRANSITION | PROCESS__DYNTRANSITION)) && 691 (avd->allowed & policydb.process_trans_perms) &&
573 scontext->role != tcontext->role) { 692 scontext->role != tcontext->role) {
574 for (ra = policydb.role_allow; ra; ra = ra->next) { 693 for (ra = policydb.role_allow; ra; ra = ra->next) {
575 if (scontext->role == ra->role && 694 if (scontext->role == ra->role &&
@@ -577,8 +696,7 @@ static int context_struct_compute_av(struct context *scontext,
577 break; 696 break;
578 } 697 }
579 if (!ra) 698 if (!ra)
580 avd->allowed &= ~(PROCESS__TRANSITION | 699 avd->allowed &= ~policydb.process_trans_perms;
581 PROCESS__DYNTRANSITION);
582 } 700 }
583 701
584 /* 702 /*
@@ -590,21 +708,6 @@ static int context_struct_compute_av(struct context *scontext,
590 tclass, requested, avd); 708 tclass, requested, avd);
591 709
592 return 0; 710 return 0;
593
594inval_class:
595 if (!tclass || tclass > kdefs->cts_len ||
596 !kdefs->class_to_string[tclass]) {
597 if (printk_ratelimit())
598 printk(KERN_ERR "SELinux: %s: unrecognized class %d\n",
599 __func__, tclass);
600 return -EINVAL;
601 }
602
603 /*
604 * Known to the kernel, but not to the policy.
605 * Handle as a denial (allowed is 0).
606 */
607 return 0;
608} 711}
609 712
610static int security_validtrans_handle_fail(struct context *ocontext, 713static int security_validtrans_handle_fail(struct context *ocontext,
@@ -636,13 +739,14 @@ out:
636} 739}
637 740
638int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, 741int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
639 u16 tclass) 742 u16 orig_tclass)
640{ 743{
641 struct context *ocontext; 744 struct context *ocontext;
642 struct context *ncontext; 745 struct context *ncontext;
643 struct context *tcontext; 746 struct context *tcontext;
644 struct class_datum *tclass_datum; 747 struct class_datum *tclass_datum;
645 struct constraint_node *constraint; 748 struct constraint_node *constraint;
749 u16 tclass;
646 int rc = 0; 750 int rc = 0;
647 751
648 if (!ss_initialized) 752 if (!ss_initialized)
@@ -650,16 +754,7 @@ int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
650 754
651 read_lock(&policy_rwlock); 755 read_lock(&policy_rwlock);
652 756
653 /* 757 tclass = unmap_class(orig_tclass);
654 * Remap extended Netlink classes for old policy versions.
655 * Do this here rather than socket_type_to_security_class()
656 * in case a newer policy version is loaded, allowing sockets
657 * to remain in the correct class.
658 */
659 if (policydb_loaded_version < POLICYDB_VERSION_NLCLASS)
660 if (tclass >= SECCLASS_NETLINK_ROUTE_SOCKET &&
661 tclass <= SECCLASS_NETLINK_DNRT_SOCKET)
662 tclass = SECCLASS_NETLINK_SOCKET;
663 758
664 if (!tclass || tclass > policydb.p_classes.nprim) { 759 if (!tclass || tclass > policydb.p_classes.nprim) {
665 printk(KERN_ERR "SELinux: %s: unrecognized class %d\n", 760 printk(KERN_ERR "SELinux: %s: unrecognized class %d\n",
@@ -792,6 +887,38 @@ out:
792} 887}
793 888
794 889
890static int security_compute_av_core(u32 ssid,
891 u32 tsid,
892 u16 tclass,
893 u32 requested,
894 struct av_decision *avd)
895{
896 struct context *scontext = NULL, *tcontext = NULL;
897 int rc = 0;
898
899 scontext = sidtab_search(&sidtab, ssid);
900 if (!scontext) {
901 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
902 __func__, ssid);
903 return -EINVAL;
904 }
905 tcontext = sidtab_search(&sidtab, tsid);
906 if (!tcontext) {
907 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
908 __func__, tsid);
909 return -EINVAL;
910 }
911
912 rc = context_struct_compute_av(scontext, tcontext, tclass,
913 requested, avd);
914
915 /* permissive domain? */
916 if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
917 avd->flags |= AVD_FLAGS_PERMISSIVE;
918
919 return rc;
920}
921
795/** 922/**
796 * security_compute_av - Compute access vector decisions. 923 * security_compute_av - Compute access vector decisions.
797 * @ssid: source security identifier 924 * @ssid: source security identifier
@@ -807,12 +934,49 @@ out:
807 */ 934 */
808int security_compute_av(u32 ssid, 935int security_compute_av(u32 ssid,
809 u32 tsid, 936 u32 tsid,
810 u16 tclass, 937 u16 orig_tclass,
811 u32 requested, 938 u32 orig_requested,
812 struct av_decision *avd) 939 struct av_decision *avd)
813{ 940{
814 struct context *scontext = NULL, *tcontext = NULL; 941 u16 tclass;
815 int rc = 0; 942 u32 requested;
943 int rc;
944
945 read_lock(&policy_rwlock);
946
947 if (!ss_initialized)
948 goto allow;
949
950 requested = unmap_perm(orig_tclass, orig_requested);
951 tclass = unmap_class(orig_tclass);
952 if (unlikely(orig_tclass && !tclass)) {
953 if (policydb.allow_unknown)
954 goto allow;
955 rc = -EINVAL;
956 goto out;
957 }
958 rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
959 map_decision(orig_tclass, avd, policydb.allow_unknown);
960out:
961 read_unlock(&policy_rwlock);
962 return rc;
963allow:
964 avd->allowed = 0xffffffff;
965 avd->auditallow = 0;
966 avd->auditdeny = 0xffffffff;
967 avd->seqno = latest_granting;
968 avd->flags = 0;
969 rc = 0;
970 goto out;
971}
972
973int security_compute_av_user(u32 ssid,
974 u32 tsid,
975 u16 tclass,
976 u32 requested,
977 struct av_decision *avd)
978{
979 int rc;
816 980
817 if (!ss_initialized) { 981 if (!ss_initialized) {
818 avd->allowed = 0xffffffff; 982 avd->allowed = 0xffffffff;
@@ -823,29 +987,7 @@ int security_compute_av(u32 ssid,
823 } 987 }
824 988
825 read_lock(&policy_rwlock); 989 read_lock(&policy_rwlock);
826 990 rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
827 scontext = sidtab_search(&sidtab, ssid);
828 if (!scontext) {
829 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
830 __func__, ssid);
831 rc = -EINVAL;
832 goto out;
833 }
834 tcontext = sidtab_search(&sidtab, tsid);
835 if (!tcontext) {
836 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
837 __func__, tsid);
838 rc = -EINVAL;
839 goto out;
840 }
841
842 rc = context_struct_compute_av(scontext, tcontext, tclass,
843 requested, avd);
844
845 /* permissive domain? */
846 if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
847 avd->flags |= AVD_FLAGS_PERMISSIVE;
848out:
849 read_unlock(&policy_rwlock); 991 read_unlock(&policy_rwlock);
850 return rc; 992 return rc;
851} 993}
@@ -1204,20 +1346,22 @@ out:
1204 1346
1205static int security_compute_sid(u32 ssid, 1347static int security_compute_sid(u32 ssid,
1206 u32 tsid, 1348 u32 tsid,
1207 u16 tclass, 1349 u16 orig_tclass,
1208 u32 specified, 1350 u32 specified,
1209 u32 *out_sid) 1351 u32 *out_sid,
1352 bool kern)
1210{ 1353{
1211 struct context *scontext = NULL, *tcontext = NULL, newcontext; 1354 struct context *scontext = NULL, *tcontext = NULL, newcontext;
1212 struct role_trans *roletr = NULL; 1355 struct role_trans *roletr = NULL;
1213 struct avtab_key avkey; 1356 struct avtab_key avkey;
1214 struct avtab_datum *avdatum; 1357 struct avtab_datum *avdatum;
1215 struct avtab_node *node; 1358 struct avtab_node *node;
1359 u16 tclass;
1216 int rc = 0; 1360 int rc = 0;
1217 1361
1218 if (!ss_initialized) { 1362 if (!ss_initialized) {
1219 switch (tclass) { 1363 switch (orig_tclass) {
1220 case SECCLASS_PROCESS: 1364 case SECCLASS_PROCESS: /* kernel value */
1221 *out_sid = ssid; 1365 *out_sid = ssid;
1222 break; 1366 break;
1223 default: 1367 default:
@@ -1231,6 +1375,11 @@ static int security_compute_sid(u32 ssid,
1231 1375
1232 read_lock(&policy_rwlock); 1376 read_lock(&policy_rwlock);
1233 1377
1378 if (kern)
1379 tclass = unmap_class(orig_tclass);
1380 else
1381 tclass = orig_tclass;
1382
1234 scontext = sidtab_search(&sidtab, ssid); 1383 scontext = sidtab_search(&sidtab, ssid);
1235 if (!scontext) { 1384 if (!scontext) {
1236 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", 1385 printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
@@ -1260,13 +1409,11 @@ static int security_compute_sid(u32 ssid,
1260 } 1409 }
1261 1410
1262 /* Set the role and type to default values. */ 1411 /* Set the role and type to default values. */
1263 switch (tclass) { 1412 if (tclass == policydb.process_class) {
1264 case SECCLASS_PROCESS:
1265 /* Use the current role and type of process. */ 1413 /* Use the current role and type of process. */
1266 newcontext.role = scontext->role; 1414 newcontext.role = scontext->role;
1267 newcontext.type = scontext->type; 1415 newcontext.type = scontext->type;
1268 break; 1416 } else {
1269 default:
1270 /* Use the well-defined object role. */ 1417 /* Use the well-defined object role. */
1271 newcontext.role = OBJECT_R_VAL; 1418 newcontext.role = OBJECT_R_VAL;
1272 /* Use the type of the related object. */ 1419 /* Use the type of the related object. */
@@ -1297,8 +1444,7 @@ static int security_compute_sid(u32 ssid,
1297 } 1444 }
1298 1445
1299 /* Check for class-specific changes. */ 1446 /* Check for class-specific changes. */
1300 switch (tclass) { 1447 if (tclass == policydb.process_class) {
1301 case SECCLASS_PROCESS:
1302 if (specified & AVTAB_TRANSITION) { 1448 if (specified & AVTAB_TRANSITION) {
1303 /* Look for a role transition rule. */ 1449 /* Look for a role transition rule. */
1304 for (roletr = policydb.role_tr; roletr; 1450 for (roletr = policydb.role_tr; roletr;
@@ -1311,9 +1457,6 @@ static int security_compute_sid(u32 ssid,
1311 } 1457 }
1312 } 1458 }
1313 } 1459 }
1314 break;
1315 default:
1316 break;
1317 } 1460 }
1318 1461
1319 /* Set the MLS attributes. 1462 /* Set the MLS attributes.
@@ -1358,7 +1501,17 @@ int security_transition_sid(u32 ssid,
1358 u16 tclass, 1501 u16 tclass,
1359 u32 *out_sid) 1502 u32 *out_sid)
1360{ 1503{
1361 return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION, out_sid); 1504 return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
1505 out_sid, true);
1506}
1507
1508int security_transition_sid_user(u32 ssid,
1509 u32 tsid,
1510 u16 tclass,
1511 u32 *out_sid)
1512{
1513 return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
1514 out_sid, false);
1362} 1515}
1363 1516
1364/** 1517/**
@@ -1379,7 +1532,8 @@ int security_member_sid(u32 ssid,
1379 u16 tclass, 1532 u16 tclass,
1380 u32 *out_sid) 1533 u32 *out_sid)
1381{ 1534{
1382 return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid); 1535 return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid,
1536 false);
1383} 1537}
1384 1538
1385/** 1539/**
@@ -1400,144 +1554,8 @@ int security_change_sid(u32 ssid,
1400 u16 tclass, 1554 u16 tclass,
1401 u32 *out_sid) 1555 u32 *out_sid)
1402{ 1556{
1403 return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid); 1557 return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid,
1404} 1558 false);
1405
1406/*
1407 * Verify that each kernel class that is defined in the
1408 * policy is correct
1409 */
1410static int validate_classes(struct policydb *p)
1411{
1412 int i, j;
1413 struct class_datum *cladatum;
1414 struct perm_datum *perdatum;
1415 u32 nprim, tmp, common_pts_len, perm_val, pol_val;
1416 u16 class_val;
1417 const struct selinux_class_perm *kdefs = &selinux_class_perm;
1418 const char *def_class, *def_perm, *pol_class;
1419 struct symtab *perms;
1420 bool print_unknown_handle = 0;
1421
1422 if (p->allow_unknown) {
1423 u32 num_classes = kdefs->cts_len;
1424 p->undefined_perms = kcalloc(num_classes, sizeof(u32), GFP_KERNEL);
1425 if (!p->undefined_perms)
1426 return -ENOMEM;
1427 }
1428
1429 for (i = 1; i < kdefs->cts_len; i++) {
1430 def_class = kdefs->class_to_string[i];
1431 if (!def_class)
1432 continue;
1433 if (i > p->p_classes.nprim) {
1434 printk(KERN_INFO
1435 "SELinux: class %s not defined in policy\n",
1436 def_class);
1437 if (p->reject_unknown)
1438 return -EINVAL;
1439 if (p->allow_unknown)
1440 p->undefined_perms[i-1] = ~0U;
1441 print_unknown_handle = 1;
1442 continue;
1443 }
1444 pol_class = p->p_class_val_to_name[i-1];
1445 if (strcmp(pol_class, def_class)) {
1446 printk(KERN_ERR
1447 "SELinux: class %d is incorrect, found %s but should be %s\n",
1448 i, pol_class, def_class);
1449 return -EINVAL;
1450 }
1451 }
1452 for (i = 0; i < kdefs->av_pts_len; i++) {
1453 class_val = kdefs->av_perm_to_string[i].tclass;
1454 perm_val = kdefs->av_perm_to_string[i].value;
1455 def_perm = kdefs->av_perm_to_string[i].name;
1456 if (class_val > p->p_classes.nprim)
1457 continue;
1458 pol_class = p->p_class_val_to_name[class_val-1];
1459 cladatum = hashtab_search(p->p_classes.table, pol_class);
1460 BUG_ON(!cladatum);
1461 perms = &cladatum->permissions;
1462 nprim = 1 << (perms->nprim - 1);
1463 if (perm_val > nprim) {
1464 printk(KERN_INFO
1465 "SELinux: permission %s in class %s not defined in policy\n",
1466 def_perm, pol_class);
1467 if (p->reject_unknown)
1468 return -EINVAL;
1469 if (p->allow_unknown)
1470 p->undefined_perms[class_val-1] |= perm_val;
1471 print_unknown_handle = 1;
1472 continue;
1473 }
1474 perdatum = hashtab_search(perms->table, def_perm);
1475 if (perdatum == NULL) {
1476 printk(KERN_ERR
1477 "SELinux: permission %s in class %s not found in policy, bad policy\n",
1478 def_perm, pol_class);
1479 return -EINVAL;
1480 }
1481 pol_val = 1 << (perdatum->value - 1);
1482 if (pol_val != perm_val) {
1483 printk(KERN_ERR
1484 "SELinux: permission %s in class %s has incorrect value\n",
1485 def_perm, pol_class);
1486 return -EINVAL;
1487 }
1488 }
1489 for (i = 0; i < kdefs->av_inherit_len; i++) {
1490 class_val = kdefs->av_inherit[i].tclass;
1491 if (class_val > p->p_classes.nprim)
1492 continue;
1493 pol_class = p->p_class_val_to_name[class_val-1];
1494 cladatum = hashtab_search(p->p_classes.table, pol_class);
1495 BUG_ON(!cladatum);
1496 if (!cladatum->comdatum) {
1497 printk(KERN_ERR
1498 "SELinux: class %s should have an inherits clause but does not\n",
1499 pol_class);
1500 return -EINVAL;
1501 }
1502 tmp = kdefs->av_inherit[i].common_base;
1503 common_pts_len = 0;
1504 while (!(tmp & 0x01)) {
1505 common_pts_len++;
1506 tmp >>= 1;
1507 }
1508 perms = &cladatum->comdatum->permissions;
1509 for (j = 0; j < common_pts_len; j++) {
1510 def_perm = kdefs->av_inherit[i].common_pts[j];
1511 if (j >= perms->nprim) {
1512 printk(KERN_INFO
1513 "SELinux: permission %s in class %s not defined in policy\n",
1514 def_perm, pol_class);
1515 if (p->reject_unknown)
1516 return -EINVAL;
1517 if (p->allow_unknown)
1518 p->undefined_perms[class_val-1] |= (1 << j);
1519 print_unknown_handle = 1;
1520 continue;
1521 }
1522 perdatum = hashtab_search(perms->table, def_perm);
1523 if (perdatum == NULL) {
1524 printk(KERN_ERR
1525 "SELinux: permission %s in class %s not found in policy, bad policy\n",
1526 def_perm, pol_class);
1527 return -EINVAL;
1528 }
1529 if (perdatum->value != j + 1) {
1530 printk(KERN_ERR
1531 "SELinux: permission %s in class %s has incorrect value\n",
1532 def_perm, pol_class);
1533 return -EINVAL;
1534 }
1535 }
1536 }
1537 if (print_unknown_handle)
1538 printk(KERN_INFO "SELinux: the above unknown classes and permissions will be %s\n",
1539 (security_get_allow_unknown() ? "allowed" : "denied"));
1540 return 0;
1541} 1559}
1542 1560
1543/* Clone the SID into the new SID table. */ 1561/* Clone the SID into the new SID table. */
@@ -1710,8 +1728,10 @@ int security_load_policy(void *data, size_t len)
1710{ 1728{
1711 struct policydb oldpolicydb, newpolicydb; 1729 struct policydb oldpolicydb, newpolicydb;
1712 struct sidtab oldsidtab, newsidtab; 1730 struct sidtab oldsidtab, newsidtab;
1731 struct selinux_mapping *oldmap, *map = NULL;
1713 struct convert_context_args args; 1732 struct convert_context_args args;
1714 u32 seqno; 1733 u32 seqno;
1734 u16 map_size;
1715 int rc = 0; 1735 int rc = 0;
1716 struct policy_file file = { data, len }, *fp = &file; 1736 struct policy_file file = { data, len }, *fp = &file;
1717 1737
@@ -1721,22 +1741,19 @@ int security_load_policy(void *data, size_t len)
1721 avtab_cache_destroy(); 1741 avtab_cache_destroy();
1722 return -EINVAL; 1742 return -EINVAL;
1723 } 1743 }
1724 if (policydb_load_isids(&policydb, &sidtab)) { 1744 if (selinux_set_mapping(&policydb, secclass_map,
1745 &current_mapping,
1746 &current_mapping_size)) {
1725 policydb_destroy(&policydb); 1747 policydb_destroy(&policydb);
1726 avtab_cache_destroy(); 1748 avtab_cache_destroy();
1727 return -EINVAL; 1749 return -EINVAL;
1728 } 1750 }
1729 /* Verify that the kernel defined classes are correct. */ 1751 if (policydb_load_isids(&policydb, &sidtab)) {
1730 if (validate_classes(&policydb)) {
1731 printk(KERN_ERR
1732 "SELinux: the definition of a class is incorrect\n");
1733 sidtab_destroy(&sidtab);
1734 policydb_destroy(&policydb); 1752 policydb_destroy(&policydb);
1735 avtab_cache_destroy(); 1753 avtab_cache_destroy();
1736 return -EINVAL; 1754 return -EINVAL;
1737 } 1755 }
1738 security_load_policycaps(); 1756 security_load_policycaps();
1739 policydb_loaded_version = policydb.policyvers;
1740 ss_initialized = 1; 1757 ss_initialized = 1;
1741 seqno = ++latest_granting; 1758 seqno = ++latest_granting;
1742 selinux_complete_init(); 1759 selinux_complete_init();
@@ -1759,13 +1776,9 @@ int security_load_policy(void *data, size_t len)
1759 return -ENOMEM; 1776 return -ENOMEM;
1760 } 1777 }
1761 1778
1762 /* Verify that the kernel defined classes are correct. */ 1779 if (selinux_set_mapping(&newpolicydb, secclass_map,
1763 if (validate_classes(&newpolicydb)) { 1780 &map, &map_size))
1764 printk(KERN_ERR
1765 "SELinux: the definition of a class is incorrect\n");
1766 rc = -EINVAL;
1767 goto err; 1781 goto err;
1768 }
1769 1782
1770 rc = security_preserve_bools(&newpolicydb); 1783 rc = security_preserve_bools(&newpolicydb);
1771 if (rc) { 1784 if (rc) {
@@ -1799,13 +1812,16 @@ int security_load_policy(void *data, size_t len)
1799 memcpy(&policydb, &newpolicydb, sizeof policydb); 1812 memcpy(&policydb, &newpolicydb, sizeof policydb);
1800 sidtab_set(&sidtab, &newsidtab); 1813 sidtab_set(&sidtab, &newsidtab);
1801 security_load_policycaps(); 1814 security_load_policycaps();
1815 oldmap = current_mapping;
1816 current_mapping = map;
1817 current_mapping_size = map_size;
1802 seqno = ++latest_granting; 1818 seqno = ++latest_granting;
1803 policydb_loaded_version = policydb.policyvers;
1804 write_unlock_irq(&policy_rwlock); 1819 write_unlock_irq(&policy_rwlock);
1805 1820
1806 /* Free the old policydb and SID table. */ 1821 /* Free the old policydb and SID table. */
1807 policydb_destroy(&oldpolicydb); 1822 policydb_destroy(&oldpolicydb);
1808 sidtab_destroy(&oldsidtab); 1823 sidtab_destroy(&oldsidtab);
1824 kfree(oldmap);
1809 1825
1810 avc_ss_reset(seqno); 1826 avc_ss_reset(seqno);
1811 selnl_notify_policyload(seqno); 1827 selnl_notify_policyload(seqno);
@@ -1815,6 +1831,7 @@ int security_load_policy(void *data, size_t len)
1815 return 0; 1831 return 0;
1816 1832
1817err: 1833err:
1834 kfree(map);
1818 sidtab_destroy(&newsidtab); 1835 sidtab_destroy(&newsidtab);
1819 policydb_destroy(&newpolicydb); 1836 policydb_destroy(&newpolicydb);
1820 return rc; 1837 return rc;
@@ -2091,7 +2108,7 @@ out_unlock:
2091 } 2108 }
2092 for (i = 0, j = 0; i < mynel; i++) { 2109 for (i = 0, j = 0; i < mynel; i++) {
2093 rc = avc_has_perm_noaudit(fromsid, mysids[i], 2110 rc = avc_has_perm_noaudit(fromsid, mysids[i],
2094 SECCLASS_PROCESS, 2111 SECCLASS_PROCESS, /* kernel value */
2095 PROCESS__TRANSITION, AVC_STRICT, 2112 PROCESS__TRANSITION, AVC_STRICT,
2096 NULL); 2113 NULL);
2097 if (!rc) 2114 if (!rc)
@@ -2119,10 +2136,11 @@ out:
2119 */ 2136 */
2120int security_genfs_sid(const char *fstype, 2137int security_genfs_sid(const char *fstype,
2121 char *path, 2138 char *path,
2122 u16 sclass, 2139 u16 orig_sclass,
2123 u32 *sid) 2140 u32 *sid)
2124{ 2141{
2125 int len; 2142 int len;
2143 u16 sclass;
2126 struct genfs *genfs; 2144 struct genfs *genfs;
2127 struct ocontext *c; 2145 struct ocontext *c;
2128 int rc = 0, cmp = 0; 2146 int rc = 0, cmp = 0;
@@ -2132,6 +2150,8 @@ int security_genfs_sid(const char *fstype,
2132 2150
2133 read_lock(&policy_rwlock); 2151 read_lock(&policy_rwlock);
2134 2152
2153 sclass = unmap_class(orig_sclass);
2154
2135 for (genfs = policydb.genfs; genfs; genfs = genfs->next) { 2155 for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
2136 cmp = strcmp(fstype, genfs->fstype); 2156 cmp = strcmp(fstype, genfs->fstype);
2137 if (cmp <= 0) 2157 if (cmp <= 0)
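
Note on the services.c changes above: the fixed kernel class/permission numbering is replaced by a per-policy mapping table (struct selinux_mapping, built by selinux_set_mapping() from secclass_map). Kernel-side values are translated to policy values with unmap_class()/unmap_perm() before the lookup, and the resulting access vectors are translated back with map_decision(). A stand-alone sketch of the two bit translations, assuming a simplified table (struct perm_mapping, kern_to_policy() and policy_to_kern() are illustrative names, not kernel code):

    #include <stdint.h>

    struct perm_mapping {
            unsigned num_perms;
            uint32_t perms[32];     /* policy mask for each kernel bit */
    };

    /* Translate a kernel-side permission bitmap into policy values. */
    static uint32_t kern_to_policy(const struct perm_mapping *m, uint32_t kperms)
    {
            uint32_t out = 0;
            for (unsigned i = 0; i < m->num_perms; i++)
                    if (kperms & (1u << i))
                            out |= m->perms[i];
            return out;
    }

    /* Translate a policy-side decision back into kernel bit positions. */
    static uint32_t policy_to_kern(const struct perm_mapping *m, uint32_t pperms)
    {
            uint32_t out = 0;
            for (unsigned i = 0; i < m->num_perms; i++)
                    if (pperms & m->perms[i])
                            out |= 1u << i;
            return out;
    }

Unknown kernel bits (entries whose policy mask is zero) fall out naturally: map_decision() grants or denies them wholesale according to policydb.allow_unknown.
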
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 3c8bd8ee0b95..e0d0354008b7 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -187,6 +187,8 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
187 const s8 pattern_type, const s8 end_type, 187 const s8 pattern_type, const s8 end_type,
188 const char *function) 188 const char *function)
189{ 189{
190 const char *const start = filename;
191 bool in_repetition = false;
190 bool contains_pattern = false; 192 bool contains_pattern = false;
191 unsigned char c; 193 unsigned char c;
192 unsigned char d; 194 unsigned char d;
@@ -212,9 +214,13 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
212 if (c == '/') 214 if (c == '/')
213 goto out; 215 goto out;
214 } 216 }
215 while ((c = *filename++) != '\0') { 217 while (1) {
218 c = *filename++;
219 if (!c)
220 break;
216 if (c == '\\') { 221 if (c == '\\') {
217 switch ((c = *filename++)) { 222 c = *filename++;
223 switch (c) {
218 case '\\': /* "\\" */ 224 case '\\': /* "\\" */
219 continue; 225 continue;
220 case '$': /* "\$" */ 226 case '$': /* "\$" */
@@ -231,6 +237,22 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
231 break; /* Must not contain pattern */ 237 break; /* Must not contain pattern */
232 contains_pattern = true; 238 contains_pattern = true;
233 continue; 239 continue;
240 case '{': /* "/\{" */
241 if (filename - 3 < start ||
242 *(filename - 3) != '/')
243 break;
244 if (pattern_type == -1)
245 break; /* Must not contain pattern */
246 contains_pattern = true;
247 in_repetition = true;
248 continue;
249 case '}': /* "\}/" */
250 if (*filename != '/')
251 break;
252 if (!in_repetition)
253 break;
254 in_repetition = false;
255 continue;
234 case '0': /* "\ooo" */ 256 case '0': /* "\ooo" */
235 case '1': 257 case '1':
236 case '2': 258 case '2':
@@ -246,6 +268,8 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
246 continue; /* pattern is not \000 */ 268 continue; /* pattern is not \000 */
247 } 269 }
248 goto out; 270 goto out;
271 } else if (in_repetition && c == '/') {
272 goto out;
249 } else if (tomoyo_is_invalid(c)) { 273 } else if (tomoyo_is_invalid(c)) {
250 goto out; 274 goto out;
251 } 275 }
@@ -254,6 +278,8 @@ bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
254 if (!contains_pattern) 278 if (!contains_pattern)
255 goto out; 279 goto out;
256 } 280 }
281 if (in_repetition)
282 goto out;
257 return true; 283 return true;
258 out: 284 out:
259 printk(KERN_DEBUG "%s: Invalid pathname '%s'\n", function, 285 printk(KERN_DEBUG "%s: Invalid pathname '%s'\n", function,
@@ -360,33 +386,6 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
360} 386}
361 387
362/** 388/**
363 * tomoyo_path_depth - Evaluate the number of '/' in a string.
364 *
365 * @pathname: The string to evaluate.
366 *
367 * Returns path depth of the string.
368 *
369 * I score 2 for each of the '/' in the @pathname
370 * and score 1 if the @pathname ends with '/'.
371 */
372static int tomoyo_path_depth(const char *pathname)
373{
374 int i = 0;
375
376 if (pathname) {
377 const char *ep = pathname + strlen(pathname);
378 if (pathname < ep--) {
379 if (*ep != '/')
380 i++;
381 while (pathname <= ep)
382 if (*ep-- == '/')
383 i += 2;
384 }
385 }
386 return i;
387}
388
389/**
390 * tomoyo_const_part_length - Evaluate the initial length without a pattern in a token. 389 * tomoyo_const_part_length - Evaluate the initial length without a pattern in a token.
391 * 390 *
392 * @filename: The string to evaluate. 391 * @filename: The string to evaluate.
@@ -444,11 +443,10 @@ void tomoyo_fill_path_info(struct tomoyo_path_info *ptr)
444 ptr->is_dir = len && (name[len - 1] == '/'); 443 ptr->is_dir = len && (name[len - 1] == '/');
445 ptr->is_patterned = (ptr->const_len < len); 444 ptr->is_patterned = (ptr->const_len < len);
446 ptr->hash = full_name_hash(name, len); 445 ptr->hash = full_name_hash(name, len);
447 ptr->depth = tomoyo_path_depth(name);
448} 446}
449 447
450/** 448/**
451 * tomoyo_file_matches_to_pattern2 - Pattern matching without '/' character 449 * tomoyo_file_matches_pattern2 - Pattern matching without '/' character
452 * and "\-" pattern. 450 * and "\-" pattern.
453 * 451 *
454 * @filename: The start of string to check. 452 * @filename: The start of string to check.
@@ -458,10 +456,10 @@ void tomoyo_fill_path_info(struct tomoyo_path_info *ptr)
458 * 456 *
459 * Returns true if @filename matches @pattern, false otherwise. 457 * Returns true if @filename matches @pattern, false otherwise.
460 */ 458 */
461static bool tomoyo_file_matches_to_pattern2(const char *filename, 459static bool tomoyo_file_matches_pattern2(const char *filename,
462 const char *filename_end, 460 const char *filename_end,
463 const char *pattern, 461 const char *pattern,
464 const char *pattern_end) 462 const char *pattern_end)
465{ 463{
466 while (filename < filename_end && pattern < pattern_end) { 464 while (filename < filename_end && pattern < pattern_end) {
467 char c; 465 char c;
@@ -519,7 +517,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename,
519 case '*': 517 case '*':
520 case '@': 518 case '@':
521 for (i = 0; i <= filename_end - filename; i++) { 519 for (i = 0; i <= filename_end - filename; i++) {
522 if (tomoyo_file_matches_to_pattern2( 520 if (tomoyo_file_matches_pattern2(
523 filename + i, filename_end, 521 filename + i, filename_end,
524 pattern + 1, pattern_end)) 522 pattern + 1, pattern_end))
525 return true; 523 return true;
@@ -550,7 +548,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename,
550 j++; 548 j++;
551 } 549 }
552 for (i = 1; i <= j; i++) { 550 for (i = 1; i <= j; i++) {
553 if (tomoyo_file_matches_to_pattern2( 551 if (tomoyo_file_matches_pattern2(
554 filename + i, filename_end, 552 filename + i, filename_end,
555 pattern + 1, pattern_end)) 553 pattern + 1, pattern_end))
556 return true; 554 return true;
@@ -567,7 +565,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename,
567} 565}
568 566
569/** 567/**
570 * tomoyo_file_matches_to_pattern - Pattern matching without '/' character. 568 * tomoyo_file_matches_pattern - Pattern matching without '/' character.
571 * 569 *
572 * @filename: The start of string to check. 570 * @filename: The start of string to check.
573 * @filename_end: The end of string to check. 571 * @filename_end: The end of string to check.
@@ -576,7 +574,7 @@ static bool tomoyo_file_matches_to_pattern2(const char *filename,
576 * 574 *
577 * Returns true if @filename matches @pattern, false otherwise. 575 * Returns true if @filename matches @pattern, false otherwise.
578 */ 576 */
579static bool tomoyo_file_matches_to_pattern(const char *filename, 577static bool tomoyo_file_matches_pattern(const char *filename,
580 const char *filename_end, 578 const char *filename_end,
581 const char *pattern, 579 const char *pattern,
582 const char *pattern_end) 580 const char *pattern_end)
@@ -589,10 +587,10 @@ static bool tomoyo_file_matches_to_pattern(const char *filename,
589 /* Split at "\-" pattern. */ 587 /* Split at "\-" pattern. */
590 if (*pattern++ != '\\' || *pattern++ != '-') 588 if (*pattern++ != '\\' || *pattern++ != '-')
591 continue; 589 continue;
592 result = tomoyo_file_matches_to_pattern2(filename, 590 result = tomoyo_file_matches_pattern2(filename,
593 filename_end, 591 filename_end,
594 pattern_start, 592 pattern_start,
595 pattern - 2); 593 pattern - 2);
596 if (first) 594 if (first)
597 result = !result; 595 result = !result;
598 if (result) 596 if (result)
@@ -600,13 +598,79 @@ static bool tomoyo_file_matches_to_pattern(const char *filename,
600 first = false; 598 first = false;
601 pattern_start = pattern; 599 pattern_start = pattern;
602 } 600 }
603 result = tomoyo_file_matches_to_pattern2(filename, filename_end, 601 result = tomoyo_file_matches_pattern2(filename, filename_end,
604 pattern_start, pattern_end); 602 pattern_start, pattern_end);
605 return first ? result : !result; 603 return first ? result : !result;
606} 604}
607 605
608/** 606/**
607 * tomoyo_path_matches_pattern2 - Do pathname pattern matching.
608 *
609 * @f: The start of string to check.
610 * @p: The start of pattern to compare.
611 *
612 * Returns true if @f matches @p, false otherwise.
613 */
614static bool tomoyo_path_matches_pattern2(const char *f, const char *p)
615{
616 const char *f_delimiter;
617 const char *p_delimiter;
618
619 while (*f && *p) {
620 f_delimiter = strchr(f, '/');
621 if (!f_delimiter)
622 f_delimiter = f + strlen(f);
623 p_delimiter = strchr(p, '/');
624 if (!p_delimiter)
625 p_delimiter = p + strlen(p);
626 if (*p == '\\' && *(p + 1) == '{')
627 goto recursive;
628 if (!tomoyo_file_matches_pattern(f, f_delimiter, p,
629 p_delimiter))
630 return false;
631 f = f_delimiter;
632 if (*f)
633 f++;
634 p = p_delimiter;
635 if (*p)
636 p++;
637 }
638 /* Ignore trailing "\*" and "\@" in @pattern. */
639 while (*p == '\\' &&
640 (*(p + 1) == '*' || *(p + 1) == '@'))
641 p += 2;
642 return !*f && !*p;
643 recursive:
644 /*
645 * The "\{" pattern is permitted only after '/' character.
646 * This guarantees that below "*(p - 1)" is safe.
647 * Also, the "\}" pattern is permitted only before '/' character
648 * so that "\{" + "\}" pair will not break the "\-" operator.
649 */
650 if (*(p - 1) != '/' || p_delimiter <= p + 3 || *p_delimiter != '/' ||
651 *(p_delimiter - 1) != '}' || *(p_delimiter - 2) != '\\')
652 return false; /* Bad pattern. */
653 do {
654 /* Compare current component with pattern. */
655 if (!tomoyo_file_matches_pattern(f, f_delimiter, p + 2,
656 p_delimiter - 2))
657 break;
658 /* Proceed to next component. */
659 f = f_delimiter;
660 if (!*f)
661 break;
662 f++;
663 /* Continue comparison. */
664 if (tomoyo_path_matches_pattern2(f, p_delimiter + 1))
665 return true;
666 f_delimiter = strchr(f, '/');
667 } while (f_delimiter);
668 return false; /* Not matched. */
669}
670
671/**
609 * tomoyo_path_matches_pattern - Check whether the given filename matches the given pattern. 672 * tomoyo_path_matches_pattern - Check whether the given filename matches the given pattern.
673 *
610 * @filename: The filename to check. 674 * @filename: The filename to check.
611 * @pattern: The pattern to compare. 675 * @pattern: The pattern to compare.
612 * 676 *
@@ -615,24 +679,24 @@ static bool tomoyo_file_matches_to_pattern(const char *filename,
615 * The following patterns are available. 679 * The following patterns are available.
616 * \\ \ itself. 680 * \\ \ itself.
617 * \ooo Octal representation of a byte. 681 * \ooo Octal representation of a byte.
618 * \* More than or equals to 0 character other than '/'. 682 * \* Zero or more repetitions of characters other than '/'.
619 * \@ More than or equals to 0 character other than '/' or '.'. 683 * \@ Zero or more repetitions of characters other than '/' or '.'.
620 * \? 1 byte character other than '/'. 684 * \? 1 byte character other than '/'.
621 * \$ More than or equals to 1 decimal digit. 685 * \$ One or more repetitions of decimal digits.
622 * \+ 1 decimal digit. 686 * \+ 1 decimal digit.
623 * \X More than or equals to 1 hexadecimal digit. 687 * \X One or more repetitions of hexadecimal digits.
624 * \x 1 hexadecimal digit. 688 * \x 1 hexadecimal digit.
625 * \A More than or equals to 1 alphabet character. 689 * \A One or more repetitions of alphabet characters.
626 * \a 1 alphabet character. 690 * \a 1 alphabet character.
691 *
627 * \- Subtraction operator. 692 * \- Subtraction operator.
693 *
694 * /\{dir\}/ '/' + 'One or more repetitions of dir/' (e.g. /dir/ /dir/dir/
695 * /dir/dir/dir/ ).
628 */ 696 */
629bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename, 697bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
630 const struct tomoyo_path_info *pattern) 698 const struct tomoyo_path_info *pattern)
631{ 699{
632 /*
633 if (!filename || !pattern)
634 return false;
635 */
636 const char *f = filename->name; 700 const char *f = filename->name;
637 const char *p = pattern->name; 701 const char *p = pattern->name;
638 const int len = pattern->const_len; 702 const int len = pattern->const_len;
@@ -640,37 +704,15 @@ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
640 /* If @pattern doesn't contain pattern, I can use strcmp(). */ 704 /* If @pattern doesn't contain pattern, I can use strcmp(). */
641 if (!pattern->is_patterned) 705 if (!pattern->is_patterned)
642 return !tomoyo_pathcmp(filename, pattern); 706 return !tomoyo_pathcmp(filename, pattern);
643 /* Dont compare if the number of '/' differs. */ 707 /* Don't compare directory and non-directory. */
644 if (filename->depth != pattern->depth) 708 if (filename->is_dir != pattern->is_dir)
645 return false; 709 return false;
646 /* Compare the initial length without patterns. */ 710 /* Compare the initial length without patterns. */
647 if (strncmp(f, p, len)) 711 if (strncmp(f, p, len))
648 return false; 712 return false;
649 f += len; 713 f += len;
650 p += len; 714 p += len;
651 /* Main loop. Compare each directory component. */ 715 return tomoyo_path_matches_pattern2(f, p);
652 while (*f && *p) {
653 const char *f_delimiter = strchr(f, '/');
654 const char *p_delimiter = strchr(p, '/');
655 if (!f_delimiter)
656 f_delimiter = f + strlen(f);
657 if (!p_delimiter)
658 p_delimiter = p + strlen(p);
659 if (!tomoyo_file_matches_to_pattern(f, f_delimiter,
660 p, p_delimiter))
661 return false;
662 f = f_delimiter;
663 if (*f)
664 f++;
665 p = p_delimiter;
666 if (*p)
667 p++;
668 }
669 /* Ignore trailing "\*" and "\@" in @pattern. */
670 while (*p == '\\' &&
671 (*(p + 1) == '*' || *(p + 1) == '@'))
672 p += 2;
673 return !*f && !*p;
674} 716}
675 717
676/** 718/**
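
Note on the common.c changes above: tomoyo_path_matches_pattern2() adds a recursive case for the new "/\{dir\}/" operator, which must match one or more consecutive path components. A user-space sketch of that semantics, assuming match_component() stands in for tomoyo_file_matches_pattern() (all names here are illustrative):

    #include <stdbool.h>
    #include <string.h>

    /* Exact match of one path component [f, f_end) against pat. */
    static bool match_component(const char *f, const char *f_end, const char *pat)
    {
            return (size_t)(f_end - f) == strlen(pat) &&
                   !strncmp(f, pat, f_end - f);
    }

    /*
     * Does f (no leading '/') consist of one or more "pat/" components,
     * followed by rest matched literally?  This is the "\{dir\}/" idea.
     */
    static bool match_repetition(const char *f, const char *pat, const char *rest)
    {
            while (*f) {
                    const char *end = strchr(f, '/');
                    if (!end)
                            return false;           /* repetition needs "dir/" */
                    if (!match_component(f, end, pat))
                            return false;
                    f = end + 1;
                    if (!strcmp(f, rest))
                            return true;            /* matched >= 1 repetition */
            }
            return false;
    }

The real code instead recurses through tomoyo_path_matches_pattern2() after each matched component, so patterns that follow the repetition are still honoured.
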
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 31df541911f7..92169d29b2db 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -56,9 +56,6 @@ struct tomoyo_page_buffer {
56 * (5) "is_patterned" is a bool which is true if "name" contains wildcard 56 * (5) "is_patterned" is a bool which is true if "name" contains wildcard
57 * characters, false otherwise. This allows TOMOYO to use "hash" and 57 * characters, false otherwise. This allows TOMOYO to use "hash" and
58 * strcmp() for string comparison if "is_patterned" is false. 58 * strcmp() for string comparison if "is_patterned" is false.
59 * (6) "depth" is calculated using the number of "/" characters in "name".
60 * This allows TOMOYO to avoid comparing two pathnames which never match
61 * (e.g. whether "/var/www/html/index.html" matches "/tmp/sh-thd-\$").
62 */ 59 */
63struct tomoyo_path_info { 60struct tomoyo_path_info {
64 const char *name; 61 const char *name;
@@ -66,7 +63,6 @@ struct tomoyo_path_info {
66 u16 const_len; /* = tomoyo_const_part_length(name) */ 63 u16 const_len; /* = tomoyo_const_part_length(name) */
67 bool is_dir; /* = tomoyo_strendswith(name, "/") */ 64 bool is_dir; /* = tomoyo_strendswith(name, "/") */
68 bool is_patterned; /* = tomoyo_path_contains_pattern(name) */ 65 bool is_patterned; /* = tomoyo_path_contains_pattern(name) */
69 u16 depth; /* = tomoyo_path_depth(name) */
70}; 66};
71 67
72/* 68/*
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 5f2e33263371..917f564cdab1 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -13,6 +13,8 @@
13#include <linux/mount.h> 13#include <linux/mount.h>
14#include <linux/mnt_namespace.h> 14#include <linux/mnt_namespace.h>
15#include <linux/fs_struct.h> 15#include <linux/fs_struct.h>
16#include <linux/hash.h>
17
16#include "common.h" 18#include "common.h"
17#include "realpath.h" 19#include "realpath.h"
18 20
@@ -263,7 +265,8 @@ static unsigned int tomoyo_quota_for_savename;
263 * table. Frequency of appending strings is very low. So we don't need 265 * table. Frequency of appending strings is very low. So we don't need
264 * large (e.g. 64k) hash size. 256 will be sufficient. 266 * large (e.g. 64k) hash size. 256 will be sufficient.
265 */ 267 */
266#define TOMOYO_MAX_HASH 256 268#define TOMOYO_HASH_BITS 8
269#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
267 270
268/* 271/*
269 * tomoyo_name_entry is a structure which is used for linking 272 * tomoyo_name_entry is a structure which is used for linking
@@ -315,6 +318,7 @@ const struct tomoyo_path_info *tomoyo_save_name(const char *name)
315 struct tomoyo_free_memory_block_list *fmb; 318 struct tomoyo_free_memory_block_list *fmb;
316 int len; 319 int len;
317 char *cp; 320 char *cp;
321 struct list_head *head;
318 322
319 if (!name) 323 if (!name)
320 return NULL; 324 return NULL;
@@ -325,9 +329,10 @@ const struct tomoyo_path_info *tomoyo_save_name(const char *name)
325 return NULL; 329 return NULL;
326 } 330 }
327 hash = full_name_hash((const unsigned char *) name, len - 1); 331 hash = full_name_hash((const unsigned char *) name, len - 1);
332 head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
333
328 mutex_lock(&lock); 334 mutex_lock(&lock);
329 list_for_each_entry(ptr, &tomoyo_name_list[hash % TOMOYO_MAX_HASH], 335 list_for_each_entry(ptr, head, list) {
330 list) {
331 if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name)) 336 if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name))
332 goto out; 337 goto out;
333 } 338 }
@@ -365,7 +370,7 @@ const struct tomoyo_path_info *tomoyo_save_name(const char *name)
365 tomoyo_fill_path_info(&ptr->entry); 370 tomoyo_fill_path_info(&ptr->entry);
366 fmb->ptr += len; 371 fmb->ptr += len;
367 fmb->len -= len; 372 fmb->len -= len;
368 list_add_tail(&ptr->list, &tomoyo_name_list[hash % TOMOYO_MAX_HASH]); 373 list_add_tail(&ptr->list, head);
369 if (fmb->len == 0) { 374 if (fmb->len == 0) {
370 list_del(&fmb->list); 375 list_del(&fmb->list);
371 kfree(fmb); 376 kfree(fmb);
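
Note on the realpath.c change above: bucket selection moves from "hash % TOMOYO_MAX_HASH" to hash_long(hash, TOMOYO_HASH_BITS), which folds all bits of the hash into the index instead of keeping only the low eight. A small user-space illustration of the difference, assuming a Fibonacci-style multiplier (this is not the kernel's hash_long() implementation, whose constant differs by word size and kernel version):

    #include <stdint.h>

    #define HASH_BITS 8                     /* 256 buckets, as in the hunk */

    /* Modulo by a power of two: only the low HASH_BITS bits matter. */
    static inline unsigned bucket_mod(uint32_t hash)
    {
            return hash % (1u << HASH_BITS);
    }

    /* Multiplicative fold: the multiply mixes every input bit into the
     * top bits, which are then used as the bucket index. */
    static inline unsigned bucket_mul(uint32_t hash)
    {
            return (uint32_t)(hash * 0x9E3779B9u) >> (32 - HASH_BITS);
    }

Precomputing head before taking the mutex also shortens the locked region slightly.
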
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 1f0f8213e2d5..6c160a038b23 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -504,6 +504,10 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
504 int err; 504 int err;
505 505
506 aaci_pcm_hw_free(substream); 506 aaci_pcm_hw_free(substream);
507 if (aacirun->pcm_open) {
508 snd_ac97_pcm_close(aacirun->pcm);
509 aacirun->pcm_open = 0;
510 }
507 511
508 err = devdma_hw_alloc(NULL, substream, 512 err = devdma_hw_alloc(NULL, substream,
509 params_buffer_bytes(params)); 513 params_buffer_bytes(params));
@@ -517,7 +521,7 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
517 else 521 else
518 err = snd_ac97_pcm_open(aacirun->pcm, params_rate(params), 522 err = snd_ac97_pcm_open(aacirun->pcm, params_rate(params),
519 params_channels(params), 523 params_channels(params),
520 aacirun->pcm->r[1].slots); 524 aacirun->pcm->r[0].slots);
521 525
522 if (err) 526 if (err)
523 goto out; 527 goto out;

diff --git a/sound/oss/hex2hex.c b/sound/oss/hex2hex.c
index 5460faae98c9..041ef5c52bc2 100644
--- a/sound/oss/hex2hex.c
+++ b/sound/oss/hex2hex.c
@@ -12,7 +12,7 @@
12#define MAX_SIZE (256*1024) 12#define MAX_SIZE (256*1024)
13unsigned char buf[MAX_SIZE]; 13unsigned char buf[MAX_SIZE];
14 14
15int loadhex(FILE *inf, unsigned char *buf) 15static int loadhex(FILE *inf, unsigned char *buf)
16{ 16{
17 int l=0, c, i; 17 int l=0, c, i;
18 18
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index 9fb60276f5c9..6afdab09bab7 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -397,6 +397,7 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
397static struct hda_codec_preset snd_hda_preset_nvhdmi[] = { 397static struct hda_codec_preset snd_hda_preset_nvhdmi[] = {
398 { .id = 0x10de0002, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch }, 398 { .id = 0x10de0002, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
399 { .id = 0x10de0003, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch }, 399 { .id = 0x10de0003, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
400 { .id = 0x10de0005, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
400 { .id = 0x10de0006, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch }, 401 { .id = 0x10de0006, .name = "MCP78 HDMI", .patch = patch_nvhdmi_8ch },
401 { .id = 0x10de0007, .name = "MCP7A HDMI", .patch = patch_nvhdmi_8ch }, 402 { .id = 0x10de0007, .name = "MCP7A HDMI", .patch = patch_nvhdmi_8ch },
402 { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 403 { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
@@ -406,6 +407,7 @@ static struct hda_codec_preset snd_hda_preset_nvhdmi[] = {
406 407
407MODULE_ALIAS("snd-hda-codec-id:10de0002"); 408MODULE_ALIAS("snd-hda-codec-id:10de0002");
408MODULE_ALIAS("snd-hda-codec-id:10de0003"); 409MODULE_ALIAS("snd-hda-codec-id:10de0003");
410MODULE_ALIAS("snd-hda-codec-id:10de0005");
409MODULE_ALIAS("snd-hda-codec-id:10de0006"); 411MODULE_ALIAS("snd-hda-codec-id:10de0006");
410MODULE_ALIAS("snd-hda-codec-id:10de0007"); 412MODULE_ALIAS("snd-hda-codec-id:10de0007");
411MODULE_ALIAS("snd-hda-codec-id:10de0067"); 413MODULE_ALIAS("snd-hda-codec-id:10de0067");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index daf6975b0c2e..70583719282b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -8911,10 +8911,11 @@ static struct snd_pci_quirk alc882_ssid_cfg_tbl[] = {
8911 SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3), 8911 SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3),
8912 SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24), 8912 SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24),
8913 SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC885_MB5), 8913 SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC885_MB5),
8914 /* FIXME: HP jack sense seems not working for MBP 5,1, so apparently 8914 /* FIXME: HP jack sense seems not working for MBP 5,1 or 5,2,
8915 * no perfect solution yet 8915 * so apparently no perfect solution yet
8916 */ 8916 */
8917 SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC885_MB5), 8917 SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC885_MB5),
8918 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC885_MB5),
8918 {} /* terminator */ 8919 {} /* terminator */
8919}; 8920};
8920 8921
@@ -11461,6 +11462,7 @@ static struct snd_pci_quirk alc262_cfg_tbl[] = {
11461 SND_PCI_QUIRK(0x104d, 0x9016, "Sony VAIO", ALC262_AUTO), /* dig-only */ 11462 SND_PCI_QUIRK(0x104d, 0x9016, "Sony VAIO", ALC262_AUTO), /* dig-only */
11462 SND_PCI_QUIRK(0x104d, 0x9025, "Sony VAIO Z21MN", ALC262_TOSHIBA_S06), 11463 SND_PCI_QUIRK(0x104d, 0x9025, "Sony VAIO Z21MN", ALC262_TOSHIBA_S06),
11463 SND_PCI_QUIRK(0x104d, 0x9035, "Sony VAIO VGN-FW170J", ALC262_AUTO), 11464 SND_PCI_QUIRK(0x104d, 0x9035, "Sony VAIO VGN-FW170J", ALC262_AUTO),
11465 SND_PCI_QUIRK(0x104d, 0x9047, "Sony VAIO Type G", ALC262_AUTO),
11464 SND_PCI_QUIRK_MASK(0x104d, 0xff00, 0x9000, "Sony VAIO", 11466 SND_PCI_QUIRK_MASK(0x104d, 0xff00, 0x9000, "Sony VAIO",
11465 ALC262_SONY_ASSAMD), 11467 ALC262_SONY_ASSAMD),
11466 SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba dynabook SS RX1", 11468 SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba dynabook SS RX1",
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 8eb6508cd991..86de305fc9f2 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1590,6 +1590,8 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
1590 "Dell Studio 17", STAC_DELL_M6_DMIC), 1590 "Dell Studio 17", STAC_DELL_M6_DMIC),
1591 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02be, 1591 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02be,
1592 "Dell Studio 1555", STAC_DELL_M6_DMIC), 1592 "Dell Studio 1555", STAC_DELL_M6_DMIC),
1593 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
1594 "Dell Studio 1557", STAC_DELL_M6_DMIC),
1593 {} /* terminator */ 1595 {} /* terminator */
1594}; 1596};
1595 1597
diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
index 9da2dae64c5b..d063149e7047 100644
--- a/sound/pci/ice1712/ice1712.h
+++ b/sound/pci/ice1712/ice1712.h
@@ -382,8 +382,8 @@ struct snd_ice1712 {
382#ifdef CONFIG_PM 382#ifdef CONFIG_PM
383 int (*pm_suspend)(struct snd_ice1712 *); 383 int (*pm_suspend)(struct snd_ice1712 *);
384 int (*pm_resume)(struct snd_ice1712 *); 384 int (*pm_resume)(struct snd_ice1712 *);
385 int pm_suspend_enabled:1; 385 unsigned int pm_suspend_enabled:1;
386 int pm_saved_is_spdif_master:1; 386 unsigned int pm_saved_is_spdif_master:1;
387 unsigned int pm_saved_spdif_ctrl; 387 unsigned int pm_saved_spdif_ctrl;
388 unsigned char pm_saved_spdif_cfg; 388 unsigned char pm_saved_spdif_cfg;
389 unsigned int pm_saved_route; 389 unsigned int pm_saved_route;
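
Note on the ice1712.h change above: with GCC a plain "int" bit-field of width 1 is signed (strictly, its signedness is implementation-defined), so it can only hold 0 and -1; assigning 1 and later comparing "== 1" silently fails. Switching to "unsigned int" lets the field hold 0 and 1 as intended. A self-contained demonstration (illustrative, not driver code):

    #include <stdio.h>

    struct flags {
            int          s:1;       /* signed 1-bit field: values 0 and -1 */
            unsigned int u:1;       /* unsigned 1-bit field: values 0 and 1 */
    };

    int main(void)
    {
            struct flags f;
            int one = 1;

            f.s = one;              /* bit pattern 1 reads back as -1 */
            f.u = one;

            printf("s = %d, s == 1 ? %d\n", f.s, f.s == 1);  /* typically -1, 0 */
            printf("u = %d, u == 1 ? %d\n", f.u, f.u == 1);  /* 1, 1 */
            return 0;
    }
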
diff --git a/sound/pci/ice1712/prodigy_hifi.c b/sound/pci/ice1712/prodigy_hifi.c
index c75515f5be6f..6a9fee3ee78f 100644
--- a/sound/pci/ice1712/prodigy_hifi.c
+++ b/sound/pci/ice1712/prodigy_hifi.c
@@ -1100,7 +1100,7 @@ static void ak4396_init(struct snd_ice1712 *ice)
1100} 1100}
1101 1101
1102#ifdef CONFIG_PM 1102#ifdef CONFIG_PM
1103static int __devinit prodigy_hd2_resume(struct snd_ice1712 *ice) 1103static int prodigy_hd2_resume(struct snd_ice1712 *ice)
1104{ 1104{
1105 /* initialize ak4396 codec and restore previous mixer volumes */ 1105 /* initialize ak4396 codec and restore previous mixer volumes */
1106 struct prodigy_hifi_spec *spec = ice->spec; 1106 struct prodigy_hifi_spec *spec = ice->spec;
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index 64b859925c0b..7717e01fc071 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -131,7 +131,7 @@ static int snd_pdacf_probe(struct pcmcia_device *link)
131 return err; 131 return err;
132 } 132 }
133 133
134 snd_card_set_dev(card, &handle_to_dev(link)); 134 snd_card_set_dev(card, &link->dev);
135 135
136 pdacf->index = i; 136 pdacf->index = i;
137 card_list[i] = card; 137 card_list[i] = card;
@@ -142,12 +142,10 @@ static int snd_pdacf_probe(struct pcmcia_device *link)
142 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 142 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
143 link->io.NumPorts1 = 16; 143 link->io.NumPorts1 = 16;
144 144
145 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT | IRQ_FORCED_PULSE; 145 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_FORCED_PULSE;
146 // link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED; 146 // link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
147 147
148 link->irq.IRQInfo1 = 0 /* | IRQ_LEVEL_ID */;
149 link->irq.Handler = pdacf_interrupt; 148 link->irq.Handler = pdacf_interrupt;
150 link->irq.Instance = pdacf;
151 link->conf.Attributes = CONF_ENABLE_IRQ; 149 link->conf.Attributes = CONF_ENABLE_IRQ;
152 link->conf.IntType = INT_MEMORY_AND_IO; 150 link->conf.IntType = INT_MEMORY_AND_IO;
153 link->conf.ConfigIndex = 1; 151 link->conf.ConfigIndex = 1;
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 1492744ad67f..7be3b3357045 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -161,11 +161,9 @@ static int snd_vxpocket_new(struct snd_card *card, int ibl,
161 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 161 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
162 link->io.NumPorts1 = 16; 162 link->io.NumPorts1 = 16;
163 163
164 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 164 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
165 165
166 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
167 link->irq.Handler = &snd_vx_irq_handler; 166 link->irq.Handler = &snd_vx_irq_handler;
168 link->irq.Instance = chip;
169 167
170 link->conf.Attributes = CONF_ENABLE_IRQ; 168 link->conf.Attributes = CONF_ENABLE_IRQ;
171 link->conf.IntType = INT_MEMORY_AND_IO; 169 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -244,7 +242,7 @@ static int vxpocket_config(struct pcmcia_device *link)
244 if (ret) 242 if (ret)
245 goto failed; 243 goto failed;
246 244
247 chip->dev = &handle_to_dev(link); 245 chip->dev = &link->dev;
248 snd_card_set_dev(chip->card, chip->dev); 246 snd_card_set_dev(chip->card, chip->dev);
249 247
250 if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0) 248 if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index 0b8dcb5cd729..90a0264f7538 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -265,8 +265,8 @@ static const int bosr_usb_divisor_table[] = {
265#define UPPER_GROUP ((1<<8) | (1<<9) | (1<<10) | (1<<11) | (1<<15)) 265#define UPPER_GROUP ((1<<8) | (1<<9) | (1<<10) | (1<<11) | (1<<15))
266static const unsigned short sr_valid_mask[] = { 266static const unsigned short sr_valid_mask[] = {
267 LOWER_GROUP|UPPER_GROUP, /* Normal, bosr - 0*/ 267 LOWER_GROUP|UPPER_GROUP, /* Normal, bosr - 0*/
268 LOWER_GROUP|UPPER_GROUP, /* Normal, bosr - 1*/
269 LOWER_GROUP, /* Usb, bosr - 0*/ 268 LOWER_GROUP, /* Usb, bosr - 0*/
269 LOWER_GROUP|UPPER_GROUP, /* Normal, bosr - 1*/
270 UPPER_GROUP, /* Usb, bosr - 1*/ 270 UPPER_GROUP, /* Usb, bosr - 1*/
271}; 271};
272/* 272/*
@@ -625,11 +625,10 @@ static int tlv320aic23_resume(struct platform_device *pdev)
 {
         struct snd_soc_device *socdev = platform_get_drvdata(pdev);
         struct snd_soc_codec *codec = socdev->card->codec;
-        int i;
         u16 reg;
 
         /* Sync reg_cache with the hardware */
-        for (reg = 0; reg < ARRAY_SIZE(tlv320aic23_reg); i++) {
+        for (reg = 0; reg < TLV320AIC23_RESET; reg++) {
                 u16 val = tlv320aic23_read_reg_cache(codec, reg);
                 tlv320aic23_write(codec, reg, val);
         }
diff --git a/sound/soc/omap/omap3evm.c b/sound/soc/omap/omap3evm.c
index 9114c263077b..13aa380de162 100644
--- a/sound/soc/omap/omap3evm.c
+++ b/sound/soc/omap/omap3evm.c
@@ -144,4 +144,4 @@ module_exit(omap3evm_soc_exit);
 
 MODULE_AUTHOR("Anuj Aggarwal <anuj.aggarwal@ti.com>");
 MODULE_DESCRIPTION("ALSA SoC OMAP3 EVM");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index ad219aaf7cb8..0cd06f5dd356 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -134,7 +134,7 @@ static int omap3pandora_hp_event(struct snd_soc_dapm_widget *w,
  * |P| <--- TWL4030 <--------- Line In and MICs
  */
 static const struct snd_soc_dapm_widget omap3pandora_out_dapm_widgets[] = {
-        SND_SOC_DAPM_DAC("PCM DAC", "Playback", SND_SOC_NOPM, 0, 0),
+        SND_SOC_DAPM_DAC("PCM DAC", "HiFi Playback", SND_SOC_NOPM, 0, 0),
         SND_SOC_DAPM_PGA_E("Headphone Amplifier", SND_SOC_NOPM,
                            0, 0, NULL, 0, omap3pandora_hp_event,
                            SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
@@ -181,6 +181,7 @@ static int omap3pandora_out_init(struct snd_soc_codec *codec)
         snd_soc_dapm_nc_pin(codec, "CARKITR");
         snd_soc_dapm_nc_pin(codec, "HFL");
         snd_soc_dapm_nc_pin(codec, "HFR");
+        snd_soc_dapm_nc_pin(codec, "VIBRA");
 
         ret = snd_soc_dapm_new_controls(codec, omap3pandora_out_dapm_widgets,
                                 ARRAY_SIZE(omap3pandora_out_dapm_widgets));
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index d89f6dc00908..66d4c165f99b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -973,9 +973,19 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
                 if (!w->power_check)
                         continue;
 
-                power = w->power_check(w);
-                if (power)
-                        sys_power = 1;
+                /* If we're suspending then pull down all the
+                 * power. */
+                switch (event) {
+                case SND_SOC_DAPM_STREAM_SUSPEND:
+                        power = 0;
+                        break;
+
+                default:
+                        power = w->power_check(w);
+                        if (power)
+                                sys_power = 1;
+                        break;
+                }
 
                 if (w->power == power)
                         continue;
@@ -999,8 +1009,12 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
         case SND_SOC_DAPM_STREAM_RESUME:
                 sys_power = 1;
                 break;
+        case SND_SOC_DAPM_STREAM_SUSPEND:
+                sys_power = 0;
+                break;
         case SND_SOC_DAPM_STREAM_NOP:
                 sys_power = codec->bias_level != SND_SOC_BIAS_STANDBY;
+                break;
         default:
                 break;
         }
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c
index 9efcfd08d747..c998220b99c6 100644
--- a/sound/usb/usbmixer.c
+++ b/sound/usb/usbmixer.c
@@ -1071,6 +1071,15 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, unsig
         channels = (ftr[0] - 7) / csize - 1;
 
         master_bits = snd_usb_combine_bytes(ftr + 6, csize);
+        /* master configuration quirks */
+        switch (state->chip->usb_id) {
+        case USB_ID(0x08bb, 0x2702):
+                snd_printk(KERN_INFO
+                           "usbmixer: master volume quirk for PCM2702 chip\n");
+                /* disable non-functional volume control */
+                master_bits &= ~(1 << (USB_FEATURE_VOLUME - 1));
+                break;
+        }
         if (channels > 0)
                 first_ch_bits = snd_usb_combine_bytes(ftr + 6 + csize, csize);
         else