-rw-r--r--Documentation/ABI/obsolete/proc-pid-oom_adj22
-rw-r--r--Documentation/arm/OMAP/DSS7
-rw-r--r--Documentation/block/switching-sched.txt8
-rw-r--r--Documentation/feature-removal-schedule.txt10
-rw-r--r--Documentation/filesystems/xfs-delayed-logging-design.txt11
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/leds-class.txt21
-rw-r--r--Documentation/leds/leds-lp5521.txt88
-rw-r--r--Documentation/leds/leds-lp5523.txt83
-rw-r--r--Documentation/networking/ip-sysctl.txt9
-rw-r--r--Documentation/rbtree.txt4
-rw-r--r--Documentation/sysctl/kernel.txt14
-rw-r--r--MAINTAINERS19
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig34
-rw-r--r--arch/arm/common/gic.c28
-rw-r--r--arch/arm/include/asm/hardware/it8152.h2
-rw-r--r--arch/arm/kernel/hw_breakpoint.c3
-rw-r--r--arch/arm/kernel/perf_event.c2
-rw-r--r--arch/arm/kernel/stacktrace.c2
-rw-r--r--arch/arm/kernel/traps.c5
-rw-r--r--arch/arm/kernel/unwind.c2
-rw-r--r--arch/arm/mach-ep93xx/include/mach/dma.h111
-rw-r--r--arch/arm/mach-kirkwood/common.c7
-rw-r--r--arch/arm/mach-kirkwood/d2net_v2-setup.c2
-rw-r--r--arch/arm/mach-kirkwood/lacie_v2-common.c14
-rw-r--r--arch/arm/mach-kirkwood/lacie_v2-common.h2
-rw-r--r--arch/arm/mach-kirkwood/mpp.c4
-rw-r--r--arch/arm/mach-kirkwood/netspace_v2-setup.c6
-rw-r--r--arch/arm/mach-kirkwood/netxbig_v2-setup.c4
-rw-r--r--arch/arm/mach-kirkwood/ts41x-setup.c14
-rw-r--r--arch/arm/mach-mmp/include/mach/cputype.h3
-rw-r--r--arch/arm/mach-mv78xx0/mpp.c4
-rw-r--r--arch/arm/mach-omap1/devices.c5
-rw-r--r--arch/arm/mach-omap1/include/mach/camera.h2
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c3
-rw-r--r--arch/arm/mach-orion5x/mpp.c4
-rw-r--r--arch/arm/mach-orion5x/ts78xx-setup.c2
-rw-r--r--arch/arm/mach-pxa/cm-x2xx.c2
-rw-r--r--arch/arm/mach-pxa/saar.c2
-rw-r--r--arch/arm/mach-s3c64xx/Kconfig2
-rw-r--r--arch/arm/mach-s3c64xx/clock.c2
-rw-r--r--arch/arm/mach-s3c64xx/dev-audio.c60
-rw-r--r--arch/arm/mach-s5p6442/dev-audio.c44
-rw-r--r--arch/arm/mach-s5p64x0/clock-s5p6440.c2
-rw-r--r--arch/arm/mach-s5p64x0/clock-s5p6450.c2
-rw-r--r--arch/arm/mach-s5p64x0/dev-audio.c55
-rw-r--r--arch/arm/mach-s5pc100/dev-audio.c54
-rw-r--r--arch/arm/mach-s5pv210/clock.c10
-rw-r--r--arch/arm/mach-s5pv210/dev-audio.c56
-rw-r--r--arch/arm/mach-s5pv310/Kconfig1
-rw-r--r--arch/arm/mach-s5pv310/Makefile3
-rw-r--r--arch/arm/mach-s5pv310/dev-audio.c364
-rw-r--r--arch/arm/mach-s5pv310/dma.c168
-rw-r--r--arch/arm/mach-s5pv310/include/mach/dma.h26
-rw-r--r--arch/arm/mach-s5pv310/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-s5pv310/include/mach/map.h21
-rw-r--r--arch/arm/mach-shmobile/Kconfig2
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c52
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c102
-rw-r--r--arch/arm/mach-shmobile/include/mach/gpio.h4
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h2
-rw-r--r--arch/arm/mach-shmobile/intc-sh7372.c2
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c2
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/plat-omap/devices.c4
-rw-r--r--arch/arm/plat-omap/dma.c2
-rw-r--r--arch/arm/plat-orion/include/plat/pcie.h3
-rw-r--r--arch/arm/plat-orion/pcie.c5
-rw-r--r--arch/arm/plat-samsung/include/plat/audio.h24
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h9
-rw-r--r--arch/m68k/include/asm/irqflags.h2
-rw-r--r--arch/m68k/include/asm/machdep.h1
-rw-r--r--arch/powerpc/kernel/kvm.c2
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S2
-rw-r--r--arch/powerpc/kvm/e500.c2
-rw-r--r--arch/powerpc/kvm/powerpc.c1
-rw-r--r--arch/powerpc/kvm/timing.c2
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/Makefile3
-rw-r--r--arch/sh/boards/Kconfig7
-rw-r--r--arch/sh/boards/Makefile2
-rw-r--r--arch/sh/boards/board-edosk7705.c78
-rw-r--r--arch/sh/boards/board-secureedge5410.c (renamed from arch/sh/boards/mach-snapgear/setup.c)38
-rw-r--r--arch/sh/boards/mach-edosk7705/Makefile5
-rw-r--r--arch/sh/boards/mach-edosk7705/io.c71
-rw-r--r--arch/sh/boards/mach-edosk7705/setup.c36
-rw-r--r--arch/sh/boards/mach-microdev/io.c246
-rw-r--r--arch/sh/boards/mach-microdev/setup.c23
-rw-r--r--arch/sh/boards/mach-se/7206/Makefile2
-rw-r--r--arch/sh/boards/mach-se/7206/io.c104
-rw-r--r--arch/sh/boards/mach-se/7206/irq.c4
-rw-r--r--arch/sh/boards/mach-se/7206/setup.c15
-rw-r--r--arch/sh/boards/mach-se/770x/Makefile2
-rw-r--r--arch/sh/boards/mach-se/770x/io.c156
-rw-r--r--arch/sh/boards/mach-se/770x/setup.c22
-rw-r--r--arch/sh/boards/mach-se/7751/Makefile2
-rw-r--r--arch/sh/boards/mach-se/7751/io.c119
-rw-r--r--arch/sh/boards/mach-se/7751/setup.c18
-rw-r--r--arch/sh/boards/mach-snapgear/Makefile5
-rw-r--r--arch/sh/boards/mach-snapgear/io.c121
-rw-r--r--arch/sh/boards/mach-systemh/Makefile13
-rw-r--r--arch/sh/boards/mach-systemh/io.c158
-rw-r--r--arch/sh/boards/mach-systemh/irq.c61
-rw-r--r--arch/sh/boards/mach-systemh/setup.c57
-rw-r--r--arch/sh/configs/secureedge5410_defconfig (renamed from arch/sh/configs/snapgear_defconfig)0
-rw-r--r--arch/sh/configs/systemh_defconfig28
-rw-r--r--arch/sh/include/asm/addrspace.h8
-rw-r--r--arch/sh/include/asm/pgtable.h12
-rw-r--r--arch/sh/include/asm/system.h4
-rw-r--r--arch/sh/include/asm/system_32.h36
-rw-r--r--arch/sh/include/asm/system_64.h3
-rw-r--r--arch/sh/include/asm/uncached.h40
-rw-r--r--arch/sh/include/mach-common/mach/edosk7705.h7
-rw-r--r--arch/sh/include/mach-common/mach/microdev.h9
-rw-r--r--arch/sh/include/mach-common/mach/secureedge5410.h (renamed from arch/sh/include/mach-common/mach/snapgear.h)22
-rw-r--r--arch/sh/include/mach-common/mach/systemh7751.h71
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7724.c6
-rw-r--r--arch/sh/mm/Kconfig2
-rw-r--r--arch/sh/mm/consistent.c15
-rw-r--r--arch/sh/mm/uncached.c2
-rw-r--r--arch/sh/tools/mach-types1
-rw-r--r--arch/tile/include/asm/highmem.h1
-rw-r--r--arch/tile/include/asm/kmap_types.h34
-rw-r--r--arch/tile/include/asm/pgtable.h6
-rw-r--r--arch/tile/include/asm/stat.h3
-rw-r--r--arch/tile/include/asm/unistd.h1
-rw-r--r--arch/tile/kernel/compat.c10
-rw-r--r--arch/tile/kernel/early_printk.c2
-rw-r--r--arch/tile/kernel/hardwall.c6
-rw-r--r--arch/tile/kernel/irq.c4
-rw-r--r--arch/tile/kernel/machine_kexec.c6
-rw-r--r--arch/tile/kernel/messaging.c2
-rw-r--r--arch/tile/kernel/ptrace.c39
-rw-r--r--arch/tile/kernel/reboot.c6
-rw-r--r--arch/tile/kernel/setup.c8
-rw-r--r--arch/tile/kernel/signal.c9
-rw-r--r--arch/tile/kernel/smp.c2
-rw-r--r--arch/tile/kernel/time.c8
-rw-r--r--arch/tile/lib/memcpy_tile64.c11
-rw-r--r--arch/tile/mm/highmem.c2
-rw-r--r--arch/tile/mm/init.c8
-rw-r--r--arch/tile/mm/pgtable.c4
-rw-r--r--arch/um/include/asm/ptrace-generic.h4
-rw-r--r--arch/um/kernel/ptrace.c2
-rw-r--r--arch/x86/include/asm/apic.h10
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h189
-rw-r--r--arch/x86/kernel/apic/apic.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c4
-rw-r--r--arch/x86/kernel/microcode_amd.c2
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c7
-rw-r--r--arch/x86/kernel/pvclock.c38
-rw-r--r--arch/x86/kvm/mmu.c9
-rw-r--r--arch/x86/kvm/x86.c16
-rw-r--r--arch/x86/mm/tlb.c2
-rw-r--r--arch/x86/pci/acpi.c103
-rw-r--r--arch/x86/pci/xen.c8
-rw-r--r--arch/x86/platform/uv/tlb_uv.c13
-rw-r--r--arch/x86/xen/mmu.c2
-rw-r--r--arch/x86/xen/setup.c18
-rw-r--r--block/blk-core.c11
-rw-r--r--block/blk-ioc.c14
-rw-r--r--block/blk-map.c2
-rw-r--r--block/compat_ioctl.c4
-rw-r--r--block/elevator.c4
-rw-r--r--block/ioctl.c7
-rw-r--r--block/scsi_ioctl.c34
-rw-r--r--crypto/pcrypt.c1
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/debugfs.c2
-rw-r--r--drivers/ata/libata-scsi.c5
-rw-r--r--drivers/ata/pata_legacy.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c2
-rw-r--r--drivers/atm/solos-attrlist.c1
-rw-r--r--drivers/atm/solos-pci.c8
-rw-r--r--drivers/block/aoe/aoeblk.c3
-rw-r--r--drivers/block/cciss.c131
-rw-r--r--drivers/block/cciss.h4
-rw-r--r--drivers/block/drbd/drbd_actlog.c42
-rw-r--r--drivers/block/drbd/drbd_int.h52
-rw-r--r--drivers/block/drbd/drbd_main.c148
-rw-r--r--drivers/block/drbd/drbd_nl.c25
-rw-r--r--drivers/block/drbd/drbd_proc.c1
-rw-r--r--drivers/block/drbd/drbd_receiver.c217
-rw-r--r--drivers/block/drbd/drbd_req.c38
-rw-r--r--drivers/block/drbd/drbd_worker.c23
-rw-r--r--drivers/block/floppy.c4
-rw-r--r--drivers/block/loop.c6
-rw-r--r--drivers/block/xen-blkfront.c2
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/char/Makefile44
-rw-r--r--drivers/char/agp/intel-gtt.c6
-rw-r--r--drivers/char/amiserial.c1
-rw-r--r--drivers/char/i8k.c7
-rw-r--r--drivers/char/nozomi.c1
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c3
-rw-r--r--drivers/char/pcmcia/synclink_cs.c1
-rw-r--r--drivers/clocksource/sh_cmt.c10
-rw-r--r--drivers/clocksource/sh_mtu2.c10
-rw-r--r--drivers/clocksource/sh_tmu.c10
-rw-r--r--drivers/crypto/n2_core.c2
-rw-r--r--drivers/crypto/padlock-aes.c2
-rw-r--r--drivers/firewire/ohci.c88
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c26
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c118
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c8
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c70
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c2
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c16
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c129
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h3
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c8
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/r300.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c41
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/rs400.c2
-rw-r--r--drivers/gpu/drm/radeon/rs600.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c86
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c81
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/stub/Kconfig3
-rw-r--r--drivers/hwmon/ad7414.c6
-rw-r--r--drivers/hwmon/adt7470.c4
-rw-r--r--drivers/hwmon/amc6821.c2
-rw-r--r--drivers/hwmon/gpio-fan.c8
-rw-r--r--drivers/hwmon/lm93.c4
-rw-r--r--drivers/hwmon/lm95241.c19
-rw-r--r--drivers/hwmon/ltc4261.c5
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/w83795.c207
-rw-r--r--drivers/i2c/i2c-core.c12
-rw-r--r--drivers/i2c/i2c-mux.c1
-rw-r--r--drivers/input/input.c87
-rw-r--r--drivers/input/keyboard/adp5588-keys.c74
-rw-r--r--drivers/input/keyboard/atkbd.c12
-rw-r--r--drivers/input/misc/pcf8574_keypad.c23
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h11
-rw-r--r--drivers/input/tablet/acecad.c3
-rw-r--r--drivers/isdn/hisax/isar.c4
-rw-r--r--drivers/leds/Kconfig20
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c105
-rw-r--r--drivers/leds/led-triggers.c2
-rw-r--r--drivers/leds/leds-gpio.c2
-rw-r--r--drivers/leds/leds-lp5521.c821
-rw-r--r--drivers/leds/leds-lp5523.c1065
-rw-r--r--drivers/leds/leds-net5501.c2
-rw-r--r--drivers/leds/ledtrig-timer.c124
-rw-r--r--drivers/macintosh/adb-iop.c4
-rw-r--r--drivers/md/md.c20
-rw-r--r--drivers/media/Kconfig1
-rw-r--r--drivers/media/common/saa7146_i2c.c1
-rw-r--r--drivers/media/dvb/frontends/dibx000_common.c1
-rw-r--r--drivers/media/video/cafe_ccic.c5
-rw-r--r--drivers/media/video/cx231xx/cx231xx-417.c6
-rw-r--r--drivers/media/video/cx23885/cx23885-417.c9
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c5
-rw-r--r--drivers/media/video/imx074.c2
-rw-r--r--drivers/media/video/ir-kbd-i2c.c1
-rw-r--r--drivers/media/video/mx2_camera.c13
-rw-r--r--drivers/media/video/mx3_camera.c4
-rw-r--r--drivers/media/video/omap1_camera.c16
-rw-r--r--drivers/media/video/ov6650.c6
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c24
-rw-r--r--drivers/media/video/se401.c7
-rw-r--r--drivers/media/video/stk-webcam.c4
-rw-r--r--drivers/media/video/tlg2300/pd-main.c13
-rw-r--r--drivers/media/video/usbvideo/vicam.c29
-rw-r--r--drivers/media/video/v4l2-dev.c7
-rw-r--r--drivers/media/video/zoran/zoran.h1
-rw-r--r--drivers/media/video/zoran/zoran_card.c1
-rw-r--r--drivers/media/video/zoran/zoran_driver.c27
-rw-r--r--drivers/misc/apds9802als.c3
-rw-r--r--drivers/misc/bh1770glc.c8
-rw-r--r--drivers/misc/isl29020.c4
-rw-r--r--drivers/net/atlx/atl1.c1
-rw-r--r--drivers/net/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h9
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c57
-rw-r--r--drivers/net/caif/caif_spi.c57
-rw-r--r--drivers/net/caif/caif_spi_slave.c13
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c1
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c1
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c43
-rw-r--r--drivers/net/cxgb4vf/sge.c122
-rw-r--r--drivers/net/cxgb4vf/t4vf_common.h1
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c19
-rw-r--r--drivers/net/gianfar_ethtool.c5
-rw-r--r--drivers/net/ibm_newemac/core.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c60
-rw-r--r--drivers/net/jme.c4
-rw-r--r--drivers/net/netxen/netxen_nic_main.c3
-rw-r--r--drivers/net/pcmcia/axnet_cs.c30
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/r8169.c9
-rw-r--r--drivers/net/skge.c1
-rw-r--r--drivers/net/smsc911x.h2
-rw-r--r--drivers/net/tulip/de2104x.c1
-rw-r--r--drivers/net/ucc_geth.c25
-rw-r--r--drivers/net/usb/usbnet.c11
-rw-r--r--drivers/net/virtio_net.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c4
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c3
-rw-r--r--drivers/net/wireless/libertas/cfg.c5
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/main.c7
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig3
-rw-r--r--drivers/pci/bus.c70
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c6
-rw-r--r--drivers/pci/pci-sysfs.c23
-rw-r--r--drivers/pci/pci.c12
-rw-r--r--drivers/pci/pci.h7
-rw-r--r--drivers/pci/proc.c2
-rw-r--r--drivers/pci/xen-pcifront.c6
-rw-r--r--drivers/pcmcia/pd6729.c8
-rw-r--r--drivers/pcmcia/pd6729.h2
-rw-r--r--drivers/pcmcia/pxa2xx_sharpsl.c2
-rw-r--r--drivers/pcmcia/sa1100_assabet.c2
-rw-r--r--drivers/pcmcia/sa1100_cerf.c2
-rw-r--r--drivers/pcmcia/sa1100_generic.c2
-rw-r--r--drivers/pcmcia/sa1100_h3600.c2
-rw-r--r--drivers/pcmcia/sa1100_shannon.c2
-rw-r--r--drivers/pcmcia/sa1100_simpad.c2
-rw-r--r--drivers/pcmcia/soc_common.c9
-rw-r--r--drivers/rapidio/rio.c4
-rw-r--r--drivers/rtc/rtc-ds1302.c2
-rw-r--r--drivers/rtc/rtc-sh.c4
-rw-r--r--drivers/s390/net/qeth_core.h9
-rw-r--r--drivers/s390/net/qeth_core_main.c55
-rw-r--r--drivers/scsi/scsi_error.c18
-rw-r--r--drivers/serial/8250_pci.c5
-rw-r--r--drivers/serial/bfin_5xx.c31
-rw-r--r--drivers/serial/kgdboc.c59
-rw-r--r--drivers/sh/clk/core.c96
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/dynamic.c2
-rw-r--r--drivers/sh/intc/virq.c2
-rw-r--r--drivers/staging/ath6kl/Kconfig2
-rw-r--r--drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c4
-rw-r--r--drivers/staging/ath6kl/os/linux/ar6000_drv.c5
-rw-r--r--drivers/staging/ath6kl/os/linux/cfg80211.c7
-rw-r--r--drivers/staging/ath6kl/os/linux/include/athendpack_linux.h0
-rw-r--r--drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h0
-rw-r--r--drivers/staging/batman-adv/hard-interface.c15
-rw-r--r--drivers/staging/batman-adv/routing.c12
-rw-r--r--drivers/staging/batman-adv/routing.h4
-rw-r--r--drivers/staging/batman-adv/unicast.c2
-rw-r--r--drivers/staging/bcm/Bcmchar.c49
-rw-r--r--drivers/staging/brcm80211/README2
-rw-r--r--drivers/staging/brcm80211/TODO2
-rw-r--r--drivers/staging/brcm80211/brcmfmac/dhd_linux.c2
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c12
-rw-r--r--drivers/staging/cpia/cpia.c6
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c1
-rw-r--r--drivers/staging/hv/hv_utils.c3
-rw-r--r--drivers/staging/intel_sst/intel_sst_app_interface.c284
-rw-r--r--drivers/staging/intel_sst/intel_sst_common.h4
-rw-r--r--drivers/staging/keucr/init.c18
-rw-r--r--drivers/staging/keucr/ms.c14
-rw-r--r--drivers/staging/keucr/msscsi.c6
-rw-r--r--drivers/staging/keucr/sdscsi.c4
-rw-r--r--drivers/staging/keucr/smilsub.c18
-rw-r--r--drivers/staging/keucr/transport.c2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c4
-rw-r--r--drivers/staging/rt2860/common/cmm_aes.c2
-rw-r--r--drivers/staging/rt2860/usb_main_dev.c1
-rw-r--r--drivers/staging/rtl8192e/r8192E_core.c3
-rw-r--r--drivers/staging/solo6x10/solo6010-v4l2-enc.c2
-rw-r--r--drivers/staging/solo6x10/solo6010-v4l2.c2
-rw-r--r--drivers/staging/stradis/stradis.c11
-rw-r--r--drivers/staging/tidspbridge/Kconfig1
-rw-r--r--drivers/staging/tidspbridge/Makefile7
-rw-r--r--drivers/staging/tidspbridge/core/_deh.h5
-rw-r--r--drivers/staging/tidspbridge/core/_tiomap.h19
-rw-r--r--drivers/staging/tidspbridge/core/dsp-mmu.c317
-rw-r--r--drivers/staging/tidspbridge/core/io_sm.c180
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c1083
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430_pwr.c4
-rw-r--r--drivers/staging/tidspbridge/core/tiomap_io.c17
-rw-r--r--drivers/staging/tidspbridge/core/ue_deh.c115
-rw-r--r--drivers/staging/tidspbridge/hw/EasiGlobal.h41
-rw-r--r--drivers/staging/tidspbridge/hw/MMUAccInt.h76
-rw-r--r--drivers/staging/tidspbridge/hw/MMURegAcM.h225
-rw-r--r--drivers/staging/tidspbridge/hw/hw_defs.h58
-rw-r--r--drivers/staging/tidspbridge/hw/hw_mmu.c562
-rw-r--r--drivers/staging/tidspbridge/hw/hw_mmu.h163
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h1
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dev.h24
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dmm.h75
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/drv.h10
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h67
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspdefs.h44
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/dspioctl.h7
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/proc.h46
-rw-r--r--drivers/staging/tidspbridge/pmgr/dev.c63
-rw-r--r--drivers/staging/tidspbridge/pmgr/dmm.c533
-rw-r--r--drivers/staging/tidspbridge/pmgr/dspapi.c34
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv.c15
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/node.c48
-rw-r--r--drivers/staging/tidspbridge/rmgr/proc.c197
-rw-r--r--drivers/staging/tm6000/tm6000-video.c1
-rw-r--r--drivers/staging/udlfb/udlfb.c2
-rw-r--r--drivers/staging/vt6656/main_usb.c3
-rw-r--r--drivers/staging/westbridge/astoria/api/src/cyasusb.c1
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c6
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c2
-rw-r--r--drivers/tty/Makefile11
-rw-r--r--drivers/tty/n_gsm.c (renamed from drivers/char/n_gsm.c)5
-rw-r--r--drivers/tty/n_hdlc.c (renamed from drivers/char/n_hdlc.c)0
-rw-r--r--drivers/tty/n_r3964.c (renamed from drivers/char/n_r3964.c)0
-rw-r--r--drivers/tty/n_tty.c (renamed from drivers/char/n_tty.c)0
-rw-r--r--drivers/tty/pty.c (renamed from drivers/char/pty.c)0
-rw-r--r--drivers/tty/sysrq.c (renamed from drivers/char/sysrq.c)0
-rw-r--r--drivers/tty/tty_audit.c (renamed from drivers/char/tty_audit.c)0
-rw-r--r--drivers/tty/tty_buffer.c (renamed from drivers/char/tty_buffer.c)14
-rw-r--r--drivers/tty/tty_io.c (renamed from drivers/char/tty_io.c)0
-rw-r--r--drivers/tty/tty_ioctl.c (renamed from drivers/char/tty_ioctl.c)0
-rw-r--r--drivers/tty/tty_ldisc.c (renamed from drivers/char/tty_ldisc.c)49
-rw-r--r--drivers/tty/tty_mutex.c (renamed from drivers/char/tty_mutex.c)0
-rw-r--r--drivers/tty/tty_port.c (renamed from drivers/char/tty_port.c)0
-rw-r--r--drivers/tty/vt/.gitignore (renamed from drivers/char/.gitignore)0
-rw-r--r--drivers/tty/vt/Makefile34
-rw-r--r--drivers/tty/vt/consolemap.c (renamed from drivers/char/consolemap.c)0
-rw-r--r--drivers/tty/vt/cp437.uni (renamed from drivers/char/cp437.uni)0
-rw-r--r--drivers/tty/vt/defkeymap.c_shipped (renamed from drivers/char/defkeymap.c_shipped)0
-rw-r--r--drivers/tty/vt/defkeymap.map (renamed from drivers/char/defkeymap.map)0
-rw-r--r--drivers/tty/vt/keyboard.c (renamed from drivers/char/keyboard.c)0
-rw-r--r--drivers/tty/vt/selection.c (renamed from drivers/char/selection.c)0
-rw-r--r--drivers/tty/vt/vc_screen.c (renamed from drivers/char/vc_screen.c)6
-rw-r--r--drivers/tty/vt/vt.c (renamed from drivers/char/vt.c)0
-rw-r--r--drivers/tty/vt/vt_ioctl.c (renamed from drivers/char/vt_ioctl.c)0
-rw-r--r--drivers/usb/core/devio.c7
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/goku_udc.h3
-rw-r--r--drivers/usb/gadget/u_ether.c1
-rw-r--r--drivers/usb/gadget/u_serial.c54
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-mxc.c14
-rw-r--r--drivers/usb/host/ohci-jz4740.c2
-rw-r--r--drivers/usb/misc/iowarrior.c1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/musb/blackfin.c80
-rw-r--r--drivers/usb/musb/musb_core.c41
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_gadget.c41
-rw-r--r--drivers/usb/musb/musb_regs.h3
-rw-r--r--drivers/usb/musb/musbhsdma.c14
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/storage/uas.c5
-rw-r--r--drivers/uwb/allocator.c3
-rw-r--r--drivers/video/backlight/adp8860_bl.c8
-rw-r--r--drivers/video/backlight/l4f00242t03.c2
-rw-r--r--drivers/video/backlight/lms283gf05.c2
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c18
-rw-r--r--drivers/video/backlight/pwm_bl.c7
-rw-r--r--drivers/video/backlight/s6e63m0.c7
-rw-r--r--drivers/video/omap2/vram.c17
-rw-r--r--drivers/video/riva/rivafb-i2c.c1
-rw-r--r--drivers/video/sh_mobile_hdmi.c5
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c21
-rw-r--r--drivers/video/sis/sis_main.c8
-rw-r--r--drivers/xen/events.c25
-rw-r--r--fs/bio.c23
-rw-r--r--fs/cifs/TODO2
-rw-r--r--fs/cifs/cifs_fs_sb.h6
-rw-r--r--fs/cifs/cifsfs.c5
-rw-r--r--fs/cifs/cifsglob.h3
-rw-r--r--fs/cifs/cifsproto.h1
-rw-r--r--fs/cifs/connect.c195
-rw-r--r--fs/cifs/file.c72
-rw-r--r--fs/cifs/inode.c1
-rw-r--r--fs/cifs/ioctl.c16
-rw-r--r--fs/cifs/misc.c25
-rw-r--r--fs/ext4/ext4.h4
-rw-r--r--fs/ext4/inode.c5
-rw-r--r--fs/ext4/mballoc.c2
-rw-r--r--fs/ext4/page-io.c97
-rw-r--r--fs/ext4/super.c102
-rw-r--r--fs/gfs2/export.c46
-rw-r--r--fs/gfs2/glock.c21
-rw-r--r--fs/gfs2/inode.c152
-rw-r--r--fs/gfs2/inode.h4
-rw-r--r--fs/gfs2/rgrp.c91
-rw-r--r--fs/hugetlbfs/inode.c3
-rw-r--r--fs/ioprio.c18
-rw-r--r--fs/locks.c19
-rw-r--r--fs/logfs/logfs.h2
-rw-r--r--fs/nfsd/nfs4state.c16
-rw-r--r--fs/ocfs2/ocfs2.h6
-rw-r--r--fs/openpromfs/inode.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c7
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c1
-rw-r--r--fs/xfs/xfs_filestream.c8
-rw-r--r--fs/xfs/xfs_mount.c1
-rw-r--r--fs/xfs/xfs_quota.h20
-rw-r--r--include/asm-generic/stat.h14
-rw-r--r--include/drm/ttm/ttm_bo_api.h4
-rw-r--r--include/drm/ttm/ttm_bo_driver.h79
-rw-r--r--include/linux/atomic.h37
-rw-r--r--include/linux/bio.h4
-rw-r--r--include/linux/blk_types.h6
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/drbd.h2
-rw-r--r--include/linux/fsl-diu-fb.h1
-rw-r--r--include/linux/hardirq.h8
-rw-r--r--include/linux/highmem.h1
-rw-r--r--include/linux/i2c-id.h22
-rw-r--r--include/linux/i2c.h2
-rw-r--r--include/linux/i2c/adp5588.h15
-rw-r--r--include/linux/if_vlan.h25
-rw-r--r--include/linux/input.h4
-rw-r--r--include/linux/iocontext.h1
-rw-r--r--include/linux/kernel.h246
-rw-r--r--include/linux/leds-lp5521.h47
-rw-r--r--include/linux/leds-lp5523.h47
-rw-r--r--include/linux/leds.h47
-rw-r--r--include/linux/mmc/sh_mmcif.h18
-rw-r--r--include/linux/netdevice.h5
-rw-r--r--include/linux/netfilter.h2
-rw-r--r--include/linux/perf_event.h10
-rw-r--r--include/linux/printk.h248
-rw-r--r--include/linux/pwm_backlight.h1
-rw-r--r--include/linux/radix-tree.h39
-rw-r--r--include/linux/resource.h1
-rw-r--r--include/linux/security.h9
-rw-r--r--include/linux/sh_clk.h4
-rw-r--r--include/linux/sh_intc.h2
-rw-r--r--include/linux/sh_timer.h1
-rw-r--r--include/linux/sunrpc/svc_xprt.h18
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/musb.h2
-rw-r--r--include/net/caif/caif_dev.h4
-rw-r--r--include/net/caif/caif_spi.h2
-rw-r--r--include/net/caif/cfcnfg.h8
-rw-r--r--include/net/dn.h2
-rw-r--r--include/net/dst_ops.h1
-rw-r--r--include/net/netlink.h2
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/tcp.h6
-rw-r--r--include/net/udp.h4
-rw-r--r--include/trace/events/ext4.h97
-rw-r--r--kernel/exit.c8
-rw-r--r--kernel/latencytop.c17
-rw-r--r--kernel/perf_event.c42
-rw-r--r--kernel/printk.c21
-rw-r--r--kernel/range.c2
-rw-r--r--kernel/relay.c15
-rw-r--r--kernel/sysctl.c9
-rw-r--r--kernel/trace/blktrace.c4
-rw-r--r--kernel/watchdog.c2
-rw-r--r--lib/radix-tree.c83
-rw-r--r--mm/filemap.c33
-rw-r--r--mm/memcontrol.c16
-rw-r--r--mm/mprotect.c2
-rw-r--r--mm/slub.c3
-rw-r--r--mm/vmscan.c2
-rw-r--r--mm/vmstat.c2
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/bluetooth/hci_event.c6
-rw-r--r--net/bluetooth/hidp/Kconfig2
-rw-r--r--net/bluetooth/l2cap.c8
-rw-r--r--net/bluetooth/rfcomm/core.c13
-rw-r--r--net/caif/caif_config_util.c13
-rw-r--r--net/caif/caif_dev.c2
-rw-r--r--net/caif/caif_socket.c45
-rw-r--r--net/caif/cfcnfg.c17
-rw-r--r--net/caif/cfctrl.c3
-rw-r--r--net/caif/cfdbgl.c14
-rw-r--r--net/caif/cfrfml.c2
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/dst.c1
-rw-r--r--net/core/filter.c64
-rw-r--r--net/core/pktgen.c6
-rw-r--r--net/core/rtnetlink.c9
-rw-r--r--net/core/sock.c14
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/decnet/sysctl_net_decnet.c4
-rw-r--r--net/ipv4/fib_lookup.h5
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_diag.c27
-rw-r--r--net/ipv4/netfilter/arp_tables.c1
-rw-r--r--net/ipv4/netfilter/ip_tables.c1
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c40
-rw-r--r--net/ipv4/proc.c8
-rw-r--r--net/ipv4/sysctl_net_ipv4.c5
-rw-r--r--net/ipv4/tcp.c6
-rw-r--r--net/ipv4/tcp_input.c11
-rw-r--r--net/ipv4/tcp_ipv4.c8
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/addrconf.c24
-rw-r--r--net/ipv6/netfilter/ip6_tables.c1
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c2
-rw-r--r--net/ipv6/reassembly.c2
-rw-r--r--net/ipv6/route.c8
-rw-r--r--net/l2tp/l2tp_debugfs.c2
-rw-r--r--net/mac80211/iface.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c3
-rw-r--r--net/netfilter/nf_conntrack_proto.c6
-rw-r--r--net/packet/af_packet.c7
-rw-r--r--net/rds/loop.c4
-rw-r--r--net/rds/message.c4
-rw-r--r--net/rds/tcp.c6
-rw-r--r--net/sched/cls_basic.c4
-rw-r--r--net/sched/cls_cgroup.c2
-rw-r--r--net/sched/em_text.c3
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/sctp/sysctl.c4
-rw-r--r--net/tipc/socket.c1
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/x25/x25_facilities.c20
-rw-r--r--net/x25/x25_in.c2
-rw-r--r--scripts/kconfig/symbol.c2
-rw-r--r--security/Kconfig12
-rw-r--r--security/apparmor/lsm.c6
-rw-r--r--security/apparmor/policy.c2
-rw-r--r--security/capability.c5
-rw-r--r--security/commoncap.c19
-rw-r--r--security/security.c4
-rw-r--r--security/selinux/hooks.c6
-rw-r--r--security/smack/smack_lsm.c8
-rw-r--r--sound/pci/asihpi/hpi6000.c2
-rw-r--r--sound/pci/asihpi/hpi6205.c2
-rw-r--r--sound/pci/asihpi/hpicmn.c12
-rw-r--r--sound/pci/cs46xx/dsp_spos.c33
-rw-r--r--sound/pci/hda/patch_cirrus.c1
-rw-r--r--sound/pci/lx6464es/lx6464es.c4
-rw-r--r--sound/pci/lx6464es/lx6464es.h2
-rw-r--r--sound/pci/lx6464es/lx_core.c2
-rw-r--r--sound/usb/mixer_quirks.c15
-rw-r--r--sound/usb/pcm.c4
-rw-r--r--tools/perf/Documentation/perf-trace.txt57
-rw-r--r--tools/perf/builtin-record.c10
-rw-r--r--tools/perf/builtin-top.c12
-rw-r--r--tools/perf/builtin-trace.c209
-rw-r--r--tools/perf/scripts/perl/bin/failed-syscalls-record2
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-file-record2
-rw-r--r--tools/perf/scripts/perl/bin/rw-by-pid-record2
-rw-r--r--tools/perf/scripts/perl/bin/rwtop-record2
-rw-r--r--tools/perf/scripts/perl/bin/wakeup-latency-record2
-rw-r--r--tools/perf/scripts/perl/bin/workqueue-stats-record2
-rw-r--r--tools/perf/scripts/python/bin/failed-syscalls-by-pid-record2
-rw-r--r--tools/perf/scripts/python/bin/futex-contention-record2
-rw-r--r--tools/perf/scripts/python/bin/netdev-times-record2
-rw-r--r--tools/perf/scripts/python/bin/sched-migration-record2
-rw-r--r--tools/perf/scripts/python/bin/sctop-record2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-by-pid-record2
-rw-r--r--tools/perf/scripts/python/bin/syscall-counts-record2
-rw-r--r--tools/perf/util/ui/util.c5
691 files changed, 12126 insertions, 5826 deletions
diff --git a/Documentation/ABI/obsolete/proc-pid-oom_adj b/Documentation/ABI/obsolete/proc-pid-oom_adj
new file mode 100644
index 00000000000..cf63f264ce0
--- /dev/null
+++ b/Documentation/ABI/obsolete/proc-pid-oom_adj
@@ -0,0 +1,22 @@
+What:	/proc/<pid>/oom_adj
+When:	August 2012
+Why:	/proc/<pid>/oom_adj allows userspace to influence the oom killer's
+	badness heuristic used to determine which task to kill when the kernel
+	is out of memory.
+
+	The badness heuristic has since been rewritten since the introduction of
+	this tunable such that its meaning is deprecated. The value was
+	implemented as a bitshift on a score generated by the badness()
+	function that did not have any precise units of measure. With the
+	rewrite, the score is given as a proportion of available memory to the
+	task allocating pages, so using a bitshift which grows the score
+	exponentially is, thus, impossible to tune with fine granularity.
+
+	A much more powerful interface, /proc/<pid>/oom_score_adj, was
+	introduced with the oom killer rewrite that allows users to increase or
+	decrease the badness() score linearly. This interface will replace
+	/proc/<pid>/oom_adj.
+
+	A warning will be emitted to the kernel log if an application uses this
+	deprecated interface. After it is printed once, future warnings will be
+	suppressed until the kernel is rebooted.
diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS
index 0af0e9eed5d..888ae7b83ae 100644
--- a/Documentation/arm/OMAP/DSS
+++ b/Documentation/arm/OMAP/DSS
@@ -255,9 +255,10 @@ framebuffer parameters.
 Kernel boot arguments
 ---------------------
 
-vram=<size>
-	- Amount of total VRAM to preallocate. For example, "10M". omapfb
-	  allocates memory for framebuffers from VRAM.
+vram=<size>[,<physaddr>]
+	- Amount of total VRAM to preallocate and optionally a physical start
+	  memory address. For example, "10M". omapfb allocates memory for
+	  framebuffers from VRAM.
 
 omapfb.mode=<display>:<mode>[,...]
 	- Default video mode for specified displays. For example,
diff --git a/Documentation/block/switching-sched.txt b/Documentation/block/switching-sched.txt
index d5af3f63081..71cfbdc0f74 100644
--- a/Documentation/block/switching-sched.txt
+++ b/Documentation/block/switching-sched.txt
@@ -16,7 +16,7 @@ you can do so by typing:
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the anticipatory or noop schedulers - which
+set a specific device to use the deadline or noop schedulers - which
 can improve that device's throughput).
 
 To set a specific scheduler, simply do this:
@@ -31,7 +31,7 @@ a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
 will be displayed, with the currently selected scheduler in brackets:
 
 # cat /sys/block/hda/queue/scheduler
-noop anticipatory deadline [cfq]
-# echo anticipatory > /sys/block/hda/queue/scheduler
+noop deadline [cfq]
+# echo deadline > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
-noop [anticipatory] deadline cfq
+noop [deadline] cfq
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index d8f36f984fa..6c2f55e05f1 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -554,3 +554,13 @@ Why: This is a legacy interface which have been replaced by a more
 Who:	NeilBrown <neilb@suse.de>
 
 ----------------------------
+
+What:	i2c_adapter.id
+When:	June 2011
+Why:	This field is deprecated. I2C device drivers shouldn't change their
+	behavior based on the underlying I2C adapter. Instead, the I2C
+	adapter driver should instantiate the I2C devices and provide the
+	needed platform-specific information.
+Who:	Jean Delvare <khali@linux-fr.org>
+
+----------------------------
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.txt b/Documentation/filesystems/xfs-delayed-logging-design.txt
index 96d0df28bed..7445bf335da 100644
--- a/Documentation/filesystems/xfs-delayed-logging-design.txt
+++ b/Documentation/filesystems/xfs-delayed-logging-design.txt
@@ -794,17 +794,6 @@ designed.
 
 Roadmap:
 
-2.6.37 Remove experimental tag from mount option
-	=> should be roughly 6 months after initial merge
-	=> enough time to:
-		=> gain confidence and fix problems reported by early
-		   adopters (a.k.a. guinea pigs)
-		=> address worst performance regressions and undesired
-		   behaviours
-		=> start tuning/optimising code for parallelism
-		=> start tuning/optimising algorithms consuming
-		   excessive CPU time
-
 2.6.39 Switch default mount option to use delayed logging
 	=> should be roughly 12 months after initial merge
 	=> enough time to shake out remaining problems before next round of
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ed45e9802aa..92e83e53148 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -706,7 +706,7 @@ and is between 256 and 4096 characters. It is defined in the file
 			arch/x86/kernel/cpu/cpufreq/elanfreq.c.
 
 	elevator=	[IOSCHED]
-			Format: {"anticipatory" | "cfq" | "deadline" | "noop"}
+			Format: {"cfq" | "deadline" | "noop"}
 			See Documentation/block/as-iosched.txt and
 			Documentation/block/deadline-iosched.txt for details.
 
diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt
index 8fd5ca2ae32..58b266bd184 100644
--- a/Documentation/leds-class.txt
+++ b/Documentation/leds-class.txt
@@ -60,15 +60,18 @@ Hardware accelerated blink of LEDs
 
 Some LEDs can be programmed to blink without any CPU interaction. To
 support this feature, a LED driver can optionally implement the
-blink_set() function (see <linux/leds.h>). If implemented, triggers can
-attempt to use it before falling back to software timers. The blink_set()
-function should return 0 if the blink setting is supported, or -EINVAL
-otherwise, which means that LED blinking will be handled by software.
-
-The blink_set() function should choose a user friendly blinking
-value if it is called with *delay_on==0 && *delay_off==0 parameters. In
-this case the driver should give back the chosen value through delay_on
-and delay_off parameters to the leds subsystem.
+blink_set() function (see <linux/leds.h>). To set an LED to blinking,
+however, it is better to use use the API function led_blink_set(),
+as it will check and implement software fallback if necessary.
+
+To turn off blinking again, use the API function led_brightness_set()
+as that will not just set the LED brightness but also stop any software
+timers that may have been required for blinking.
+
+The blink_set() function should choose a user friendly blinking value
+if it is called with *delay_on==0 && *delay_off==0 parameters. In this
+case the driver should give back the chosen value through delay_on and
+delay_off parameters to the leds subsystem.
 
 Setting the brightness to zero with brightness_set() callback function
 should completely turn off the LED and cancel the previously programmed
diff --git a/Documentation/leds/leds-lp5521.txt b/Documentation/leds/leds-lp5521.txt
new file mode 100644
index 00000000000..c4d8d151e0f
--- /dev/null
+++ b/Documentation/leds/leds-lp5521.txt
@@ -0,0 +1,88 @@
+Kernel driver for lp5521
+========================
+
+* National Semiconductor LP5521 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5521.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+
+LP5521 can drive up to 3 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5521:channelx, where x is 0 .. 2
+
+All three channels can be also controlled using the engine micro programs.
+More details of the instructions can be found from the public data sheet.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : store program (visible only in engine load mode)
+
+Example (start to blink the channel 2 led):
+cd /sys/class/leds/lp5521:channel2/device
+echo "load" > engine3_mode
+echo "037f4d0003ff6000" > engine3_load
+echo "run" > engine3_mode
+
+stop the engine:
+echo "disabled" > engine3_mode
+
+sysfs contains a selftest entry.
+The test communicates with the chip and checks that
+the clock mode is automatically set to the requested one.
+
+Each channel has its own led current settings.
+/sys/class/leds/lp5521:channel0/led_current - RW
+/sys/class/leds/lp5521:channel0/max_current - RO
+Format: 10x mA i.e 10 means 1.0 mA
+
+example platform data:
+
+Note: chan_nr can have values between 0 and 2.
+
+static struct lp5521_led_config lp5521_led_config[] = {
+	{
+		.chan_nr	= 0,
+		.led_current	= 50,
+		.max_current	= 130,
+	}, {
+		.chan_nr	= 1,
+		.led_current	= 0,
+		.max_current	= 130,
+	}, {
+		.chan_nr	= 2,
+		.led_current	= 0,
+		.max_current	= 130,
+	}
+};
+
+static int lp5521_setup(void)
+{
+	/* setup HW resources */
+}
+
+static void lp5521_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5521_enable(bool state)
+{
+	/* Control of chip enable signal */
+}
+
+static struct lp5521_platform_data lp5521_platform_data = {
+	.led_config = lp5521_led_config,
+	.num_channels = ARRAY_SIZE(lp5521_led_config),
+	.clock_mode = LP5521_CLOCK_EXT,
+	.setup_resources = lp5521_setup,
+	.release_resources = lp5521_release,
+	.enable = lp5521_enable,
+};
+
+If the current is set to 0 in the platform data, that channel is
+disabled and it is not visible in the sysfs.
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt
new file mode 100644
index 00000000000..fad2feb8b7c
--- /dev/null
+++ b/Documentation/leds/leds-lp5523.txt
@@ -0,0 +1,83 @@
+Kernel driver for lp5523
+========================
+
+* National Semiconductor LP5523 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5523.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+LP5523 can drive up to 9 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5523:channelx where x is 0...8
+
+The chip provides 3 engines. Each engine can control channels without
+interaction from the main CPU. Details of the micro engine code can be found
+from the public data sheet. Leds can be muxed to different channels.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : microcode load (visible only in load mode)
+enginex_leds : led mux control (visible only in load mode)
+
+cd /sys/class/leds/lp5523:channel2/device
+echo "load" > engine3_mode
+echo "9d80400004ff05ff437f0000" > engine3_load
+echo "111111111" > engine3_leds
+echo "run" > engine3_mode
+
+sysfs contains a selftest entry. It measures each channel
+voltage level and checks if it looks reasonable. If the level is too high,
+the led is missing; if the level is too low, there is a short circuit.
+
+Selftest uses always the current from the platform data.
+
+Each channel contains led current settings.
+/sys/class/leds/lp5523:channel2/led_current - RW
+/sys/class/leds/lp5523:channel2/max_current - RO
+Format: 10x mA i.e 10 means 1.0 mA
+
+Example platform data:
+
+Note - chan_nr can have values between 0 and 8.
+
+static struct lp5523_led_config lp5523_led_config[] = {
+	{
+		.chan_nr	= 0,
+		.led_current	= 50,
+		.max_current	= 130,
+	},
+...
+	}, {
+		.chan_nr	= 8,
+		.led_current	= 50,
+		.max_current	= 130,
+	}
+};
+
+static int lp5523_setup(void)
+{
+	/* Setup HW resources */
+}
+
+static void lp5523_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5523_enable(bool state)
+{
+	/* Control chip enable signal */
+}
+
+static struct lp5523_platform_data lp5523_platform_data = {
+	.led_config = lp5523_led_config,
+	.num_channels = ARRAY_SIZE(lp5523_led_config),
+	.clock_mode = LP5523_CLOCK_EXT,
+	.setup_resources = lp5523_setup,
+	.release_resources = lp5523_release,
+	.enable = lp5523_enable,
+};
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index c7165f4cb79..fe95105992c 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -20,6 +20,15 @@ ip_no_pmtu_disc - BOOLEAN
 min_pmtu - INTEGER
 	default 562 - minimum discovered Path MTU
 
+route/max_size - INTEGER
+	Maximum number of routes allowed in the kernel. Increase
+	this when using large numbers of interfaces and/or routes.
+
+neigh/default/gc_thresh3 - INTEGER
+	Maximum number of neighbor entries allowed. Increase this
+	when using large numbers of interfaces and when communicating
+	with large numbers of directly-connected peers.
+
 mtu_expires - INTEGER
 	Time, in seconds, that cached PMTU information is kept.
 
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index 221f38be98f..19f8278c385 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -21,8 +21,8 @@ three rotations, respectively, to balance the tree), with slightly slower
 To quote Linux Weekly News:
 
     There are a number of red-black trees in use in the kernel.
-    The anticipatory, deadline, and CFQ I/O schedulers all employ
-    rbtrees to track requests; the packet CD/DVD driver does the same.
+    The deadline and CFQ I/O schedulers employ rbtrees to
+    track requests; the packet CD/DVD driver does the same.
     The high-resolution timer code uses an rbtree to organize outstanding
     timer requests. The ext3 filesystem tracks directory entries in a
     red-black tree. Virtual memory areas (VMAs) are tracked with red-black
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 3894eaa2348..209e1584c3d 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -28,6 +28,7 @@ show up in /proc/sys/kernel:
 - core_uses_pid
 - ctrl-alt-del
 - dentry-state
+- dmesg_restrict
 - domainname
 - hostname
 - hotplug
@@ -213,6 +214,19 @@ to decide what to do with it.
 
 ==============================================================
 
+dmesg_restrict:
+
+This toggle indicates whether unprivileged users are prevented from using
+dmesg(8) to view messages from the kernel's log buffer. When
+dmesg_restrict is set to (0) there are no restrictions. When
+dmesg_restrict is set set to (1), users must have CAP_SYS_ADMIN to use
+dmesg(8).
+
+The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default
+value of dmesg_restrict.
+
+==============================================================
+
 domainname & hostname:
 
 These files can be used to set the NIS/YP domainname and the
diff --git a/MAINTAINERS b/MAINTAINERS
index 0094224ca79..8e6548dbd5d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -161,7 +161,7 @@ M: Greg Kroah-Hartman <gregkh@suse.de>
 L:	linux-serial@vger.kernel.org
 W:	http://serial.sourceforge.net
 S:	Maintained
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:	drivers/serial/8250*
 F:	include/linux/serial_8250.h
 
@@ -945,7 +945,7 @@ M: Magnus Damm <magnus.damm@gmail.com>
 L:	linux-sh@vger.kernel.org
 W:	http://oss.renesas.com
 Q:	http://patchwork.kernel.org/project/linux-sh/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/genesis-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git rmobile-latest
 S:	Supported
 F:	arch/arm/mach-shmobile/
 F:	drivers/sh/
@@ -2435,6 +2435,7 @@ F: drivers/net/wan/sdla.c
 FRAMEBUFFER LAYER
 L:	linux-fbdev@vger.kernel.org
 W:	http://linux-fbdev.sourceforge.net/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git
 S:	Orphan
 F:	Documentation/fb/
 F:	drivers/video/fb*
@@ -5676,7 +5677,7 @@ S: Maintained
 
 STAGING SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@suse.de>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-next-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6.git
 L:	devel@driverdev.osuosl.org
 S:	Maintained
 F:	drivers/staging/
@@ -5705,7 +5706,7 @@ M: Paul Mundt <lethal@linux-sh.org>
 L:	linux-sh@vger.kernel.org
 W:	http://www.linux-sh.org
 Q:	http://patchwork.kernel.org/project/linux-sh/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6.git sh-latest
 S:	Supported
 F:	Documentation/sh/
 F:	arch/sh/
@@ -5910,7 +5911,7 @@ S: Maintained
 TTY LAYER
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 S:	Maintained
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:	drivers/char/tty_*
 F:	drivers/serial/serial_core.c
 F:	include/linux/serial_core.h
@@ -6233,7 +6234,7 @@ USB SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 L:	linux-usb@vger.kernel.org
 W:	http://www.linux-usb.org
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
 S:	Supported
 F:	Documentation/usb/
 F:	drivers/net/usb/
@@ -6598,14 +6599,14 @@ F: drivers/platform/x86
 
 XEN PCI SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/pci/*xen*
 F:	drivers/pci/*xen*
 
 XEN SWIOTLB SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/xen/*swiotlb*
 F:	drivers/xen/*swiotlb*
@@ -6613,7 +6614,7 @@ F: drivers/xen/*swiotlb*
 XEN HYPERVISOR INTERFACE
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xen.org
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 L:	virtualization@lists.osdl.org
 S:	Supported
 F:	arch/x86/xen/
diff --git a/Makefile b/Makefile
index 6619720f50d..ab5359db3d1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 37
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a19a5266d5f..db524e75c4a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@ config ARM
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
-	select GENERIC_ATOMIC64 if (!CPU_32v6K)
+	select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
@@ -646,7 +646,7 @@ config ARCH_S3C2410
 	select ARCH_HAS_CPUFREQ
 	select HAVE_CLK
 	select ARCH_USES_GETTIMEOFFSET
-	select HAVE_S3C2410_I2C
+	select HAVE_S3C2410_I2C if I2C
 	help
 	  Samsung S3C2410X CPU based systems, such as the Simtec Electronics
 	  BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -676,8 +676,8 @@ config ARCH_S3C64XX
 	select S3C_DEV_NAND
 	select USB_ARCH_HAS_OHCI
 	select SAMSUNG_GPIOLIB_4BIT
-	select HAVE_S3C2410_I2C
-	select HAVE_S3C2410_WATCHDOG
+	select HAVE_S3C2410_I2C if I2C
+	select HAVE_S3C2410_WATCHDOG if WATCHDOG
 	help
 	  Samsung S3C64XX series based systems
 
@@ -686,10 +686,10 @@ config ARCH_S5P64X0
 	select CPU_V6
 	select GENERIC_GPIO
 	select HAVE_CLK
-	select HAVE_S3C2410_WATCHDOG
+	select HAVE_S3C2410_WATCHDOG if WATCHDOG
 	select ARCH_USES_GETTIMEOFFSET
-	select HAVE_S3C2410_I2C
-	select HAVE_S3C_RTC
+	select HAVE_S3C2410_I2C if I2C
+	select HAVE_S3C_RTC if RTC_CLASS
 	help
 	  Samsung S5P64X0 CPU based systems, such as the Samsung SMDK6440,
 	  SMDK6450.
@@ -700,7 +700,7 @@ config ARCH_S5P6442
 	select GENERIC_GPIO
 	select HAVE_CLK
 	select ARCH_USES_GETTIMEOFFSET
-	select HAVE_S3C2410_WATCHDOG
+	select HAVE_S3C2410_WATCHDOG if WATCHDOG
 	help
 	  Samsung S5P6442 CPU based systems
 
@@ -711,9 +711,9 @@ config ARCH_S5PC100
 	select CPU_V7
 	select ARM_L1_CACHE_SHIFT_6
 	select ARCH_USES_GETTIMEOFFSET
-	select HAVE_S3C2410_I2C
-	select HAVE_S3C_RTC
-	select HAVE_S3C2410_WATCHDOG
+	select HAVE_S3C2410_I2C if I2C
+	select HAVE_S3C_RTC if RTC_CLASS
+	select HAVE_S3C2410_WATCHDOG if WATCHDOG
 	help
 	  Samsung S5PC100 series based systems
 
@@ -726,9 +726,9 @@ config ARCH_S5PV210
 	select ARM_L1_CACHE_SHIFT_6
 	select ARCH_HAS_CPUFREQ
 	select ARCH_USES_GETTIMEOFFSET
-	select HAVE_S3C2410_I2C
-	select HAVE_S3C_RTC
-	select HAVE_S3C2410_WATCHDOG
+	select HAVE_S3C2410_I2C if I2C
+	select HAVE_S3C_RTC if RTC_CLASS
+	select HAVE_S3C2410_WATCHDOG if WATCHDOG
 	help
 	  Samsung S5PV210/S5PC110 series based systems
 
@@ -739,9 +739,9 @@ config ARCH_S5PV310
 	select GENERIC_GPIO
 	select HAVE_CLK
 	select GENERIC_CLOCKEVENTS
-	select HAVE_S3C_RTC
-	select HAVE_S3C2410_I2C
-	select HAVE_S3C2410_WATCHDOG
+	select HAVE_S3C_RTC if RTC_CLASS
+	select HAVE_S3C2410_I2C if I2C
+	select HAVE_S3C2410_WATCHDOG if WATCHDOG
 	help
 	  Samsung S5PV310 series based systems
 
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index ada6359160e..772f95f1aec 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -251,15 +251,16 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
251 writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); 251 writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
252 252
253 /* 253 /*
254 * Set priority on all interrupts. 254 * Set priority on all global interrupts.
255 */ 255 */
256 for (i = 0; i < max_irq; i += 4) 256 for (i = 32; i < max_irq; i += 4)
257 writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4); 257 writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
258 258
259 /* 259 /*
260 * Disable all interrupts. 260 * Disable all interrupts. Leave the PPI and SGIs alone
261 * as these enables are banked registers.
261 */ 262 */
262 for (i = 0; i < max_irq; i += 32) 263 for (i = 32; i < max_irq; i += 32)
263 writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); 264 writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
264 265
265 /* 266 /*
@@ -277,11 +278,30 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
277 278
278void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base) 279void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
279{ 280{
281 void __iomem *dist_base;
282 int i;
283
280 if (gic_nr >= MAX_GIC_NR) 284 if (gic_nr >= MAX_GIC_NR)
281 BUG(); 285 BUG();
282 286
287 dist_base = gic_data[gic_nr].dist_base;
288 BUG_ON(!dist_base);
289
283 gic_data[gic_nr].cpu_base = base; 290 gic_data[gic_nr].cpu_base = base;
284 291
292 /*
293 * Deal with the banked PPI and SGI interrupts - disable all
294 * PPI interrupts, ensure all SGI interrupts are enabled.
295 */
296 writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
297 writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
298
299 /*
300 * Set priority on PPI and SGI interrupts
301 */
302 for (i = 0; i < 32; i += 4)
303 writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
304
285 writel(0xf0, base + GIC_CPU_PRIMASK); 305 writel(0xf0, base + GIC_CPU_PRIMASK);
286 writel(1, base + GIC_CPU_CTRL); 306 writel(1, base + GIC_CPU_CTRL);
287} 307}
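The hunks above split the GIC setup by register banking: gic_dist_init() now touches only the global interrupts (ID 32 and up), while gic_cpu_init() programs the per-CPU banked enable and priority registers for the SGIs and PPIs (IDs 0-31), so every processor that comes online configures its own copy. A minimal sketch of the resulting call pattern follows; the SoC name, base addresses and IRQ start value are invented placeholders, and only the two gic_*_init() prototypes come from the hunks above.

/*
 * Illustrative call pattern only; the mysoc_* names and addresses are
 * hypothetical.  gic_dist_init()/gic_cpu_init() prototypes are those
 * visible in the diff above.
 */
#include <linux/init.h>
#include <asm/hardware/gic.h>

#define MYSOC_GIC_DIST_BASE	((void __iomem *)0xfe001000)	/* placeholder */
#define MYSOC_GIC_CPU_BASE	((void __iomem *)0xfe000100)	/* placeholder */

static void __init mysoc_init_irq(void)
{
	/* Distributor: routing, priority and enables for IDs >= 32, once. */
	gic_dist_init(0, MYSOC_GIC_DIST_BASE, 29);

	/* CPU interface plus banked SGI/PPI state for the boot CPU. */
	gic_cpu_init(0, MYSOC_GIC_CPU_BASE);
}

/* Secondary CPUs must repeat the CPU-interface setup themselves, since
 * the enable/priority registers for IRQs 0-31 are banked per CPU. */
static void __cpuinit mysoc_secondary_init(unsigned int cpu)
{
	gic_cpu_init(0, MYSOC_GIC_CPU_BASE);
}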
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 6700c7fc7eb..21fa272301f 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -75,7 +75,7 @@ extern unsigned long it8152_base_address;
75 IT8152_PD_IRQ(1) USB (USBR) 75 IT8152_PD_IRQ(1) USB (USBR)
76 IT8152_PD_IRQ(0) Audio controller (ACR) 76 IT8152_PD_IRQ(0) Audio controller (ACR)
77 */ 77 */
78#define IT8152_IRQ(x) (IRQ_BOARD_END + (x)) 78#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
79 79
80/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ 80/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
81#define IT8152_LD_IRQ_COUNT 9 81#define IT8152_LD_IRQ_COUNT 9
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 54593b0c241..21e3a4ab3b8 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -748,8 +748,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
748 breakpoint_handler(addr, regs); 748 breakpoint_handler(addr, regs);
749 break; 749 break;
750 case ARM_ENTRY_ASYNC_WATCHPOINT: 750 case ARM_ENTRY_ASYNC_WATCHPOINT:
751 WARN_ON("Asynchronous watchpoint exception taken. " 751 WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
752 "Debugging results may be unreliable");
753 case ARM_ENTRY_SYNC_WATCHPOINT: 752 case ARM_ENTRY_SYNC_WATCHPOINT:
754 watchpoint_handler(addr, regs); 753 watchpoint_handler(addr, regs);
755 break; 754 break;
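The fix here is more than style: WARN_ON() evaluates its argument as a condition, so the old code passed a string literal whose only effect was to be a non-NULL (always true) pointer, and the text never reached the log as a message. WARN() takes an explicit condition plus a printf-style format. A minimal contrast, with an invented example function, not taken from the kernel sources:

/* Hypothetical illustration of the WARN_ON() vs WARN() difference. */
#include <linux/kernel.h>

static void warn_example(void)
{
	/* Old pattern: the string is only the condition.  It is a non-NULL
	 * pointer, so this always triggers, but the text itself is not
	 * printed as the warning message. */
	WARN_ON("asynchronous watchpoint taken");

	/* New pattern: condition first, then a format string that does
	 * end up in the kernel log. */
	WARN(1, "asynchronous watchpoint taken, results may be unreliable\n");
}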
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 49643b1467e..07a50357492 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1749,7 +1749,7 @@ static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
1749static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, 1749static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
1750 enum armv7_counters counter) 1750 enum armv7_counters counter)
1751{ 1751{
1752 int ret; 1752 int ret = 0;
1753 1753
1754 if (counter == ARMV7_CYCLE_COUNTER) 1754 if (counter == ARMV7_CYCLE_COUNTER)
1755 ret = pmnc & ARMV7_FLAG_C; 1755 ret = pmnc & ARMV7_FLAG_C;
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 20b7411e47f..c2e112e1a05 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -28,7 +28,7 @@ int notrace unwind_frame(struct stackframe *frame)
28 28
29 /* only go to a higher address on the stack */ 29 /* only go to a higher address on the stack */
30 low = frame->sp; 30 low = frame->sp;
31 high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE; 31 high = ALIGN(low, THREAD_SIZE);
32 32
33 /* check current frame pointer is within bounds */ 33 /* check current frame pointer is within bounds */
34 if (fp < (low + 12) || fp + 4 >= high) 34 if (fp < (low + 12) || fp + 4 >= high)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cda78d59aa3..446aee97436 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -53,10 +53,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
53void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) 53void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
54{ 54{
55#ifdef CONFIG_KALLSYMS 55#ifdef CONFIG_KALLSYMS
56 char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN]; 56 printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
57 sprint_symbol(sym1, where);
58 sprint_symbol(sym2, from);
59 printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
60#else 57#else
61 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); 58 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
62#endif 59#endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 2a161765f6d..d2cb0b3c987 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -279,7 +279,7 @@ int unwind_frame(struct stackframe *frame)
279 279
280 /* only go to a higher address on the stack */ 280 /* only go to a higher address on the stack */
281 low = frame->sp; 281 low = frame->sp;
282 high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE; 282 high = ALIGN(low, THREAD_SIZE);
283 283
284 pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, 284 pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
285 frame->pc, frame->lr, frame->sp); 285 frame->pc, frame->lr, frame->sp);
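Both unwind_frame() implementations (stacktrace.c above and this one) bound the walk to the current kernel stack. Because ARM kernel stacks are THREAD_SIZE bytes long and THREAD_SIZE aligned, ALIGN(sp, THREAD_SIZE) is already the first address past the stack; the removed "+ THREAD_SIZE" pushed the limit one whole stack further and let the unwinder wander into neighbouring memory. A worked example, assuming the usual 8 KiB THREAD_SIZE:

/*
 * Numbers are illustrative only, assuming THREAD_SIZE = 8192 (0x2000):
 *
 *   sp                      = 0xc1235f00   somewhere inside the stack
 *   stack bottom            = 0xc1234000   (sp rounded down to 0x2000)
 *   stack top (exclusive)   = 0xc1236000   (bottom + THREAD_SIZE)
 *
 *   ALIGN(0xc1235f00, 0x2000)               = 0xc1236000  == stack top
 *   ALIGN(0xc1235f00, 0x2000) + THREAD_SIZE = 0xc1238000  one stack too far
 *
 * So "high = ALIGN(low, THREAD_SIZE);" is the correct upper bound.
 */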
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 3a5961d3f3b..5e31b2b25da 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,5 +1,13 @@
1/* 1/**
2 * arch/arm/mach-ep93xx/include/mach/dma.h 2 * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
3 *
4 * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
5 * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
6 * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
7 * engine.
8 *
9 * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
10 *
3 */ 11 */
4 12
5#ifndef __ASM_ARCH_DMA_H 13#ifndef __ASM_ARCH_DMA_H
@@ -8,12 +16,34 @@
8#include <linux/list.h> 16#include <linux/list.h>
9#include <linux/types.h> 17#include <linux/types.h>
10 18
19/**
20 * struct ep93xx_dma_buffer - Information about a buffer to be transferred
21 * using the DMA M2P engine
22 *
23 * @list: Entry in DMA buffer list
24 * @bus_addr: Physical address of the buffer
25 * @size: Size of the buffer in bytes
26 */
11struct ep93xx_dma_buffer { 27struct ep93xx_dma_buffer {
12 struct list_head list; 28 struct list_head list;
13 u32 bus_addr; 29 u32 bus_addr;
14 u16 size; 30 u16 size;
15}; 31};
16 32
33/**
34 * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
35 *
36 * @name: Unique name for this client
37 * @flags: Client flags
38 * @cookie: User data to pass to callback functions
39 * @buffer_started: Non NULL function to call when a transfer is started.
40 * The arguments are the user data cookie and the DMA
41 * buffer which is starting.
42 * @buffer_finished: Non NULL function to call when a transfer is completed.
43 * The arguments are the user data cookie, the DMA buffer
44 * which has completed, and a boolean flag indicating if
45 * the transfer had an error.
46 */
17struct ep93xx_dma_m2p_client { 47struct ep93xx_dma_m2p_client {
18 char *name; 48 char *name;
19 u8 flags; 49 u8 flags;
@@ -24,10 +54,11 @@ struct ep93xx_dma_m2p_client {
24 struct ep93xx_dma_buffer *buf, 54 struct ep93xx_dma_buffer *buf,
25 int bytes, int error); 55 int bytes, int error);
26 56
27 /* Internal to the DMA code. */ 57 /* private: Internal use only */
28 void *channel; 58 void *channel;
29}; 59};
30 60
61/* DMA M2P ports */
31#define EP93XX_DMA_M2P_PORT_I2S1 0x00 62#define EP93XX_DMA_M2P_PORT_I2S1 0x00
32#define EP93XX_DMA_M2P_PORT_I2S2 0x01 63#define EP93XX_DMA_M2P_PORT_I2S2 0x01
33#define EP93XX_DMA_M2P_PORT_AAC1 0x02 64#define EP93XX_DMA_M2P_PORT_AAC1 0x02
@@ -39,18 +70,80 @@ struct ep93xx_dma_m2p_client {
39#define EP93XX_DMA_M2P_PORT_UART3 0x08 70#define EP93XX_DMA_M2P_PORT_UART3 0x08
40#define EP93XX_DMA_M2P_PORT_IRDA 0x09 71#define EP93XX_DMA_M2P_PORT_IRDA 0x09
41#define EP93XX_DMA_M2P_PORT_MASK 0x0f 72#define EP93XX_DMA_M2P_PORT_MASK 0x0f
42#define EP93XX_DMA_M2P_TX 0x00
43#define EP93XX_DMA_M2P_RX 0x10
44#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20
45#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40
46#define EP93XX_DMA_M2P_ERROR_MASK 0x60
47 73
48int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p); 74/* DMA M2P client flags */
75#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */
76#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */
77
78/*
79 * DMA M2P client error handling flags. See the EP93xx users guide
80 * documentation on the DMA M2P CONTROL register for more details
81 */
82#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */
83#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */
84#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */
85
86/**
87 * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
88 * subsystem
89 *
90 * @m2p: Client information to register
91 * returns 0 on success
92 *
93 * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
94 * client
95 */
96int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
97
98/**
99 * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
100 * subsystem
101 *
102 * @m2p: Client to unregister
103 *
104 * Any transfers currently in progress will be completed in hardware, but
105 * ignored in software.
106 */
49void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p); 107void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
108
109/**
110 * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
111 *
112 * @m2p: DMA Client to submit the transfer on
113 * @buf: DMA Buffer to submit
114 *
115 * If the current or next transfer positions are free on the M2P client then
116 * the transfer is started immediately. If not, the transfer is added to the
117 * list of pending transfers. This function must not be called from the
118 * buffer_finished callback for an M2P channel.
119 *
120 */
50void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p, 121void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
51 struct ep93xx_dma_buffer *buf); 122 struct ep93xx_dma_buffer *buf);
123
124/**
125 * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
126 * for an M2P channel
127 *
128 * @m2p: DMA Client to submit the transfer on
129 * @buf: DMA Buffer to submit
130 *
131 * This function must only be called from the buffer_finished callback for an
132 * M2P channel. It is commonly used to add the next transfer in a chained list
133 * of DMA transfers.
134 */
52void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p, 135void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
53 struct ep93xx_dma_buffer *buf); 136 struct ep93xx_dma_buffer *buf);
137
138/**
139 * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
140 *
141 * @m2p: DMA client to flush transfers on
142 *
143 * Any transfers currently in progress will be completed in hardware, but
144 * ignored in software.
145 *
146 */
54void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p); 147void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
55 148
56#endif /* __ASM_ARCH_DMA_H */ 149#endif /* __ASM_ARCH_DMA_H */
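The new kernel-doc above describes a callback-driven client API: register a client, submit buffers, queue follow-up buffers from the completion callback, flush when done. The sketch below shows the shape of a hypothetical transmit client; it uses only symbols declared in this header, with callback prototypes inferred from the struct fields shown above, but the client name, the callbacks and the surrounding driver are invented and buffer allocation and error handling are left out (sound/soc/ep93xx/ep93xx-pcm.c, referenced above, is the in-tree user).

/* Hypothetical M2P client sketch, not part of this commit. */
#include <mach/dma.h>

static void my_buffer_started(void *cookie, struct ep93xx_dma_buffer *buf)
{
	/* 'buf' has just been handed to the hardware. */
}

static void my_buffer_finished(void *cookie, struct ep93xx_dma_buffer *buf,
			       int bytes, int error)
{
	/*
	 * 'buf' is complete ('bytes' transferred, 'error' non-zero on
	 * failure).  Only ep93xx_dma_m2p_submit_recursive() may be called
	 * from here, e.g. to queue the next buffer of a chain.
	 */
}

static struct ep93xx_dma_m2p_client my_client = {
	.name		 = "my-i2s-tx",
	.flags		 = EP93XX_DMA_M2P_PORT_I2S1 | EP93XX_DMA_M2P_TX |
			   EP93XX_DMA_M2P_IGNORE_ERROR,
	.buffer_started	 = my_buffer_started,
	.buffer_finished = my_buffer_finished,
};

static int my_start_dma(struct ep93xx_dma_buffer *buf)
{
	int err;

	err = ep93xx_dma_m2p_client_register(&my_client);
	if (err)
		return err;

	ep93xx_dma_m2p_submit(&my_client, buf);	/* starts or queues 'buf' */
	return 0;
}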
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 51ff23b72d3..3688123b5ad 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -854,10 +854,9 @@ int __init kirkwood_find_tclk(void)
854 854
855 kirkwood_pcie_id(&dev, &rev); 855 kirkwood_pcie_id(&dev, &rev);
856 856
857 if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 || 857 if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID)
858 rev == MV88F6281_REV_A1)) || 858 if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0)
859 (dev == MV88F6282_DEV_ID)) 859 return 200000000;
860 return 200000000;
861 860
862 return 166666667; 861 return 166666667;
863} 862}
diff --git a/arch/arm/mach-kirkwood/d2net_v2-setup.c b/arch/arm/mach-kirkwood/d2net_v2-setup.c
index 4aa86e4a152..a31c9499ab3 100644
--- a/arch/arm/mach-kirkwood/d2net_v2-setup.c
+++ b/arch/arm/mach-kirkwood/d2net_v2-setup.c
@@ -225,5 +225,5 @@ MACHINE_START(D2NET_V2, "LaCie d2 Network v2")
225 .init_machine = d2net_v2_init, 225 .init_machine = d2net_v2_init,
226 .map_io = kirkwood_map_io, 226 .map_io = kirkwood_map_io,
227 .init_irq = kirkwood_init_irq, 227 .init_irq = kirkwood_init_irq,
228 .timer = &lacie_v2_timer, 228 .timer = &kirkwood_timer,
229MACHINE_END 229MACHINE_END
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c
index d3ea1b6c8a0..285edab776e 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.c
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.c
@@ -111,17 +111,3 @@ void __init lacie_v2_hdd_power_init(int hdd_num)
111 pr_err("Failed to power up HDD%d\n", i + 1); 111 pr_err("Failed to power up HDD%d\n", i + 1);
112 } 112 }
113} 113}
114
115/*****************************************************************************
116 * Timer
117 ****************************************************************************/
118
119static void lacie_v2_timer_init(void)
120{
121 kirkwood_tclk = 166666667;
122 orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
123}
124
125struct sys_timer lacie_v2_timer = {
126 .init = lacie_v2_timer_init,
127};
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.h b/arch/arm/mach-kirkwood/lacie_v2-common.h
index af521315b87..fc64f578536 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.h
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.h
@@ -13,6 +13,4 @@ void lacie_v2_register_flash(void);
13void lacie_v2_register_i2c_devices(void); 13void lacie_v2_register_i2c_devices(void);
14void lacie_v2_hdd_power_init(int hdd_num); 14void lacie_v2_hdd_power_init(int hdd_num);
15 15
16extern struct sys_timer lacie_v2_timer;
17
18#endif 16#endif
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index 065187d177c..27901f702fe 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -59,7 +59,7 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
59 } 59 }
60 printk("\n"); 60 printk("\n");
61 61
62 while (*mpp_list) { 62 for ( ; *mpp_list; mpp_list++) {
63 unsigned int num = MPP_NUM(*mpp_list); 63 unsigned int num = MPP_NUM(*mpp_list);
64 unsigned int sel = MPP_SEL(*mpp_list); 64 unsigned int sel = MPP_SEL(*mpp_list);
65 int shift, gpio_mode; 65 int shift, gpio_mode;
@@ -88,8 +88,6 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
88 if (sel != 0) 88 if (sel != 0)
89 gpio_mode = 0; 89 gpio_mode = 0;
90 orion_gpio_set_valid(num, gpio_mode); 90 orion_gpio_set_valid(num, gpio_mode);
91
92 mpp_list++;
93 } 91 }
94 92
95 printk(KERN_DEBUG " final MPP regs:"); 93 printk(KERN_DEBUG " final MPP regs:");
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c
index 5ea66f1f417..65ee21fd2f3 100644
--- a/arch/arm/mach-kirkwood/netspace_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c
@@ -262,7 +262,7 @@ MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
262 .init_machine = netspace_v2_init, 262 .init_machine = netspace_v2_init,
263 .map_io = kirkwood_map_io, 263 .map_io = kirkwood_map_io,
264 .init_irq = kirkwood_init_irq, 264 .init_irq = kirkwood_init_irq,
265 .timer = &lacie_v2_timer, 265 .timer = &kirkwood_timer,
266MACHINE_END 266MACHINE_END
267#endif 267#endif
268 268
@@ -272,7 +272,7 @@ MACHINE_START(INETSPACE_V2, "LaCie Internet Space v2")
272 .init_machine = netspace_v2_init, 272 .init_machine = netspace_v2_init,
273 .map_io = kirkwood_map_io, 273 .map_io = kirkwood_map_io,
274 .init_irq = kirkwood_init_irq, 274 .init_irq = kirkwood_init_irq,
275 .timer = &lacie_v2_timer, 275 .timer = &kirkwood_timer,
276MACHINE_END 276MACHINE_END
277#endif 277#endif
278 278
@@ -282,6 +282,6 @@ MACHINE_START(NETSPACE_MAX_V2, "LaCie Network Space Max v2")
282 .init_machine = netspace_v2_init, 282 .init_machine = netspace_v2_init,
283 .map_io = kirkwood_map_io, 283 .map_io = kirkwood_map_io,
284 .init_irq = kirkwood_init_irq, 284 .init_irq = kirkwood_init_irq,
285 .timer = &lacie_v2_timer, 285 .timer = &kirkwood_timer,
286MACHINE_END 286MACHINE_END
287#endif 287#endif
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
index a1b45d501ae..93afd3c8bfd 100644
--- a/arch/arm/mach-kirkwood/netxbig_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
@@ -403,7 +403,7 @@ MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2")
403 .init_machine = netxbig_v2_init, 403 .init_machine = netxbig_v2_init,
404 .map_io = kirkwood_map_io, 404 .map_io = kirkwood_map_io,
405 .init_irq = kirkwood_init_irq, 405 .init_irq = kirkwood_init_irq,
406 .timer = &lacie_v2_timer, 406 .timer = &kirkwood_timer,
407MACHINE_END 407MACHINE_END
408#endif 408#endif
409 409
@@ -413,6 +413,6 @@ MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2")
413 .init_machine = netxbig_v2_init, 413 .init_machine = netxbig_v2_init,
414 .map_io = kirkwood_map_io, 414 .map_io = kirkwood_map_io,
415 .init_irq = kirkwood_init_irq, 415 .init_irq = kirkwood_init_irq,
416 .timer = &lacie_v2_timer, 416 .timer = &kirkwood_timer,
417MACHINE_END 417MACHINE_END
418#endif 418#endif
diff --git a/arch/arm/mach-kirkwood/ts41x-setup.c b/arch/arm/mach-kirkwood/ts41x-setup.c
index 8be09a0ce4a..3587a281d99 100644
--- a/arch/arm/mach-kirkwood/ts41x-setup.c
+++ b/arch/arm/mach-kirkwood/ts41x-setup.c
@@ -27,6 +27,10 @@
27#include "mpp.h" 27#include "mpp.h"
28#include "tsx1x-common.h" 28#include "tsx1x-common.h"
29 29
30/* for the PCIe reset workaround */
31#include <plat/pcie.h>
32
33
30#define QNAP_TS41X_JUMPER_JP1 45 34#define QNAP_TS41X_JUMPER_JP1 45
31 35
32static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = { 36static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
@@ -140,8 +144,16 @@ static void __init qnap_ts41x_init(void)
140 144
141static int __init ts41x_pci_init(void) 145static int __init ts41x_pci_init(void)
142{ 146{
143 if (machine_is_ts41x()) 147 if (machine_is_ts41x()) {
148 /*
149 * Without this explicit reset, the PCIe SATA controller
150 * (Marvell 88sx7042/sata_mv) is known to stop working
151 * after a few minutes.
152 */
153 orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);
154
144 kirkwood_pcie_init(KW_PCIE0); 155 kirkwood_pcie_init(KW_PCIE0);
156 }
145 157
146 return 0; 158 return 0;
147} 159}
diff --git a/arch/arm/mach-mmp/include/mach/cputype.h b/arch/arm/mach-mmp/include/mach/cputype.h
index f43a68b213f..8a3b56dfd35 100644
--- a/arch/arm/mach-mmp/include/mach/cputype.h
+++ b/arch/arm/mach-mmp/include/mach/cputype.h
@@ -46,7 +46,8 @@ static inline int cpu_is_pxa910(void)
46#ifdef CONFIG_CPU_MMP2 46#ifdef CONFIG_CPU_MMP2
47static inline int cpu_is_mmp2(void) 47static inline int cpu_is_mmp2(void)
48{ 48{
49 return (((cpu_readid_id() >> 8) & 0xff) == 0x58); 49 return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
50}
50#else 51#else
51#define cpu_is_mmp2() (0) 52#define cpu_is_mmp2() (0)
52#endif 53#endif
diff --git a/arch/arm/mach-mv78xx0/mpp.c b/arch/arm/mach-mv78xx0/mpp.c
index 354ac514eb8..84db2dfc475 100644
--- a/arch/arm/mach-mv78xx0/mpp.c
+++ b/arch/arm/mach-mv78xx0/mpp.c
@@ -54,7 +54,7 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
54 } 54 }
55 printk("\n"); 55 printk("\n");
56 56
57 while (*mpp_list) { 57 for ( ; *mpp_list; mpp_list++) {
58 unsigned int num = MPP_NUM(*mpp_list); 58 unsigned int num = MPP_NUM(*mpp_list);
59 unsigned int sel = MPP_SEL(*mpp_list); 59 unsigned int sel = MPP_SEL(*mpp_list);
60 int shift, gpio_mode; 60 int shift, gpio_mode;
@@ -83,8 +83,6 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
83 if (sel != 0) 83 if (sel != 0)
84 gpio_mode = 0; 84 gpio_mode = 0;
85 orion_gpio_set_valid(num, gpio_mode); 85 orion_gpio_set_valid(num, gpio_mode);
86
87 mpp_list++;
88 } 86 }
89 87
90 printk(KERN_DEBUG " final MPP regs:"); 88 printk(KERN_DEBUG " final MPP regs:");
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index ea0d80a89da..e7f9ee63dce 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -321,10 +321,9 @@ static struct platform_device omap_wdt_device = {
321static int __init omap_init_wdt(void) 321static int __init omap_init_wdt(void)
322{ 322{
323 if (!cpu_is_omap16xx()) 323 if (!cpu_is_omap16xx())
324 return; 324 return -ENODEV;
325 325
326 platform_device_register(&omap_wdt_device); 326 return platform_device_register(&omap_wdt_device);
327 return 0;
328} 327}
329subsys_initcall(omap_init_wdt); 328subsys_initcall(omap_init_wdt);
330#endif 329#endif
diff --git a/arch/arm/mach-omap1/include/mach/camera.h b/arch/arm/mach-omap1/include/mach/camera.h
index fd54b452eb2..847d00f0bb0 100644
--- a/arch/arm/mach-omap1/include/mach/camera.h
+++ b/arch/arm/mach-omap1/include/mach/camera.h
@@ -1,6 +1,8 @@
1#ifndef __ASM_ARCH_CAMERA_H_ 1#ifndef __ASM_ARCH_CAMERA_H_
2#define __ASM_ARCH_CAMERA_H_ 2#define __ASM_ARCH_CAMERA_H_
3 3
4#include <media/omap1_camera.h>
5
4void omap1_camera_init(void *); 6void omap1_camera_init(void *);
5 7
6static inline void omap1_set_camera_info(struct omap1_cam_platform_data *info) 8static inline void omap1_set_camera_info(struct omap1_cam_platform_data *info)
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 067f4379c87..53ac762518b 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -242,9 +242,6 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
242 mmc[0].gpio_cd = gpio + 0; 242 mmc[0].gpio_cd = gpio + 0;
243 omap2_hsmmc_init(mmc); 243 omap2_hsmmc_init(mmc);
244 244
245 /* link regulators to MMC adapters */
246 devkit8000_vmmc1_supply.dev = mmc[0].dev;
247
248 /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ 245 /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
249 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; 246 gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
250 247
diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c
index bc4c3b9aaf8..db485d3b814 100644
--- a/arch/arm/mach-orion5x/mpp.c
+++ b/arch/arm/mach-orion5x/mpp.c
@@ -127,7 +127,7 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
127 /* Initialize gpiolib. */ 127 /* Initialize gpiolib. */
128 orion_gpio_init(); 128 orion_gpio_init();
129 129
130 while (mode->mpp >= 0) { 130 for ( ; mode->mpp >= 0; mode++) {
131 u32 *reg; 131 u32 *reg;
132 int num_type; 132 int num_type;
133 int shift; 133 int shift;
@@ -160,8 +160,6 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
160 orion_gpio_set_unused(mode->mpp); 160 orion_gpio_set_unused(mode->mpp);
161 161
162 orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO)); 162 orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO));
163
164 mode++;
165 } 163 }
166 164
167 writel(mpp_0_7_ctrl, MPP_0_7_CTRL); 165 writel(mpp_0_7_ctrl, MPP_0_7_CTRL);
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 16f1bd5324b..c1c1cd04bdd 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -239,7 +239,7 @@ static struct platform_nand_data ts78xx_ts_nand_data = {
239static struct resource ts78xx_ts_nand_resources = { 239static struct resource ts78xx_ts_nand_resources = {
240 .start = TS_NAND_DATA, 240 .start = TS_NAND_DATA,
241 .end = TS_NAND_DATA + 4, 241 .end = TS_NAND_DATA + 4,
242 .flags = IORESOURCE_IO, 242 .flags = IORESOURCE_MEM,
243}; 243};
244 244
245static struct platform_device ts78xx_ts_nand_device = { 245static struct platform_device ts78xx_ts_nand_device = {
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index ac5598ce972..d34b99febeb 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -476,8 +476,6 @@ static void __init cmx2xx_init(void)
476 476
477static void __init cmx2xx_init_irq(void) 477static void __init cmx2xx_init_irq(void)
478{ 478{
479 pxa27x_init_irq();
480
481 if (cpu_is_pxa25x()) { 479 if (cpu_is_pxa25x()) {
482 pxa25x_init_irq(); 480 pxa25x_init_irq();
483 cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ); 481 cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index 4b521e045d7..ffa50e633ee 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -116,7 +116,7 @@ static struct platform_device smc91x_device = {
116 }, 116 },
117}; 117};
118 118
119#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE) 119#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
120static uint16_t lcd_power_on[] = { 120static uint16_t lcd_power_on[] = {
121 /* single frame */ 121 /* single frame */
122 SMART_CMD_NOOP, 122 SMART_CMD_NOOP,
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index 1ca7bdc6485..579d2f0f4dd 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -143,7 +143,7 @@ config MACH_SMDK6410
143 select S3C_DEV_USB_HSOTG 143 select S3C_DEV_USB_HSOTG
144 select S3C_DEV_WDT 144 select S3C_DEV_WDT
145 select SAMSUNG_DEV_KEYPAD 145 select SAMSUNG_DEV_KEYPAD
146 select HAVE_S3C2410_WATCHDOG 146 select HAVE_S3C2410_WATCHDOG if WATCHDOG
147 select S3C64XX_SETUP_SDHCI 147 select S3C64XX_SETUP_SDHCI
148 select S3C64XX_SETUP_I2C1 148 select S3C64XX_SETUP_I2C1
149 select S3C64XX_SETUP_IDE 149 select S3C64XX_SETUP_IDE
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 7e03f0ae2fc..1c98d2ff2ed 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -695,7 +695,7 @@ static struct clksrc_clk clksrcs[] = {
695 }, { 695 }, {
696 .clk = { 696 .clk = {
697 .name = "audio-bus", 697 .name = "audio-bus",
698 .id = -1, /* There's only one IISv4 port */ 698 .id = 2,
699 .ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2, 699 .ctrlbit = S3C6410_CLKCON_SCLK_AUDIO2,
700 .enable = s3c64xx_sclk_ctrl, 700 .enable = s3c64xx_sclk_ctrl,
701 }, 701 },
diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c
index 76426a32c01..7618627b98f 100644
--- a/arch/arm/mach-s3c64xx/dev-audio.c
+++ b/arch/arm/mach-s3c64xx/dev-audio.c
@@ -22,7 +22,12 @@
22#include <plat/audio.h> 22#include <plat/audio.h>
23#include <plat/gpio-cfg.h> 23#include <plat/gpio-cfg.h>
24 24
25static int s3c64xx_i2sv3_cfg_gpio(struct platform_device *pdev) 25static const char *rclksrc[] = {
26 [0] = "iis",
27 [1] = "audio-bus",
28};
29
30static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
26{ 31{
27 unsigned int base; 32 unsigned int base;
28 33
@@ -33,6 +38,12 @@ static int s3c64xx_i2sv3_cfg_gpio(struct platform_device *pdev)
33 case 1: 38 case 1:
34 base = S3C64XX_GPE(0); 39 base = S3C64XX_GPE(0);
35 break; 40 break;
41 case 2:
42 s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
43 s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
44 s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
45 s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
46 return 0;
36 default: 47 default:
37 printk(KERN_DEBUG "Invalid I2S Controller number: %d\n", 48 printk(KERN_DEBUG "Invalid I2S Controller number: %d\n",
38 pdev->id); 49 pdev->id);
@@ -44,16 +55,6 @@ static int s3c64xx_i2sv3_cfg_gpio(struct platform_device *pdev)
44 return 0; 55 return 0;
45} 56}
46 57
47static int s3c64xx_i2sv4_cfg_gpio(struct platform_device *pdev)
48{
49 s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
50 s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
51 s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
52 s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
53
54 return 0;
55}
56
57static struct resource s3c64xx_iis0_resource[] = { 58static struct resource s3c64xx_iis0_resource[] = {
58 [0] = { 59 [0] = {
59 .start = S3C64XX_PA_IIS0, 60 .start = S3C64XX_PA_IIS0,
@@ -72,17 +73,22 @@ static struct resource s3c64xx_iis0_resource[] = {
72 }, 73 },
73}; 74};
74 75
75static struct s3c_audio_pdata s3c_i2s0_pdata = { 76static struct s3c_audio_pdata i2sv3_pdata = {
76 .cfg_gpio = s3c64xx_i2sv3_cfg_gpio, 77 .cfg_gpio = s3c64xx_i2s_cfg_gpio,
78 .type = {
79 .i2s = {
80 .src_clk = rclksrc,
81 },
82 },
77}; 83};
78 84
79struct platform_device s3c64xx_device_iis0 = { 85struct platform_device s3c64xx_device_iis0 = {
80 .name = "s3c64xx-iis", 86 .name = "samsung-i2s",
81 .id = 0, 87 .id = 0,
82 .num_resources = ARRAY_SIZE(s3c64xx_iis0_resource), 88 .num_resources = ARRAY_SIZE(s3c64xx_iis0_resource),
83 .resource = s3c64xx_iis0_resource, 89 .resource = s3c64xx_iis0_resource,
84 .dev = { 90 .dev = {
85 .platform_data = &s3c_i2s0_pdata, 91 .platform_data = &i2sv3_pdata,
86 }, 92 },
87}; 93};
88EXPORT_SYMBOL(s3c64xx_device_iis0); 94EXPORT_SYMBOL(s3c64xx_device_iis0);
@@ -105,17 +111,13 @@ static struct resource s3c64xx_iis1_resource[] = {
105 }, 111 },
106}; 112};
107 113
108static struct s3c_audio_pdata s3c_i2s1_pdata = {
109 .cfg_gpio = s3c64xx_i2sv3_cfg_gpio,
110};
111
112struct platform_device s3c64xx_device_iis1 = { 114struct platform_device s3c64xx_device_iis1 = {
113 .name = "s3c64xx-iis", 115 .name = "samsung-i2s",
114 .id = 1, 116 .id = 1,
115 .num_resources = ARRAY_SIZE(s3c64xx_iis1_resource), 117 .num_resources = ARRAY_SIZE(s3c64xx_iis1_resource),
116 .resource = s3c64xx_iis1_resource, 118 .resource = s3c64xx_iis1_resource,
117 .dev = { 119 .dev = {
118 .platform_data = &s3c_i2s1_pdata, 120 .platform_data = &i2sv3_pdata,
119 }, 121 },
120}; 122};
121EXPORT_SYMBOL(s3c64xx_device_iis1); 123EXPORT_SYMBOL(s3c64xx_device_iis1);
@@ -138,17 +140,23 @@ static struct resource s3c64xx_iisv4_resource[] = {
138 }, 140 },
139}; 141};
140 142
141static struct s3c_audio_pdata s3c_i2sv4_pdata = { 143static struct s3c_audio_pdata i2sv4_pdata = {
142 .cfg_gpio = s3c64xx_i2sv4_cfg_gpio, 144 .cfg_gpio = s3c64xx_i2s_cfg_gpio,
145 .type = {
146 .i2s = {
147 .quirks = QUIRK_PRI_6CHAN,
148 .src_clk = rclksrc,
149 },
150 },
143}; 151};
144 152
145struct platform_device s3c64xx_device_iisv4 = { 153struct platform_device s3c64xx_device_iisv4 = {
146 .name = "s3c64xx-iis-v4", 154 .name = "samsung-i2s",
147 .id = -1, 155 .id = 2,
148 .num_resources = ARRAY_SIZE(s3c64xx_iisv4_resource), 156 .num_resources = ARRAY_SIZE(s3c64xx_iisv4_resource),
149 .resource = s3c64xx_iisv4_resource, 157 .resource = s3c64xx_iisv4_resource,
150 .dev = { 158 .dev = {
151 .platform_data = &s3c_i2sv4_pdata, 159 .platform_data = &i2sv4_pdata,
152 }, 160 },
153}; 161};
154EXPORT_SYMBOL(s3c64xx_device_iisv4); 162EXPORT_SYMBOL(s3c64xx_device_iisv4);
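This hunk and the clock.c change above are two halves of one rename: the former IISv4 port is now the third "samsung-i2s" instance (id 2), so the "audio-bus" sclk it feeds from is given id 2 as well. As far as can be read from the diff, that keeps the plat-samsung clock lookup working, since it matches the requested clock name against struct clk .name and the platform device's id against .id. A hedged sketch of that lookup from a driver's point of view; the probe function and variable names are invented:

/* Hypothetical driver fragment, assuming the old plat-samsung clk_get()
 * semantics (match by clock name plus platform device id). */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int hypothetical_i2s_probe(struct platform_device *pdev)
{
	struct clk *sclk;

	/* For s3c64xx_device_iisv4, pdev->id is now 2, so this resolves
	 * to the "audio-bus" clock registered with .id = 2 in clock.c. */
	sclk = clk_get(&pdev->dev, "audio-bus");
	if (IS_ERR(sclk))
		return PTR_ERR(sclk);

	/* ... use sclk as the root clock ... */
	clk_put(sclk);
	return 0;
}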
diff --git a/arch/arm/mach-s5p6442/dev-audio.c b/arch/arm/mach-s5p6442/dev-audio.c
index 3462197ff35..8719dc41fe3 100644
--- a/arch/arm/mach-s5p6442/dev-audio.c
+++ b/arch/arm/mach-s5p6442/dev-audio.c
@@ -29,7 +29,7 @@ static int s5p6442_cfg_i2s(struct platform_device *pdev)
29 base = S5P6442_GPC1(0); 29 base = S5P6442_GPC1(0);
30 break; 30 break;
31 31
32 case -1: 32 case 0:
33 base = S5P6442_GPC0(0); 33 base = S5P6442_GPC0(0);
34 break; 34 break;
35 35
@@ -42,8 +42,19 @@ static int s5p6442_cfg_i2s(struct platform_device *pdev)
42 return 0; 42 return 0;
43} 43}
44 44
45static struct s3c_audio_pdata s3c_i2s_pdata = { 45static const char *rclksrc_v35[] = {
46 [0] = "busclk",
47 [1] = "i2sclk",
48};
49
50static struct s3c_audio_pdata i2sv35_pdata = {
46 .cfg_gpio = s5p6442_cfg_i2s, 51 .cfg_gpio = s5p6442_cfg_i2s,
52 .type = {
53 .i2s = {
54 .quirks = QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR,
55 .src_clk = rclksrc_v35,
56 },
57 },
47}; 58};
48 59
49static struct resource s5p6442_iis0_resource[] = { 60static struct resource s5p6442_iis0_resource[] = {
@@ -62,15 +73,34 @@ static struct resource s5p6442_iis0_resource[] = {
62 .end = DMACH_I2S0_RX, 73 .end = DMACH_I2S0_RX,
63 .flags = IORESOURCE_DMA, 74 .flags = IORESOURCE_DMA,
64 }, 75 },
76 [3] = {
77 .start = DMACH_I2S0S_TX,
78 .end = DMACH_I2S0S_TX,
79 .flags = IORESOURCE_DMA,
80 },
65}; 81};
66 82
67struct platform_device s5p6442_device_iis0 = { 83struct platform_device s5p6442_device_iis0 = {
68 .name = "s3c64xx-iis-v4", 84 .name = "samsung-i2s",
69 .id = -1, 85 .id = 0,
70 .num_resources = ARRAY_SIZE(s5p6442_iis0_resource), 86 .num_resources = ARRAY_SIZE(s5p6442_iis0_resource),
71 .resource = s5p6442_iis0_resource, 87 .resource = s5p6442_iis0_resource,
72 .dev = { 88 .dev = {
73 .platform_data = &s3c_i2s_pdata, 89 .platform_data = &i2sv35_pdata,
90 },
91};
92
93static const char *rclksrc_v3[] = {
94 [0] = "iis",
95 [1] = "sclk_audio",
96};
97
98static struct s3c_audio_pdata i2sv3_pdata = {
99 .cfg_gpio = s5p6442_cfg_i2s,
100 .type = {
101 .i2s = {
102 .src_clk = rclksrc_v3,
103 },
74 }, 104 },
75}; 105};
76 106
@@ -93,12 +123,12 @@ static struct resource s5p6442_iis1_resource[] = {
93}; 123};
94 124
95struct platform_device s5p6442_device_iis1 = { 125struct platform_device s5p6442_device_iis1 = {
96 .name = "s3c64xx-iis", 126 .name = "samsung-i2s",
97 .id = 1, 127 .id = 1,
98 .num_resources = ARRAY_SIZE(s5p6442_iis1_resource), 128 .num_resources = ARRAY_SIZE(s5p6442_iis1_resource),
99 .resource = s5p6442_iis1_resource, 129 .resource = s5p6442_iis1_resource,
100 .dev = { 130 .dev = {
101 .platform_data = &s3c_i2s_pdata, 131 .platform_data = &i2sv3_pdata,
102 }, 132 },
103}; 133};
104 134
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index e4883dc1c8d..409c5fc3670 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -261,7 +261,7 @@ static struct clk init_clocks_disable[] = {
261 .enable = s5p64x0_pclk_ctrl, 261 .enable = s5p64x0_pclk_ctrl,
262 .ctrlbit = (1 << 25), 262 .ctrlbit = (1 << 25),
263 }, { 263 }, {
264 .name = "i2s_v40", 264 .name = "iis",
265 .id = 0, 265 .id = 0,
266 .parent = &clk_pclk_low.clk, 266 .parent = &clk_pclk_low.clk,
267 .enable = s5p64x0_pclk_ctrl, 267 .enable = s5p64x0_pclk_ctrl,
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index 7dbf3c968f5..7fc6abd3591 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -256,7 +256,7 @@ static struct clk init_clocks_disable[] = {
256 .ctrlbit = (1 << 22), 256 .ctrlbit = (1 << 22),
257 }, { 257 }, {
258 .name = "iis", 258 .name = "iis",
259 .id = -1, 259 .id = 0,
260 .parent = &clk_pclk_low.clk, 260 .parent = &clk_pclk_low.clk,
261 .enable = s5p64x0_pclk_ctrl, 261 .enable = s5p64x0_pclk_ctrl,
262 .ctrlbit = (1 << 26), 262 .ctrlbit = (1 << 26),
diff --git a/arch/arm/mach-s5p64x0/dev-audio.c b/arch/arm/mach-s5p64x0/dev-audio.c
index 396bacc0a39..14f89e73b8d 100644
--- a/arch/arm/mach-s5p64x0/dev-audio.c
+++ b/arch/arm/mach-s5p64x0/dev-audio.c
@@ -19,15 +19,19 @@
19#include <mach/dma.h> 19#include <mach/dma.h>
20#include <mach/irqs.h> 20#include <mach/irqs.h>
21 21
22static int s5p6440_cfg_i2s(struct platform_device *pdev) 22static const char *rclksrc[] = {
23 [0] = "iis",
24 [1] = "sclk_audio2",
25};
26
27static int s5p64x0_cfg_i2s(struct platform_device *pdev)
23{ 28{
24 /* configure GPIO for i2s port */ 29 /* configure GPIO for i2s port */
25 switch (pdev->id) { 30 switch (pdev->id) {
26 case -1: 31 case 0:
27 s3c_gpio_cfgpin_range(S5P6440_GPR(4), 5, S3C_GPIO_SFN(5)); 32 s3c_gpio_cfgpin_range(S5P6440_GPR(4), 5, S3C_GPIO_SFN(5));
28 s3c_gpio_cfgpin_range(S5P6440_GPR(13), 2, S3C_GPIO_SFN(5)); 33 s3c_gpio_cfgpin_range(S5P6440_GPR(13), 2, S3C_GPIO_SFN(5));
29 break; 34 break;
30
31 default: 35 default:
32 printk(KERN_ERR "Invalid Device %d\n", pdev->id); 36 printk(KERN_ERR "Invalid Device %d\n", pdev->id);
33 return -EINVAL; 37 return -EINVAL;
@@ -36,31 +40,14 @@ static int s5p6440_cfg_i2s(struct platform_device *pdev)
36 return 0; 40 return 0;
37} 41}
38 42
39static int s5p6450_cfg_i2s(struct platform_device *pdev) 43static struct s3c_audio_pdata s5p64x0_i2s_pdata = {
40{ 44 .cfg_gpio = s5p64x0_cfg_i2s,
41 /* configure GPIO for i2s port */ 45 .type = {
42 switch (pdev->id) { 46 .i2s = {
43 case -1: 47 .quirks = QUIRK_PRI_6CHAN,
44 s3c_gpio_cfgpin(S5P6450_GPB(4), S3C_GPIO_SFN(5)); 48 .src_clk = rclksrc,
45 s3c_gpio_cfgpin_range(S5P6450_GPR(4), 5, S3C_GPIO_SFN(5)); 49 },
46 s3c_gpio_cfgpin_range(S5P6450_GPR(13), 2, S3C_GPIO_SFN(5)); 50 },
47
48 break;
49
50 default:
51 printk(KERN_ERR "Invalid Device %d\n", pdev->id);
52 return -EINVAL;
53 }
54
55 return 0;
56}
57
58static struct s3c_audio_pdata s5p6440_i2s_pdata = {
59 .cfg_gpio = s5p6440_cfg_i2s,
60};
61
62static struct s3c_audio_pdata s5p6450_i2s_pdata = {
63 .cfg_gpio = s5p6450_cfg_i2s,
64}; 51};
65 52
66static struct resource s5p64x0_iis0_resource[] = { 53static struct resource s5p64x0_iis0_resource[] = {
@@ -82,22 +69,22 @@ static struct resource s5p64x0_iis0_resource[] = {
82}; 69};
83 70
84struct platform_device s5p6440_device_iis = { 71struct platform_device s5p6440_device_iis = {
85 .name = "s3c64xx-iis-v4", 72 .name = "samsung-i2s",
86 .id = -1, 73 .id = 0,
87 .num_resources = ARRAY_SIZE(s5p64x0_iis0_resource), 74 .num_resources = ARRAY_SIZE(s5p64x0_iis0_resource),
88 .resource = s5p64x0_iis0_resource, 75 .resource = s5p64x0_iis0_resource,
89 .dev = { 76 .dev = {
90 .platform_data = &s5p6440_i2s_pdata, 77 .platform_data = &s5p64x0_i2s_pdata,
91 }, 78 },
92}; 79};
93 80
94struct platform_device s5p6450_device_iis0 = { 81struct platform_device s5p6450_device_iis0 = {
95 .name = "s3c64xx-iis-v4", 82 .name = "samsung-i2s",
96 .id = -1, 83 .id = 0,
97 .num_resources = ARRAY_SIZE(s5p64x0_iis0_resource), 84 .num_resources = ARRAY_SIZE(s5p64x0_iis0_resource),
98 .resource = s5p64x0_iis0_resource, 85 .resource = s5p64x0_iis0_resource,
99 .dev = { 86 .dev = {
100 .platform_data = &s5p6450_i2s_pdata, 87 .platform_data = &s5p64x0_i2s_pdata,
101 }, 88 },
102}; 89};
103 90
diff --git a/arch/arm/mach-s5pc100/dev-audio.c b/arch/arm/mach-s5pc100/dev-audio.c
index 564e195ec49..10ab275ebd6 100644
--- a/arch/arm/mach-s5pc100/dev-audio.c
+++ b/arch/arm/mach-s5pc100/dev-audio.c
@@ -23,17 +23,14 @@ static int s5pc100_cfg_i2s(struct platform_device *pdev)
23{ 23{
24 /* configure GPIO for i2s port */ 24 /* configure GPIO for i2s port */
25 switch (pdev->id) { 25 switch (pdev->id) {
26 case 0: /* Dedicated pins */
27 break;
26 case 1: 28 case 1:
27 s3c_gpio_cfgpin_range(S5PC100_GPC(0), 5, S3C_GPIO_SFN(2)); 29 s3c_gpio_cfgpin_range(S5PC100_GPC(0), 5, S3C_GPIO_SFN(2));
28 break; 30 break;
29
30 case 2: 31 case 2:
31 s3c_gpio_cfgpin_range(S5PC100_GPG3(0), 5, S3C_GPIO_SFN(4)); 32 s3c_gpio_cfgpin_range(S5PC100_GPG3(0), 5, S3C_GPIO_SFN(4));
32 break; 33 break;
33
34 case -1: /* Dedicated pins */
35 break;
36
37 default: 34 default:
38 printk(KERN_ERR "Invalid Device %d\n", pdev->id); 35 printk(KERN_ERR "Invalid Device %d\n", pdev->id);
39 return -EINVAL; 36 return -EINVAL;
@@ -42,8 +39,20 @@ static int s5pc100_cfg_i2s(struct platform_device *pdev)
42 return 0; 39 return 0;
43} 40}
44 41
45static struct s3c_audio_pdata s3c_i2s_pdata = { 42static const char *rclksrc_v5[] = {
43 [0] = "iis",
44 [1] = "i2sclkd2",
45};
46
47static struct s3c_audio_pdata i2sv5_pdata = {
46 .cfg_gpio = s5pc100_cfg_i2s, 48 .cfg_gpio = s5pc100_cfg_i2s,
49 .type = {
50 .i2s = {
51 .quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
52 | QUIRK_NEED_RSTCLR,
53 .src_clk = rclksrc_v5,
54 },
55 },
47}; 56};
48 57
49static struct resource s5pc100_iis0_resource[] = { 58static struct resource s5pc100_iis0_resource[] = {
@@ -62,15 +71,34 @@ static struct resource s5pc100_iis0_resource[] = {
62 .end = DMACH_I2S0_RX, 71 .end = DMACH_I2S0_RX,
63 .flags = IORESOURCE_DMA, 72 .flags = IORESOURCE_DMA,
64 }, 73 },
74 [3] = {
75 .start = DMACH_I2S0S_TX,
76 .end = DMACH_I2S0S_TX,
77 .flags = IORESOURCE_DMA,
78 },
65}; 79};
66 80
67struct platform_device s5pc100_device_iis0 = { 81struct platform_device s5pc100_device_iis0 = {
68 .name = "s3c64xx-iis-v4", 82 .name = "samsung-i2s",
69 .id = -1, 83 .id = 0,
70 .num_resources = ARRAY_SIZE(s5pc100_iis0_resource), 84 .num_resources = ARRAY_SIZE(s5pc100_iis0_resource),
71 .resource = s5pc100_iis0_resource, 85 .resource = s5pc100_iis0_resource,
72 .dev = { 86 .dev = {
73 .platform_data = &s3c_i2s_pdata, 87 .platform_data = &i2sv5_pdata,
88 },
89};
90
91static const char *rclksrc_v3[] = {
92 [0] = "iis",
93 [1] = "sclk_audio",
94};
95
96static struct s3c_audio_pdata i2sv3_pdata = {
97 .cfg_gpio = s5pc100_cfg_i2s,
98 .type = {
99 .i2s = {
100 .src_clk = rclksrc_v3,
101 },
74 }, 102 },
75}; 103};
76 104
@@ -93,12 +121,12 @@ static struct resource s5pc100_iis1_resource[] = {
93}; 121};
94 122
95struct platform_device s5pc100_device_iis1 = { 123struct platform_device s5pc100_device_iis1 = {
96 .name = "s3c64xx-iis", 124 .name = "samsung-i2s",
97 .id = 1, 125 .id = 1,
98 .num_resources = ARRAY_SIZE(s5pc100_iis1_resource), 126 .num_resources = ARRAY_SIZE(s5pc100_iis1_resource),
99 .resource = s5pc100_iis1_resource, 127 .resource = s5pc100_iis1_resource,
100 .dev = { 128 .dev = {
101 .platform_data = &s3c_i2s_pdata, 129 .platform_data = &i2sv3_pdata,
102 }, 130 },
103}; 131};
104 132
@@ -121,12 +149,12 @@ static struct resource s5pc100_iis2_resource[] = {
121}; 149};
122 150
123struct platform_device s5pc100_device_iis2 = { 151struct platform_device s5pc100_device_iis2 = {
124 .name = "s3c64xx-iis", 152 .name = "samsung-i2s",
125 .id = 2, 153 .id = 2,
126 .num_resources = ARRAY_SIZE(s5pc100_iis2_resource), 154 .num_resources = ARRAY_SIZE(s5pc100_iis2_resource),
127 .resource = s5pc100_iis2_resource, 155 .resource = s5pc100_iis2_resource,
128 .dev = { 156 .dev = {
129 .platform_data = &s3c_i2s_pdata, 157 .platform_data = &i2sv3_pdata,
130 }, 158 },
131}; 159};
132 160
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 019c3a69b0e..b774ff1805d 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -467,20 +467,20 @@ static struct clk init_clocks_disable[] = {
467 .enable = s5pv210_clk_ip3_ctrl, 467 .enable = s5pv210_clk_ip3_ctrl,
468 .ctrlbit = (1<<21), 468 .ctrlbit = (1<<21),
469 }, { 469 }, {
470 .name = "i2s_v50", 470 .name = "iis",
471 .id = 0, 471 .id = 0,
472 .parent = &clk_p, 472 .parent = &clk_p,
473 .enable = s5pv210_clk_ip3_ctrl, 473 .enable = s5pv210_clk_ip3_ctrl,
474 .ctrlbit = (1<<4), 474 .ctrlbit = (1<<4),
475 }, { 475 }, {
476 .name = "i2s_v32", 476 .name = "iis",
477 .id = 0, 477 .id = 1,
478 .parent = &clk_p, 478 .parent = &clk_p,
479 .enable = s5pv210_clk_ip3_ctrl, 479 .enable = s5pv210_clk_ip3_ctrl,
480 .ctrlbit = (1 << 5), 480 .ctrlbit = (1 << 5),
481 }, { 481 }, {
482 .name = "i2s_v32", 482 .name = "iis",
483 .id = 1, 483 .id = 2,
484 .parent = &clk_p, 484 .parent = &clk_p,
485 .enable = s5pv210_clk_ip3_ctrl, 485 .enable = s5pv210_clk_ip3_ctrl,
486 .ctrlbit = (1 << 6), 486 .ctrlbit = (1 << 6),
diff --git a/arch/arm/mach-s5pv210/dev-audio.c b/arch/arm/mach-s5pv210/dev-audio.c
index 1303fcb12b5..ddd2704b346 100644
--- a/arch/arm/mach-s5pv210/dev-audio.c
+++ b/arch/arm/mach-s5pv210/dev-audio.c
@@ -19,22 +19,24 @@
19#include <mach/dma.h> 19#include <mach/dma.h>
20#include <mach/irqs.h> 20#include <mach/irqs.h>
21 21
22static const char *rclksrc[] = {
23 [0] = "busclk",
24 [1] = "i2sclk",
25};
26
22static int s5pv210_cfg_i2s(struct platform_device *pdev) 27static int s5pv210_cfg_i2s(struct platform_device *pdev)
23{ 28{
24 /* configure GPIO for i2s port */ 29 /* configure GPIO for i2s port */
25 switch (pdev->id) { 30 switch (pdev->id) {
31 case 0:
32 s3c_gpio_cfgpin_range(S5PV210_GPI(0), 7, S3C_GPIO_SFN(2));
33 break;
26 case 1: 34 case 1:
27 s3c_gpio_cfgpin_range(S5PV210_GPC0(0), 5, S3C_GPIO_SFN(2)); 35 s3c_gpio_cfgpin_range(S5PV210_GPC0(0), 5, S3C_GPIO_SFN(2));
28 break; 36 break;
29
30 case 2: 37 case 2:
31 s3c_gpio_cfgpin_range(S5PV210_GPC1(0), 5, S3C_GPIO_SFN(4)); 38 s3c_gpio_cfgpin_range(S5PV210_GPC1(0), 5, S3C_GPIO_SFN(4));
32 break; 39 break;
33
34 case -1:
35 s3c_gpio_cfgpin_range(S5PV210_GPI(0), 7, S3C_GPIO_SFN(2));
36 break;
37
38 default: 40 default:
39 printk(KERN_ERR "Invalid Device %d\n", pdev->id); 41 printk(KERN_ERR "Invalid Device %d\n", pdev->id);
40 return -EINVAL; 42 return -EINVAL;
@@ -43,8 +45,15 @@ static int s5pv210_cfg_i2s(struct platform_device *pdev)
43 return 0; 45 return 0;
44} 46}
45 47
46static struct s3c_audio_pdata s3c_i2s_pdata = { 48static struct s3c_audio_pdata i2sv5_pdata = {
47 .cfg_gpio = s5pv210_cfg_i2s, 49 .cfg_gpio = s5pv210_cfg_i2s,
50 .type = {
51 .i2s = {
52 .quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
53 | QUIRK_NEED_RSTCLR,
54 .src_clk = rclksrc,
55 },
56 },
48}; 57};
49 58
50static struct resource s5pv210_iis0_resource[] = { 59static struct resource s5pv210_iis0_resource[] = {
@@ -63,15 +72,34 @@ static struct resource s5pv210_iis0_resource[] = {
63 .end = DMACH_I2S0_RX, 72 .end = DMACH_I2S0_RX,
64 .flags = IORESOURCE_DMA, 73 .flags = IORESOURCE_DMA,
65 }, 74 },
75 [3] = {
76 .start = DMACH_I2S0S_TX,
77 .end = DMACH_I2S0S_TX,
78 .flags = IORESOURCE_DMA,
79 },
66}; 80};
67 81
68struct platform_device s5pv210_device_iis0 = { 82struct platform_device s5pv210_device_iis0 = {
69 .name = "s3c64xx-iis-v4", 83 .name = "samsung-i2s",
70 .id = -1, 84 .id = 0,
71 .num_resources = ARRAY_SIZE(s5pv210_iis0_resource), 85 .num_resources = ARRAY_SIZE(s5pv210_iis0_resource),
72 .resource = s5pv210_iis0_resource, 86 .resource = s5pv210_iis0_resource,
73 .dev = { 87 .dev = {
74 .platform_data = &s3c_i2s_pdata, 88 .platform_data = &i2sv5_pdata,
89 },
90};
91
92static const char *rclksrc_v3[] = {
93 [0] = "iis",
94 [1] = "audio-bus",
95};
96
97static struct s3c_audio_pdata i2sv3_pdata = {
98 .cfg_gpio = s5pv210_cfg_i2s,
99 .type = {
100 .i2s = {
101 .src_clk = rclksrc_v3,
102 },
75 }, 103 },
76}; 104};
77 105
@@ -94,12 +122,12 @@ static struct resource s5pv210_iis1_resource[] = {
94}; 122};
95 123
96struct platform_device s5pv210_device_iis1 = { 124struct platform_device s5pv210_device_iis1 = {
97 .name = "s3c64xx-iis", 125 .name = "samsung-i2s",
98 .id = 1, 126 .id = 1,
99 .num_resources = ARRAY_SIZE(s5pv210_iis1_resource), 127 .num_resources = ARRAY_SIZE(s5pv210_iis1_resource),
100 .resource = s5pv210_iis1_resource, 128 .resource = s5pv210_iis1_resource,
101 .dev = { 129 .dev = {
102 .platform_data = &s3c_i2s_pdata, 130 .platform_data = &i2sv3_pdata,
103 }, 131 },
104}; 132};
105 133
@@ -122,12 +150,12 @@ static struct resource s5pv210_iis2_resource[] = {
122}; 150};
123 151
124struct platform_device s5pv210_device_iis2 = { 152struct platform_device s5pv210_device_iis2 = {
125 .name = "s3c64xx-iis", 153 .name = "samsung-i2s",
126 .id = 2, 154 .id = 2,
127 .num_resources = ARRAY_SIZE(s5pv210_iis2_resource), 155 .num_resources = ARRAY_SIZE(s5pv210_iis2_resource),
128 .resource = s5pv210_iis2_resource, 156 .resource = s5pv210_iis2_resource,
129 .dev = { 157 .dev = {
130 .platform_data = &s3c_i2s_pdata, 158 .platform_data = &i2sv3_pdata,
131 }, 159 },
132}; 160};
133 161
diff --git a/arch/arm/mach-s5pv310/Kconfig b/arch/arm/mach-s5pv310/Kconfig
index 1150b360f38..d64efe0d4c9 100644
--- a/arch/arm/mach-s5pv310/Kconfig
+++ b/arch/arm/mach-s5pv310/Kconfig
@@ -11,6 +11,7 @@ if ARCH_S5PV310
11 11
12config CPU_S5PV310 12config CPU_S5PV310
13 bool 13 bool
14 select S3C_PL330_DMA
14 help 15 help
15 Enable S5PV310 CPU support 16 Enable S5PV310 CPU support
16 17
diff --git a/arch/arm/mach-s5pv310/Makefile b/arch/arm/mach-s5pv310/Makefile
index 84afc64e7c0..61e3cb65426 100644
--- a/arch/arm/mach-s5pv310/Makefile
+++ b/arch/arm/mach-s5pv310/Makefile
@@ -13,7 +13,7 @@ obj- :=
13# Core support for S5PV310 system 13# Core support for S5PV310 system
14 14
15obj-$(CONFIG_CPU_S5PV310) += cpu.o init.o clock.o irq-combiner.o 15obj-$(CONFIG_CPU_S5PV310) += cpu.o init.o clock.o irq-combiner.o
16obj-$(CONFIG_CPU_S5PV310) += setup-i2c0.o time.o gpiolib.o irq-eint.o 16obj-$(CONFIG_CPU_S5PV310) += setup-i2c0.o time.o gpiolib.o irq-eint.o dma.o
17 17
18obj-$(CONFIG_SMP) += platsmp.o headsmp.o 18obj-$(CONFIG_SMP) += platsmp.o headsmp.o
19obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o 19obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
@@ -27,6 +27,7 @@ obj-$(CONFIG_MACH_UNIVERSAL_C210) += mach-universal_c210.o
27 27
28# device support 28# device support
29 29
30obj-y += dev-audio.o
30obj-$(CONFIG_S5PV310_SETUP_I2C1) += setup-i2c1.o 31obj-$(CONFIG_S5PV310_SETUP_I2C1) += setup-i2c1.o
31obj-$(CONFIG_S5PV310_SETUP_I2C2) += setup-i2c2.o 32obj-$(CONFIG_S5PV310_SETUP_I2C2) += setup-i2c2.o
32obj-$(CONFIG_S5PV310_SETUP_I2C3) += setup-i2c3.o 33obj-$(CONFIG_S5PV310_SETUP_I2C3) += setup-i2c3.o
diff --git a/arch/arm/mach-s5pv310/dev-audio.c b/arch/arm/mach-s5pv310/dev-audio.c
new file mode 100644
index 00000000000..a1964242f0f
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dev-audio.c
@@ -0,0 +1,364 @@
1/* linux/arch/arm/mach-s5pv310/dev-audio.c
2 *
3 * Copyright (c) 2010 Samsung Electronics Co. Ltd
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/platform_device.h>
12#include <linux/dma-mapping.h>
13#include <linux/gpio.h>
14
15#include <plat/gpio-cfg.h>
16#include <plat/audio.h>
17
18#include <mach/map.h>
19#include <mach/dma.h>
20#include <mach/irqs.h>
21
22static const char *rclksrc[] = {
23 [0] = "busclk",
24 [1] = "i2sclk",
25};
26
27static int s5pv310_cfg_i2s(struct platform_device *pdev)
28{
29 /* configure GPIO for i2s port */
30 switch (pdev->id) {
31 case 0:
32 s3c_gpio_cfgpin_range(S5PV310_GPZ(0), 7, S3C_GPIO_SFN(2));
33 break;
34 case 1:
35 s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(2));
36 break;
37 case 2:
38 s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 5, S3C_GPIO_SFN(4));
39 break;
40 default:
41 printk(KERN_ERR "Invalid Device %d\n", pdev->id);
42 return -EINVAL;
43 }
44
45 return 0;
46}
47
48static struct s3c_audio_pdata i2sv5_pdata = {
49 .cfg_gpio = s5pv310_cfg_i2s,
50 .type = {
51 .i2s = {
52 .quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
53 | QUIRK_NEED_RSTCLR,
54 .src_clk = rclksrc,
55 },
56 },
57};
58
59static struct resource s5pv310_i2s0_resource[] = {
60 [0] = {
61 .start = S5PV310_PA_I2S0,
62 .end = S5PV310_PA_I2S0 + 0x100 - 1,
63 .flags = IORESOURCE_MEM,
64 },
65 [1] = {
66 .start = DMACH_I2S0_TX,
67 .end = DMACH_I2S0_TX,
68 .flags = IORESOURCE_DMA,
69 },
70 [2] = {
71 .start = DMACH_I2S0_RX,
72 .end = DMACH_I2S0_RX,
73 .flags = IORESOURCE_DMA,
74 },
75 [3] = {
76 .start = DMACH_I2S0S_TX,
77 .end = DMACH_I2S0S_TX,
78 .flags = IORESOURCE_DMA,
79 },
80};
81
82struct platform_device s5pv310_device_i2s0 = {
83 .name = "samsung-i2s",
84 .id = 0,
85 .num_resources = ARRAY_SIZE(s5pv310_i2s0_resource),
86 .resource = s5pv310_i2s0_resource,
87 .dev = {
88 .platform_data = &i2sv5_pdata,
89 },
90};
91
92static const char *rclksrc_v3[] = {
93 [0] = "sclk_i2s",
94 [1] = "no_such_clock",
95};
96
97static struct s3c_audio_pdata i2sv3_pdata = {
98 .cfg_gpio = s5pv310_cfg_i2s,
99 .type = {
100 .i2s = {
101 .quirks = QUIRK_NO_MUXPSR,
102 .src_clk = rclksrc_v3,
103 },
104 },
105};
106
107static struct resource s5pv310_i2s1_resource[] = {
108 [0] = {
109 .start = S5PV310_PA_I2S1,
110 .end = S5PV310_PA_I2S1 + 0x100 - 1,
111 .flags = IORESOURCE_MEM,
112 },
113 [1] = {
114 .start = DMACH_I2S1_TX,
115 .end = DMACH_I2S1_TX,
116 .flags = IORESOURCE_DMA,
117 },
118 [2] = {
119 .start = DMACH_I2S1_RX,
120 .end = DMACH_I2S1_RX,
121 .flags = IORESOURCE_DMA,
122 },
123};
124
125struct platform_device s5pv310_device_i2s1 = {
126 .name = "samsung-i2s",
127 .id = 1,
128 .num_resources = ARRAY_SIZE(s5pv310_i2s1_resource),
129 .resource = s5pv310_i2s1_resource,
130 .dev = {
131 .platform_data = &i2sv3_pdata,
132 },
133};
134
135static struct resource s5pv310_i2s2_resource[] = {
136 [0] = {
137 .start = S5PV310_PA_I2S2,
138 .end = S5PV310_PA_I2S2 + 0x100 - 1,
139 .flags = IORESOURCE_MEM,
140 },
141 [1] = {
142 .start = DMACH_I2S2_TX,
143 .end = DMACH_I2S2_TX,
144 .flags = IORESOURCE_DMA,
145 },
146 [2] = {
147 .start = DMACH_I2S2_RX,
148 .end = DMACH_I2S2_RX,
149 .flags = IORESOURCE_DMA,
150 },
151};
152
153struct platform_device s5pv310_device_i2s2 = {
154 .name = "samsung-i2s",
155 .id = 2,
156 .num_resources = ARRAY_SIZE(s5pv310_i2s2_resource),
157 .resource = s5pv310_i2s2_resource,
158 .dev = {
159 .platform_data = &i2sv3_pdata,
160 },
161};
162
163/* PCM Controller platform_devices */
164
165static int s5pv310_pcm_cfg_gpio(struct platform_device *pdev)
166{
167 switch (pdev->id) {
168 case 0:
169 s3c_gpio_cfgpin_range(S5PV310_GPZ(0), 5, S3C_GPIO_SFN(3));
170 break;
171 case 1:
172 s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(3));
173 break;
174 case 2:
175 s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 5, S3C_GPIO_SFN(3));
176 break;
177 default:
178 printk(KERN_DEBUG "Invalid PCM Controller number!");
179 return -EINVAL;
180 }
181
182 return 0;
183}
184
185static struct s3c_audio_pdata s3c_pcm_pdata = {
186 .cfg_gpio = s5pv310_pcm_cfg_gpio,
187};
188
189static struct resource s5pv310_pcm0_resource[] = {
190 [0] = {
191 .start = S5PV310_PA_PCM0,
192 .end = S5PV310_PA_PCM0 + 0x100 - 1,
193 .flags = IORESOURCE_MEM,
194 },
195 [1] = {
196 .start = DMACH_PCM0_TX,
197 .end = DMACH_PCM0_TX,
198 .flags = IORESOURCE_DMA,
199 },
200 [2] = {
201 .start = DMACH_PCM0_RX,
202 .end = DMACH_PCM0_RX,
203 .flags = IORESOURCE_DMA,
204 },
205};
206
207struct platform_device s5pv310_device_pcm0 = {
208 .name = "samsung-pcm",
209 .id = 0,
210 .num_resources = ARRAY_SIZE(s5pv310_pcm0_resource),
211 .resource = s5pv310_pcm0_resource,
212 .dev = {
213 .platform_data = &s3c_pcm_pdata,
214 },
215};
216
217static struct resource s5pv310_pcm1_resource[] = {
218 [0] = {
219 .start = S5PV310_PA_PCM1,
220 .end = S5PV310_PA_PCM1 + 0x100 - 1,
221 .flags = IORESOURCE_MEM,
222 },
223 [1] = {
224 .start = DMACH_PCM1_TX,
225 .end = DMACH_PCM1_TX,
226 .flags = IORESOURCE_DMA,
227 },
228 [2] = {
229 .start = DMACH_PCM1_RX,
230 .end = DMACH_PCM1_RX,
231 .flags = IORESOURCE_DMA,
232 },
233};
234
235struct platform_device s5pv310_device_pcm1 = {
236 .name = "samsung-pcm",
237 .id = 1,
238 .num_resources = ARRAY_SIZE(s5pv310_pcm1_resource),
239 .resource = s5pv310_pcm1_resource,
240 .dev = {
241 .platform_data = &s3c_pcm_pdata,
242 },
243};
244
245static struct resource s5pv310_pcm2_resource[] = {
246 [0] = {
247 .start = S5PV310_PA_PCM2,
248 .end = S5PV310_PA_PCM2 + 0x100 - 1,
249 .flags = IORESOURCE_MEM,
250 },
251 [1] = {
252 .start = DMACH_PCM2_TX,
253 .end = DMACH_PCM2_TX,
254 .flags = IORESOURCE_DMA,
255 },
256 [2] = {
257 .start = DMACH_PCM2_RX,
258 .end = DMACH_PCM2_RX,
259 .flags = IORESOURCE_DMA,
260 },
261};
262
263struct platform_device s5pv310_device_pcm2 = {
264 .name = "samsung-pcm",
265 .id = 2,
266 .num_resources = ARRAY_SIZE(s5pv310_pcm2_resource),
267 .resource = s5pv310_pcm2_resource,
268 .dev = {
269 .platform_data = &s3c_pcm_pdata,
270 },
271};
272
273/* AC97 Controller platform devices */
274
275static int s5pv310_ac97_cfg_gpio(struct platform_device *pdev)
276{
277 return s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(4));
278}
279
280static struct resource s5pv310_ac97_resource[] = {
281 [0] = {
282 .start = S5PV310_PA_AC97,
283 .end = S5PV310_PA_AC97 + 0x100 - 1,
284 .flags = IORESOURCE_MEM,
285 },
286 [1] = {
287 .start = DMACH_AC97_PCMOUT,
288 .end = DMACH_AC97_PCMOUT,
289 .flags = IORESOURCE_DMA,
290 },
291 [2] = {
292 .start = DMACH_AC97_PCMIN,
293 .end = DMACH_AC97_PCMIN,
294 .flags = IORESOURCE_DMA,
295 },
296 [3] = {
297 .start = DMACH_AC97_MICIN,
298 .end = DMACH_AC97_MICIN,
299 .flags = IORESOURCE_DMA,
300 },
301 [4] = {
302 .start = IRQ_AC97,
303 .end = IRQ_AC97,
304 .flags = IORESOURCE_IRQ,
305 },
306};
307
308static struct s3c_audio_pdata s3c_ac97_pdata = {
309 .cfg_gpio = s5pv310_ac97_cfg_gpio,
310};
311
312static u64 s5pv310_ac97_dmamask = DMA_BIT_MASK(32);
313
314struct platform_device s5pv310_device_ac97 = {
315 .name = "samsung-ac97",
316 .id = -1,
317 .num_resources = ARRAY_SIZE(s5pv310_ac97_resource),
318 .resource = s5pv310_ac97_resource,
319 .dev = {
320 .platform_data = &s3c_ac97_pdata,
321 .dma_mask = &s5pv310_ac97_dmamask,
322 .coherent_dma_mask = DMA_BIT_MASK(32),
323 },
324};
325
326/* S/PDIF Controller platform_device */
327
328static int s5pv310_spdif_cfg_gpio(struct platform_device *pdev)
329{
330 s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 2, S3C_GPIO_SFN(3));
331
332 return 0;
333}
334
335static struct resource s5pv310_spdif_resource[] = {
336 [0] = {
337 .start = S5PV310_PA_SPDIF,
338 .end = S5PV310_PA_SPDIF + 0x100 - 1,
339 .flags = IORESOURCE_MEM,
340 },
341 [1] = {
342 .start = DMACH_SPDIF,
343 .end = DMACH_SPDIF,
344 .flags = IORESOURCE_DMA,
345 },
346};
347
348static struct s3c_audio_pdata samsung_spdif_pdata = {
349 .cfg_gpio = s5pv310_spdif_cfg_gpio,
350};
351
352static u64 s5pv310_spdif_dmamask = DMA_BIT_MASK(32);
353
354struct platform_device s5pv310_device_spdif = {
355 .name = "samsung-spdif",
356 .id = -1,
357 .num_resources = ARRAY_SIZE(s5pv310_spdif_resource),
358 .resource = s5pv310_spdif_resource,
359 .dev = {
360 .platform_data = &samsung_spdif_pdata,
361 .dma_mask = &s5pv310_spdif_dmamask,
362 .coherent_dma_mask = DMA_BIT_MASK(32),
363 },
364};
diff --git a/arch/arm/mach-s5pv310/dma.c b/arch/arm/mach-s5pv310/dma.c
new file mode 100644
index 00000000000..20066c7c9e5
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dma.c
@@ -0,0 +1,168 @@
1/*
2 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
3 * Jaswinder Singh <jassi.brar@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#include <linux/platform_device.h>
21#include <linux/dma-mapping.h>
22
23#include <plat/devs.h>
24#include <plat/irqs.h>
25
26#include <mach/map.h>
27#include <mach/irqs.h>
28
29#include <plat/s3c-pl330-pdata.h>
30
31static u64 dma_dmamask = DMA_BIT_MASK(32);
32
33static struct resource s5pv310_pdma0_resource[] = {
34 [0] = {
35 .start = S5PV310_PA_PDMA0,
36 .end = S5PV310_PA_PDMA0 + SZ_4K,
37 .flags = IORESOURCE_MEM,
38 },
39 [1] = {
40 .start = IRQ_PDMA0,
41 .end = IRQ_PDMA0,
42 .flags = IORESOURCE_IRQ,
43 },
44};
45
46static struct s3c_pl330_platdata s5pv310_pdma0_pdata = {
47 .peri = {
48 [0] = DMACH_PCM0_RX,
49 [1] = DMACH_PCM0_TX,
50 [2] = DMACH_PCM2_RX,
51 [3] = DMACH_PCM2_TX,
52 [4] = DMACH_MSM_REQ0,
53 [5] = DMACH_MSM_REQ2,
54 [6] = DMACH_SPI0_RX,
55 [7] = DMACH_SPI0_TX,
56 [8] = DMACH_SPI2_RX,
57 [9] = DMACH_SPI2_TX,
58 [10] = DMACH_I2S0S_TX,
59 [11] = DMACH_I2S0_RX,
60 [12] = DMACH_I2S0_TX,
61 [13] = DMACH_I2S2_RX,
62 [14] = DMACH_I2S2_TX,
63 [15] = DMACH_UART0_RX,
64 [16] = DMACH_UART0_TX,
65 [17] = DMACH_UART2_RX,
66 [18] = DMACH_UART2_TX,
67 [19] = DMACH_UART4_RX,
68 [20] = DMACH_UART4_TX,
69 [21] = DMACH_SLIMBUS0_RX,
70 [22] = DMACH_SLIMBUS0_TX,
71 [23] = DMACH_SLIMBUS2_RX,
72 [24] = DMACH_SLIMBUS2_TX,
73 [25] = DMACH_SLIMBUS4_RX,
74 [26] = DMACH_SLIMBUS4_TX,
75 [27] = DMACH_AC97_MICIN,
76 [28] = DMACH_AC97_PCMIN,
77 [29] = DMACH_AC97_PCMOUT,
78 [30] = DMACH_MAX,
79 [31] = DMACH_MAX,
80 },
81};
82
83static struct platform_device s5pv310_device_pdma0 = {
84 .name = "s3c-pl330",
85 .id = 0,
86 .num_resources = ARRAY_SIZE(s5pv310_pdma0_resource),
87 .resource = s5pv310_pdma0_resource,
88 .dev = {
89 .dma_mask = &dma_dmamask,
90 .coherent_dma_mask = DMA_BIT_MASK(32),
91 .platform_data = &s5pv310_pdma0_pdata,
92 },
93};
94
95static struct resource s5pv310_pdma1_resource[] = {
96 [0] = {
97 .start = S5PV310_PA_PDMA1,
98 .end = S5PV310_PA_PDMA1 + SZ_4K,
99 .flags = IORESOURCE_MEM,
100 },
101 [1] = {
102 .start = IRQ_PDMA1,
103 .end = IRQ_PDMA1,
104 .flags = IORESOURCE_IRQ,
105 },
106};
107
108static struct s3c_pl330_platdata s5pv310_pdma1_pdata = {
109 .peri = {
110 [0] = DMACH_PCM0_RX,
111 [1] = DMACH_PCM0_TX,
112 [2] = DMACH_PCM1_RX,
113 [3] = DMACH_PCM1_TX,
114 [4] = DMACH_MSM_REQ1,
115 [5] = DMACH_MSM_REQ3,
116 [6] = DMACH_SPI1_RX,
117 [7] = DMACH_SPI1_TX,
118 [8] = DMACH_I2S0S_TX,
119 [9] = DMACH_I2S0_RX,
120 [10] = DMACH_I2S0_TX,
121 [11] = DMACH_I2S1_RX,
122 [12] = DMACH_I2S1_TX,
123 [13] = DMACH_UART0_RX,
124 [14] = DMACH_UART0_TX,
125 [15] = DMACH_UART1_RX,
126 [16] = DMACH_UART1_TX,
127 [17] = DMACH_UART3_RX,
128 [18] = DMACH_UART3_TX,
129 [19] = DMACH_SLIMBUS1_RX,
130 [20] = DMACH_SLIMBUS1_TX,
131 [21] = DMACH_SLIMBUS3_RX,
132 [22] = DMACH_SLIMBUS3_TX,
133 [23] = DMACH_SLIMBUS5_RX,
134 [24] = DMACH_SLIMBUS5_TX,
135 [25] = DMACH_SLIMBUS0AUX_RX,
136 [26] = DMACH_SLIMBUS0AUX_TX,
137 [27] = DMACH_SPDIF,
138 [28] = DMACH_MAX,
139 [29] = DMACH_MAX,
140 [30] = DMACH_MAX,
141 [31] = DMACH_MAX,
142 },
143};
144
145static struct platform_device s5pv310_device_pdma1 = {
146 .name = "s3c-pl330",
147 .id = 1,
148 .num_resources = ARRAY_SIZE(s5pv310_pdma1_resource),
149 .resource = s5pv310_pdma1_resource,
150 .dev = {
151 .dma_mask = &dma_dmamask,
152 .coherent_dma_mask = DMA_BIT_MASK(32),
153 .platform_data = &s5pv310_pdma1_pdata,
154 },
155};
156
157static struct platform_device *s5pv310_dmacs[] __initdata = {
158 &s5pv310_device_pdma0,
159 &s5pv310_device_pdma1,
160};
161
162static int __init s5pv310_dma_init(void)
163{
164 platform_add_devices(s5pv310_dmacs, ARRAY_SIZE(s5pv310_dmacs));
165
166 return 0;
167}
168arch_initcall(s5pv310_dma_init);
diff --git a/arch/arm/mach-s5pv310/include/mach/dma.h b/arch/arm/mach-s5pv310/include/mach/dma.h
new file mode 100644
index 00000000000..81209eb1409
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/dma.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
3 * Jaswinder Singh <jassi.brar@samsung.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#ifndef __MACH_DMA_H
21#define __MACH_DMA_H
22
23/* This platform uses the common S3C DMA API driver for PL330 */
24#include <plat/s3c-dma-pl330.h>
25
26#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/irqs.h b/arch/arm/mach-s5pv310/include/mach/irqs.h
index 99e7dad8a85..3c05c58b539 100644
--- a/arch/arm/mach-s5pv310/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv310/include/mach/irqs.h
@@ -54,6 +54,9 @@
 #define COMBINER_GROUP(x)	((x) * MAX_IRQ_IN_COMBINER + IRQ_SPI(64))
 #define COMBINER_IRQ(x, y)	(COMBINER_GROUP(x) + y)
 
+#define IRQ_PDMA0		COMBINER_IRQ(21, 0)
+#define IRQ_PDMA1		COMBINER_IRQ(21, 1)
+
 #define IRQ_TIMER0_VIC		COMBINER_IRQ(22, 0)
 #define IRQ_TIMER1_VIC		COMBINER_IRQ(22, 1)
 #define IRQ_TIMER2_VIC		COMBINER_IRQ(22, 2)
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index 7acf4e77e92..53994467605 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -52,6 +52,11 @@
 #define S5PV310_PA_GIC_DIST	(0x10501000)
 #define S5PV310_PA_L2CC		(0x10502000)
 
+/* DMA */
+#define S5PV310_PA_MDMA		0x10810000
+#define S5PV310_PA_PDMA0	0x12680000
+#define S5PV310_PA_PDMA1	0x12690000
+
 #define S5PV310_PA_GPIO1	(0x11400000)
 #define S5PV310_PA_GPIO2	(0x11000000)
 #define S5PV310_PA_GPIO3	(0x03860000)
@@ -60,6 +65,22 @@
 
 #define S5PV310_PA_SROMC	(0x12570000)
 
+/* S/PDIF */
+#define S5PV310_PA_SPDIF	0xE1100000
+
+/* I2S */
+#define S5PV310_PA_I2S0		0x03830000
+#define S5PV310_PA_I2S1		0xE3100000
+#define S5PV310_PA_I2S2		0xE2A00000
+
+/* PCM */
+#define S5PV310_PA_PCM0		0x03840000
+#define S5PV310_PA_PCM1		0x13980000
+#define S5PV310_PA_PCM2		0x13990000
+
+/* AC97 */
+#define S5PV310_PA_AC97		0x139A0000
+
 #define S5PV310_PA_UART		(0x13800000)
 
 #define S5P_PA_UART(x)		(S5PV310_PA_UART + ((x) * S3C_UART_OFFSET))
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 54b479c35ee..51dcd59eda6 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -116,4 +116,6 @@ endmenu
 config SH_CLK_CPG
 	bool
 
+source "drivers/sh/Kconfig"
+
 endif
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 46ca4d4abf9..d3260542b94 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -163,11 +163,13 @@ static struct mtd_partition nor_flash_partitions[] = {
 		.name		= "loader",
 		.offset		= 0x00000000,
 		.size		= 512 * 1024,
+		.mask_flags	= MTD_WRITEABLE,
 	},
 	{
 		.name		= "bootenv",
 		.offset		= MTDPART_OFS_APPEND,
 		.size		= 512 * 1024,
+		.mask_flags	= MTD_WRITEABLE,
 	},
 	{
 		.name		= "kernel_ro",
@@ -565,12 +567,54 @@ static struct platform_device *qhd_devices[] __initdata = {
 
 /* FSI */
 #define IRQ_FSI evt2irq(0x1840)
+
+static int fsi_set_rate(int is_porta, int rate)
+{
+	struct clk *fsib_clk;
+	struct clk *fdiv_clk = &sh7372_fsidivb_clk;
+	int ret;
+
+	/* set_rate is not needed if port A */
+	if (is_porta)
+		return 0;
+
+	fsib_clk = clk_get(NULL, "fsib_clk");
+	if (IS_ERR(fsib_clk))
+		return -EINVAL;
+
+	switch (rate) {
+	case 44100:
+		clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 11283000));
+		ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+		break;
+	case 48000:
+		clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000));
+		clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000));
+		ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+		break;
+	default:
+		pr_err("unsupported rate in FSI2 port B\n");
+		ret = -EINVAL;
+		break;
+	}
+
+	clk_put(fsib_clk);
+
+	return ret;
+}
+
 static struct sh_fsi_platform_info fsi_info = {
 	.porta_flags = SH_FSI_BRS_INV |
 		       SH_FSI_OUT_SLAVE_MODE |
 		       SH_FSI_IN_SLAVE_MODE |
 		       SH_FSI_OFMT(PCM) |
 		       SH_FSI_IFMT(PCM),
+
+	.portb_flags = SH_FSI_BRS_INV |
+		       SH_FSI_BRM_INV |
+		       SH_FSI_LRS_INV |
+		       SH_FSI_OFMT(SPDIF),
+	.set_rate = fsi_set_rate,
 };
 
 static struct resource fsi_resources[] = {
@@ -634,6 +678,7 @@ static struct platform_device lcdc1_device = {
 static struct sh_mobile_hdmi_info hdmi_info = {
 	.lcd_chan	= &sh_mobile_lcdc1_info.ch[0],
 	.lcd_dev	= &lcdc1_device.dev,
+	.flags		= HDMI_SND_SRC_SPDIF,
 };
 
 static struct resource hdmi_resources[] = {
@@ -992,6 +1037,7 @@ static void __init ap4evb_map_io(void)
 
 #define GPIO_PORT9CR	0xE6051009
 #define GPIO_PORT10CR	0xE605100A
+#define USCCR1		0xE6058144
 static void __init ap4evb_init(void)
 {
 	u32 srcr4;
@@ -1062,7 +1108,7 @@ static void __init ap4evb_init(void)
 	/* setup USB phy */
 	__raw_writew(0x8a0a, 0xE6058130);	/* USBCR2 */
 
-	/* enable FSI2 */
+	/* enable FSI2 port A (ak4643) */
 	gpio_request(GPIO_FN_FSIAIBT, NULL);
 	gpio_request(GPIO_FN_FSIAILR, NULL);
 	gpio_request(GPIO_FN_FSIAISLD, NULL);
@@ -1079,6 +1125,10 @@ static void __init ap4evb_init(void)
 	gpio_request(GPIO_PORT41, NULL);
 	gpio_direction_input(GPIO_PORT41);
 
+	/* setup FSI2 port B (HDMI) */
+	gpio_request(GPIO_FN_FSIBCK, NULL);
+	__raw_writew(__raw_readw(USCCR1) & ~(1 << 6), USCCR1); /* use SPDIF */
+
 	/* set SPU2 clock to 119.6 MHz */
 	clk = clk_get(NULL, "spu_clk");
 	if (!IS_ERR(clk)) {
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 8565aefa21f..7db31e6c6bf 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -50,6 +50,9 @@
 #define SMSTPCR3	0xe615013c
 #define SMSTPCR4	0xe6150140
 
+#define FSIDIVA		0xFE1F8000
+#define FSIDIVB		0xFE1F8008
+
 /* Platforms must set frequency on their DV_CLKI pin */
 struct clk sh7372_dv_clki_clk = {
 };
@@ -288,6 +291,7 @@ struct clk sh7372_pllc2_clk = {
 	.ops		= &pllc2_clk_ops,
 	.parent		= &extal1_div2_clk,
 	.freq_table	= pllc2_freq_table,
+	.nr_freqs	= ARRAY_SIZE(pllc2_freq_table) - 1,
 	.parent_table	= pllc2_parent,
 	.parent_num	= ARRAY_SIZE(pllc2_parent),
 };
@@ -417,6 +421,101 @@ static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
417 fsibckcr_parent, ARRAY_SIZE(fsibckcr_parent), 6, 2), 421 fsibckcr_parent, ARRAY_SIZE(fsibckcr_parent), 6, 2),
418}; 422};
419 423
424/* FSI DIV */
425static unsigned long fsidiv_recalc(struct clk *clk)
426{
427 unsigned long value;
428
429 value = __raw_readl(clk->mapping->base);
430
431 if ((value & 0x3) != 0x3)
432 return 0;
433
434 value >>= 16;
435 if (value < 2)
436 return 0;
437
438 return clk->parent->rate / value;
439}
440
441static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
442{
443 return clk_rate_div_range_round(clk, 2, 0xffff, rate);
444}
445
446static void fsidiv_disable(struct clk *clk)
447{
448 __raw_writel(0, clk->mapping->base);
449}
450
451static int fsidiv_enable(struct clk *clk)
452{
453 unsigned long value;
454
455 value = __raw_readl(clk->mapping->base) >> 16;
456 if (value < 2) {
457 fsidiv_disable(clk);
458 return -ENOENT;
459 }
460
461 __raw_writel((value << 16) | 0x3, clk->mapping->base);
462
463 return 0;
464}
465
466static int fsidiv_set_rate(struct clk *clk,
467 unsigned long rate, int algo_id)
468{
469 int idx;
470
471 if (clk->parent->rate == rate) {
472 fsidiv_disable(clk);
473 return 0;
474 }
475
476 idx = (clk->parent->rate / rate) & 0xffff;
477 if (idx < 2)
478 return -ENOENT;
479
480 __raw_writel(idx << 16, clk->mapping->base);
481 return fsidiv_enable(clk);
482}
483
484static struct clk_ops fsidiv_clk_ops = {
485 .recalc = fsidiv_recalc,
486 .round_rate = fsidiv_round_rate,
487 .set_rate = fsidiv_set_rate,
488 .enable = fsidiv_enable,
489 .disable = fsidiv_disable,
490};
491
492static struct clk_mapping sh7372_fsidiva_clk_mapping = {
493 .phys = FSIDIVA,
494 .len = 8,
495};
496
497struct clk sh7372_fsidiva_clk = {
498 .ops = &fsidiv_clk_ops,
499 .parent = &div6_reparent_clks[DIV6_FSIA], /* late install */
500 .mapping = &sh7372_fsidiva_clk_mapping,
501};
502
503static struct clk_mapping sh7372_fsidivb_clk_mapping = {
504 .phys = FSIDIVB,
505 .len = 8,
506};
507
508struct clk sh7372_fsidivb_clk = {
509 .ops = &fsidiv_clk_ops,
510 .parent = &div6_reparent_clks[DIV6_FSIB], /* late install */
511 .mapping = &sh7372_fsidivb_clk_mapping,
512};
513
514static struct clk *late_main_clks[] = {
515 &sh7372_fsidiva_clk,
516 &sh7372_fsidivb_clk,
517};
518
420enum { MSTP001, 519enum { MSTP001,
421 MSTP131, MSTP130, 520 MSTP131, MSTP130,
422 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, 521 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
@@ -585,6 +684,9 @@ void __init sh7372_clock_init(void)
 	if (!ret)
 		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
 
+	for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
+		ret = clk_register(late_main_clks[k]);
+
 	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
 
 	if (!ret)
diff --git a/arch/arm/mach-shmobile/include/mach/gpio.h b/arch/arm/mach-shmobile/include/mach/gpio.h
index 5bc6bd444d7..2b1bb9e43dd 100644
--- a/arch/arm/mach-shmobile/include/mach/gpio.h
+++ b/arch/arm/mach-shmobile/include/mach/gpio.h
@@ -35,12 +35,12 @@ static inline int gpio_cansleep(unsigned gpio)
 
 static inline int gpio_to_irq(unsigned gpio)
 {
-	return -ENOSYS;
+	return __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned int irq)
 {
-	return -EINVAL;
+	return -ENOSYS;
 }
 
 #endif /* CONFIG_GPIOLIB */
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 147775a94bc..e4f9004e710 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -464,5 +464,7 @@ extern struct clk sh7372_dv_clki_div2_clk;
 extern struct clk sh7372_pllc2_clk;
 extern struct clk sh7372_fsiack_clk;
 extern struct clk sh7372_fsibck_clk;
+extern struct clk sh7372_fsidiva_clk;
+extern struct clk sh7372_fsidivb_clk;
 
 #endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index 4cd3cae38e7..30b2f400666 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -98,7 +98,7 @@ static struct intc_vect intca_vectors[] __initdata = {
 	INTC_VECT(IRQ14A, 0x03c0), INTC_VECT(IRQ15A, 0x03e0),
 	INTC_VECT(IRQ16A, 0x3200), INTC_VECT(IRQ17A, 0x3220),
 	INTC_VECT(IRQ18A, 0x3240), INTC_VECT(IRQ19A, 0x3260),
-	INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ31A, 0x32a0),
+	INTC_VECT(IRQ20A, 0x3280), INTC_VECT(IRQ21A, 0x32a0),
 	INTC_VECT(IRQ22A, 0x32c0), INTC_VECT(IRQ23A, 0x32e0),
 	INTC_VECT(IRQ24A, 0x3300), INTC_VECT(IRQ25A, 0x3320),
 	INTC_VECT(IRQ26A, 0x3340), INTC_VECT(IRQ27A, 0x3360),
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index c2e405a9e02..fd25ccd7272 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -54,7 +54,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = {
 
 static void __init ct_ca9x4_map_io(void)
 {
+#ifdef CONFIG_LOCAL_TIMERS
 	twd_base = MMIO_P2V(A9_MPCORE_TWD);
+#endif
 	v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
 }
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4dd0646e85..ac6a36142fc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 	 * fragmentation of the DMA space, and also prevents allocations
 	 * smaller than a section from crossing a section boundary.
 	 */
-	bit = fls(size - 1) + 1;
+	bit = fls(size - 1);
 	if (bit > SECTION_SHIFT)
 		bit = SECTION_SHIFT;
 	align = 1 << bit;
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 6f42a18b8aa..fc819120978 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -284,12 +284,14 @@ void __init omap_dsp_reserve_sdram_memblock(void)
 	if (!size)
 		return;
 
-	paddr = __memblock_alloc_base(size, SZ_1M, MEMBLOCK_REAL_LIMIT);
+	paddr = memblock_alloc(size, SZ_1M);
 	if (!paddr) {
 		pr_err("%s: failed to reserve %x bytes\n",
 				__func__, size);
 		return;
 	}
+	memblock_free(paddr, size);
+	memblock_remove(paddr, size);
 
 	omap_dsp_phys_mempool_base = paddr;
 }
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index f5c5b8da9a8..2c2826571d4 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1983,6 +1983,8 @@ static int omap2_dma_handle_ch(int ch)
 
 	dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(ch));
 	dma_write(1 << ch, IRQSTATUS_L0);
+	/* read back the register to flush the write */
+	dma_read(IRQSTATUS_L0);
 
 	/* If the ch is not chained then chain_id will be -1 */
 	if (dma_chan[ch].chain_id != -1) {
diff --git a/arch/arm/plat-orion/include/plat/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h
index 3ebfef72b4e..cc99163e73f 100644
--- a/arch/arm/plat-orion/include/plat/pcie.h
+++ b/arch/arm/plat-orion/include/plat/pcie.h
@@ -11,12 +11,15 @@
 #ifndef __PLAT_PCIE_H
 #define __PLAT_PCIE_H
 
+struct pci_bus;
+
 u32 orion_pcie_dev_id(void __iomem *base);
 u32 orion_pcie_rev(void __iomem *base);
 int orion_pcie_link_up(void __iomem *base);
 int orion_pcie_x4_mode(void __iomem *base);
 int orion_pcie_get_local_bus_nr(void __iomem *base);
 void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
+void orion_pcie_reset(void __iomem *base);
 void orion_pcie_setup(void __iomem *base,
 		      struct mbus_dram_target_info *dram);
 int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 779553a1595..af2d733c50b 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -182,11 +182,6 @@ void __init orion_pcie_setup(void __iomem *base,
 	u32 mask;
 
 	/*
-	 * soft reset PCIe unit
-	 */
-	orion_pcie_reset(base);
-
-	/*
 	 * Point PCIe unit MBUS decode windows to DRAM space.
 	 */
 	orion_pcie_setup_wins(base, dram);
diff --git a/arch/arm/plat-samsung/include/plat/audio.h b/arch/arm/plat-samsung/include/plat/audio.h
index 7712ff6336f..a0826ed2f9f 100644
--- a/arch/arm/plat-samsung/include/plat/audio.h
+++ b/arch/arm/plat-samsung/include/plat/audio.h
@@ -25,10 +25,34 @@ extern void s3c64xx_ac97_setup_gpio(int);
 #define S5PC100_SPDIF_GPG3	1
 extern void s5pc100_spdif_setup_gpio(int);
 
+struct samsung_i2s {
+/* If the Primary DAI has 5.1 Channels */
+#define QUIRK_PRI_6CHAN		(1 << 0)
+/* If the I2S block has a Stereo Overlay Channel */
+#define QUIRK_SEC_DAI		(1 << 1)
+/*
+ * If the I2S block has no internal prescalar or MUX (I2SMOD[10] bit)
+ * The Machine driver must provide suitably set clock to the I2S block.
+ */
+#define QUIRK_NO_MUXPSR		(1 << 2)
+#define QUIRK_NEED_RSTCLR	(1 << 3)
+	/* Quirks of the I2S controller */
+	u32 quirks;
+
+	/*
+	 * Array of clock names that can be used to generate I2S signals.
+	 * Also corresponds to clocks of I2SMOD[10]
+	 */
+	const char **src_clk;
+};
+
 /**
  * struct s3c_audio_pdata - common platform data for audio device drivers
  * @cfg_gpio: Callback function to setup mux'ed pins in I2S/PCM/AC97 mode
  */
 struct s3c_audio_pdata {
 	int (*cfg_gpio)(struct platform_device *);
+	union {
+		struct samsung_i2s i2s;
+	} type;
 };
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 2d82a6cb144..dec0ded6a5e 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -96,6 +96,15 @@ extern struct platform_device s5pv210_device_iis1;
 extern struct platform_device s5pv210_device_iis2;
 extern struct platform_device s5pv210_device_spdif;
 
+extern struct platform_device s5pv310_device_ac97;
+extern struct platform_device s5pv310_device_pcm0;
+extern struct platform_device s5pv310_device_pcm1;
+extern struct platform_device s5pv310_device_pcm2;
+extern struct platform_device s5pv310_device_i2s0;
+extern struct platform_device s5pv310_device_i2s1;
+extern struct platform_device s5pv310_device_i2s2;
+extern struct platform_device s5pv310_device_spdif;
+
 extern struct platform_device s5p6442_device_pcm0;
 extern struct platform_device s5p6442_device_pcm1;
 extern struct platform_device s5p6442_device_iis0;
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h
index 4a5b284a155..7ef4115b8c4 100644
--- a/arch/m68k/include/asm/irqflags.h
+++ b/arch/m68k/include/asm/irqflags.h
@@ -2,7 +2,9 @@
 #define _M68K_IRQFLAGS_H
 
 #include <linux/types.h>
+#ifdef CONFIG_MMU
 #include <linux/hardirq.h>
+#endif
 #include <linux/preempt.h>
 #include <asm/thread_info.h>
 #include <asm/entry.h>
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 789f3b2de0e..415d5484916 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,5 +40,6 @@ extern unsigned long hw_timer_offset(void);
 extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
 
 extern void config_BSP(char *command, int len);
+extern void do_IRQ(int irq, struct pt_regs *fp);
 
 #endif /* _M68K_MACHDEP_H */
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 428d0e538ae..b06bdae0406 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -127,7 +127,7 @@ static void kvm_patch_ins_nop(u32 *inst)
 
 static void kvm_patch_ins_b(u32 *inst, int addr)
 {
-#ifdef CONFIG_RELOCATABLE
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
 	/* On relocatable kernels interrupts handlers and our code
 	   can be in different regions, so we don't patch them */
 
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 049846911ce..1cc471faac2 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -416,7 +416,7 @@ lightweight_exit:
 	lwz	r3, VCPU_PC(r4)
 	mtsrr0	r3
 	lwz	r3, VCPU_SHARED(r4)
-	lwz	r3, VCPU_SHARED_MSR(r3)
+	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
 	oris	r3, r3, KVMPPC_MSR_MASK@h
 	ori	r3, r3, KVMPPC_MSR_MASK@l
 	mtsrr1	r3
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 71750f2dd5d..e3768ee9b59 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -138,8 +138,8 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared);
-	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kvm_vcpu_uninit(vcpu);
+	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2f87a1627f6..38f756f2505 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -617,6 +617,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	switch (ioctl) {
 	case KVM_PPC_GET_PVINFO: {
 		struct kvm_ppc_pvinfo pvinfo;
+		memset(&pvinfo, 0, sizeof(pvinfo));
 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
 			r = -EFAULT;
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 46fa04f12a9..a021f5827a3 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -35,7 +35,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
 	int i;
 
 	/* pause guest execution to avoid concurrent updates */
-	local_irq_disable();
 	mutex_lock(&vcpu->mutex);
 
 	vcpu->arch.last_exit_type = 0xDEAD;
@@ -51,7 +50,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
 	vcpu->arch.timing_last_enter.tv64 = 0;
 
 	mutex_unlock(&vcpu->mutex);
-	local_irq_enable();
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5c075f562eb..7f217b3a50a 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -193,6 +193,7 @@ config CPU_SH2
 config CPU_SH2A
 	bool
 	select CPU_SH2
+	select UNCACHED_MAPPING
 
 config CPU_SH3
 	bool
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 307b3a4a790..9c8c6e1a2a1 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -133,10 +133,7 @@ machdir-$(CONFIG_SOLUTION_ENGINE) += mach-se
 machdir-$(CONFIG_SH_HP6XX)			+= mach-hp6xx
 machdir-$(CONFIG_SH_DREAMCAST)			+= mach-dreamcast
 machdir-$(CONFIG_SH_SH03)			+= mach-sh03
-machdir-$(CONFIG_SH_SECUREEDGE5410)		+= mach-snapgear
 machdir-$(CONFIG_SH_RTS7751R2D)			+= mach-r2d
-machdir-$(CONFIG_SH_7751_SYSTEMH)		+= mach-systemh
-machdir-$(CONFIG_SH_EDOSK7705)			+= mach-edosk7705
 machdir-$(CONFIG_SH_HIGHLANDER)			+= mach-highlander
 machdir-$(CONFIG_SH_MIGOR)			+= mach-migor
 machdir-$(CONFIG_SH_AP325RXA)			+= mach-ap325rxa
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 9c94711aa6c..2018c7ea4c9 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -81,13 +81,6 @@ config SH_7343_SOLUTION_ENGINE
 	  Select 7343 SolutionEngine if configuring for a Hitachi
 	  SH7343 (SH-Mobile 3AS) evaluation board.
 
-config SH_7751_SYSTEMH
-	bool "SystemH7751R"
-	depends on CPU_SUBTYPE_SH7751R
-	help
-	  Select SystemH if you are configuring for a Renesas SystemH
-	  7751R evaluation board.
-
 config SH_HP6XX
 	bool "HP6XX"
 	select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index 38ef655cc0f..be7d11d04b2 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -2,10 +2,12 @@
 # Specific board support, not covered by a mach group.
 #
 obj-$(CONFIG_SH_MAGIC_PANEL_R2)	+= board-magicpanelr2.o
+obj-$(CONFIG_SH_SECUREEDGE5410)	+= board-secureedge5410.o
 obj-$(CONFIG_SH_SH2007)		+= board-sh2007.o
 obj-$(CONFIG_SH_SH7785LCR)	+= board-sh7785lcr.o
 obj-$(CONFIG_SH_URQUELL)	+= board-urquell.o
 obj-$(CONFIG_SH_SHMIN)		+= board-shmin.o
+obj-$(CONFIG_SH_EDOSK7705)	+= board-edosk7705.o
 obj-$(CONFIG_SH_EDOSK7760)	+= board-edosk7760.o
 obj-$(CONFIG_SH_ESPT)		+= board-espt.o
 obj-$(CONFIG_SH_POLARIS)	+= board-polaris.o
diff --git a/arch/sh/boards/board-edosk7705.c b/arch/sh/boards/board-edosk7705.c
new file mode 100644
index 00000000000..4cb3bb74c36
--- /dev/null
+++ b/arch/sh/boards/board-edosk7705.c
@@ -0,0 +1,78 @@
1/*
2 * arch/sh/boards/renesas/edosk7705/setup.c
3 *
4 * Copyright (C) 2000 Kazumoto Kojima
5 *
6 * Hitachi SolutionEngine Support.
7 *
8 * Modified for edosk7705 development
9 * board by S. Dunn, 2003.
10 */
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/platform_device.h>
14#include <linux/interrupt.h>
15#include <linux/smc91x.h>
16#include <asm/machvec.h>
17#include <asm/sizes.h>
18
19#define SMC_IOBASE 0xA2000000
20#define SMC_IO_OFFSET 0x300
21#define SMC_IOADDR (SMC_IOBASE + SMC_IO_OFFSET)
22
23#define ETHERNET_IRQ 0x09
24
25static void __init sh_edosk7705_init_irq(void)
26{
27 make_imask_irq(ETHERNET_IRQ);
28}
29
30/* eth initialization functions */
31static struct smc91x_platdata smc91x_info = {
32 .flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL,
33};
34
35static struct resource smc91x_res[] = {
36 [0] = {
37 .start = SMC_IOADDR,
38 .end = SMC_IOADDR + SZ_32 - 1,
39 .flags = IORESOURCE_MEM,
40 },
41 [1] = {
42 .start = ETHERNET_IRQ,
43 .end = ETHERNET_IRQ,
44 .flags = IORESOURCE_IRQ ,
45 }
46};
47
48static struct platform_device smc91x_dev = {
49 .name = "smc91x",
50 .id = -1,
51 .num_resources = ARRAY_SIZE(smc91x_res),
52 .resource = smc91x_res,
53
54 .dev = {
55 .platform_data = &smc91x_info,
56 },
57};
58
59/* platform init code */
60static struct platform_device *edosk7705_devices[] __initdata = {
61 &smc91x_dev,
62};
63
64static int __init init_edosk7705_devices(void)
65{
66 return platform_add_devices(edosk7705_devices,
67 ARRAY_SIZE(edosk7705_devices));
68}
69__initcall(init_edosk7705_devices);
70
71/*
72 * The Machine Vector
73 */
74static struct sh_machine_vector mv_edosk7705 __initmv = {
75 .mv_name = "EDOSK7705",
76 .mv_nr_irqs = 80,
77 .mv_init_irq = sh_edosk7705_init_irq,
78};
diff --git a/arch/sh/boards/mach-snapgear/setup.c b/arch/sh/boards/board-secureedge5410.c
index 331745dee37..32f875e8493 100644
--- a/arch/sh/boards/mach-snapgear/setup.c
+++ b/arch/sh/boards/board-secureedge5410.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * linux/arch/sh/boards/snapgear/setup.c
3 *
4 * Copyright (C) 2002 David McCullough <davidm@snapgear.com> 2 * Copyright (C) 2002 David McCullough <davidm@snapgear.com>
5 * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org> 3 * Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
6 * 4 *
@@ -19,18 +17,19 @@
19#include <linux/module.h> 17#include <linux/module.h>
20#include <linux/sched.h> 18#include <linux/sched.h>
21#include <asm/machvec.h> 19#include <asm/machvec.h>
22#include <mach/snapgear.h> 20#include <mach/secureedge5410.h>
23#include <asm/irq.h> 21#include <asm/irq.h>
24#include <asm/io.h> 22#include <asm/io.h>
25#include <cpu/timer.h> 23#include <cpu/timer.h>
26 24
25unsigned short secureedge5410_ioport;
26
27/* 27/*
28 * EraseConfig handling functions 28 * EraseConfig handling functions
29 */ 29 */
30
31static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id) 30static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
32{ 31{
33 (void)__raw_readb(0xb8000000); /* dummy read */ 32 ctrl_delay(); /* dummy read */
34 33
35 printk("SnapGear: erase switch interrupt!\n"); 34 printk("SnapGear: erase switch interrupt!\n");
36 35
@@ -39,21 +38,22 @@ static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
39 38
40static int __init eraseconfig_init(void) 39static int __init eraseconfig_init(void)
41{ 40{
41 unsigned int irq = evt2irq(0x240);
42
42 printk("SnapGear: EraseConfig init\n"); 43 printk("SnapGear: EraseConfig init\n");
44
43 /* Setup "EraseConfig" switch on external IRQ 0 */ 45 /* Setup "EraseConfig" switch on external IRQ 0 */
44 if (request_irq(IRL0_IRQ, eraseconfig_interrupt, IRQF_DISABLED, 46 if (request_irq(irq, eraseconfig_interrupt, IRQF_DISABLED,
45 "Erase Config", NULL)) 47 "Erase Config", NULL))
46 printk("SnapGear: failed to register IRQ%d for Reset witch\n", 48 printk("SnapGear: failed to register IRQ%d for Reset witch\n",
47 IRL0_IRQ); 49 irq);
48 else 50 else
49 printk("SnapGear: registered EraseConfig switch on IRQ%d\n", 51 printk("SnapGear: registered EraseConfig switch on IRQ%d\n",
50 IRL0_IRQ); 52 irq);
51 return(0); 53 return 0;
52} 54}
53
54module_init(eraseconfig_init); 55module_init(eraseconfig_init);
55 56
56/****************************************************************************/
57/* 57/*
58 * Initialize IRQ setting 58 * Initialize IRQ setting
59 * 59 *
@@ -62,7 +62,6 @@ module_init(eraseconfig_init);
62 * IRL2 = eth1 62 * IRL2 = eth1
63 * IRL3 = crypto 63 * IRL3 = crypto
64 */ 64 */
65
66static void __init init_snapgear_IRQ(void) 65static void __init init_snapgear_IRQ(void)
67{ 66{
68 printk("Setup SnapGear IRQ/IPR ...\n"); 67 printk("Setup SnapGear IRQ/IPR ...\n");
@@ -76,20 +75,5 @@ static void __init init_snapgear_IRQ(void)
76static struct sh_machine_vector mv_snapgear __initmv = { 75static struct sh_machine_vector mv_snapgear __initmv = {
77 .mv_name = "SnapGear SecureEdge5410", 76 .mv_name = "SnapGear SecureEdge5410",
78 .mv_nr_irqs = 72, 77 .mv_nr_irqs = 72,
79
80 .mv_inb = snapgear_inb,
81 .mv_inw = snapgear_inw,
82 .mv_inl = snapgear_inl,
83 .mv_outb = snapgear_outb,
84 .mv_outw = snapgear_outw,
85 .mv_outl = snapgear_outl,
86
87 .mv_inb_p = snapgear_inb_p,
88 .mv_inw_p = snapgear_inw,
89 .mv_inl_p = snapgear_inl,
90 .mv_outb_p = snapgear_outb_p,
91 .mv_outw_p = snapgear_outw,
92 .mv_outl_p = snapgear_outl,
93
94 .mv_init_irq = init_snapgear_IRQ, 78 .mv_init_irq = init_snapgear_IRQ,
95}; 79};
diff --git a/arch/sh/boards/mach-edosk7705/Makefile b/arch/sh/boards/mach-edosk7705/Makefile
deleted file mode 100644
index cd54acb5149..00000000000
--- a/arch/sh/boards/mach-edosk7705/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the EDOSK7705 specific parts of the kernel
-#
-
-obj-y	 := setup.o io.o
diff --git a/arch/sh/boards/mach-edosk7705/io.c b/arch/sh/boards/mach-edosk7705/io.c
deleted file mode 100644
index 5b9c57c4324..00000000000
--- a/arch/sh/boards/mach-edosk7705/io.c
+++ /dev/null
@@ -1,71 +0,0 @@
1/*
2 * arch/sh/boards/renesas/edosk7705/io.c
3 *
4 * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
5 * Based largely on io_se.c.
6 *
7 * I/O routines for Hitachi EDOSK7705 board.
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/io.h>
14#include <mach/edosk7705.h>
15#include <asm/addrspace.h>
16
17#define SMC_IOADDR 0xA2000000
18
19/* Map the Ethernet addresses as if it is at 0x300 - 0x320 */
20static unsigned long sh_edosk7705_isa_port2addr(unsigned long port)
21{
22 /*
23 * SMC91C96 registers are 4 byte aligned rather than the
24 * usual 2 byte!
25 */
26 if (port >= 0x300 && port < 0x320)
27 return SMC_IOADDR + ((port - 0x300) * 2);
28
29 maybebadio(port);
30 return port;
31}
32
33/* Trying to read / write bytes on odd-byte boundaries to the Ethernet
34 * registers causes problems. So we bit-shift the value and read / write
35 * in 2 byte chunks. Setting the low byte to 0 does not cause problems
36 * now as odd byte writes are only made on the bit mask / interrupt
37 * register. This may not be the case in future Mar-2003 SJD
38 */
39unsigned char sh_edosk7705_inb(unsigned long port)
40{
41 if (port >= 0x300 && port < 0x320 && port & 0x01)
42 return __raw_readw(port - 1) >> 8;
43
44 return __raw_readb(sh_edosk7705_isa_port2addr(port));
45}
46
47void sh_edosk7705_outb(unsigned char value, unsigned long port)
48{
49 if (port >= 0x300 && port < 0x320 && port & 0x01) {
50 __raw_writew(((unsigned short)value << 8), port - 1);
51 return;
52 }
53
54 __raw_writeb(value, sh_edosk7705_isa_port2addr(port));
55}
56
57void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count)
58{
59 unsigned char *p = addr;
60
61 while (count--)
62 *p++ = sh_edosk7705_inb(port);
63}
64
65void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count)
66{
67 unsigned char *p = (unsigned char *)addr;
68
69 while (count--)
70 sh_edosk7705_outb(*p++, port);
71}
diff --git a/arch/sh/boards/mach-edosk7705/setup.c b/arch/sh/boards/mach-edosk7705/setup.c
deleted file mode 100644
index d59225e26fb..00000000000
--- a/arch/sh/boards/mach-edosk7705/setup.c
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * arch/sh/boards/renesas/edosk7705/setup.c
3 *
4 * Copyright (C) 2000 Kazumoto Kojima
5 *
6 * Hitachi SolutionEngine Support.
7 *
8 * Modified for edosk7705 development
9 * board by S. Dunn, 2003.
10 */
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <asm/machvec.h>
14#include <mach/edosk7705.h>
15
16static void __init sh_edosk7705_init_irq(void)
17{
18 /* This is the Ethernet interrupt */
19 make_imask_irq(0x09);
20}
21
22/*
23 * The Machine Vector
24 */
25static struct sh_machine_vector mv_edosk7705 __initmv = {
26 .mv_name = "EDOSK7705",
27 .mv_nr_irqs = 80,
28
29 .mv_inb = sh_edosk7705_inb,
30 .mv_outb = sh_edosk7705_outb,
31
32 .mv_insb = sh_edosk7705_insb,
33 .mv_outsb = sh_edosk7705_outsb,
34
35 .mv_init_irq = sh_edosk7705_init_irq,
36};
diff --git a/arch/sh/boards/mach-microdev/io.c b/arch/sh/boards/mach-microdev/io.c
index 2960c659020..acdafb0c640 100644
--- a/arch/sh/boards/mach-microdev/io.c
+++ b/arch/sh/boards/mach-microdev/io.c
@@ -54,7 +54,7 @@
54/* 54/*
55 * map I/O ports to memory-mapped addresses 55 * map I/O ports to memory-mapped addresses
56 */ 56 */
57static unsigned long microdev_isa_port2addr(unsigned long offset) 57void __iomem *microdev_ioport_map(unsigned long offset, unsigned int len)
58{ 58{
59 unsigned long result; 59 unsigned long result;
60 60
@@ -72,16 +72,6 @@ static unsigned long microdev_isa_port2addr(unsigned long offset)
72 * Configuration Registers 72 * Configuration Registers
73 */ 73 */
74 result = IO_SUPERIO_PHYS + (offset << 1); 74 result = IO_SUPERIO_PHYS + (offset << 1);
75#if 0
76 } else if (offset == KBD_DATA_REG || offset == KBD_CNTL_REG ||
77 offset == KBD_STATUS_REG) {
78 /*
79 * SMSC FDC37C93xAPM SuperIO chip
80 *
81 * PS/2 Keyboard + Mouse (ports 0x60 and 0x64).
82 */
83 result = IO_SUPERIO_PHYS + (offset << 1);
84#endif
85 } else if (((offset >= IO_IDE1_BASE) && 75 } else if (((offset >= IO_IDE1_BASE) &&
86 (offset < IO_IDE1_BASE + IO_IDE_EXTENT)) || 76 (offset < IO_IDE1_BASE + IO_IDE_EXTENT)) ||
87 (offset == IO_IDE1_MISC)) { 77 (offset == IO_IDE1_MISC)) {
@@ -131,237 +121,5 @@ static unsigned long microdev_isa_port2addr(unsigned long offset)
131 result = PVR; 121 result = PVR;
132 } 122 }
133 123
134 return result; 124 return (void __iomem *)result;
135}
136
137#define PORT2ADDR(x) (microdev_isa_port2addr(x))
138
139static inline void delay(void)
140{
141#if defined(CONFIG_PCI)
142 /* System board present, just make a dummy SRAM access. (CS0 will be
143 mapped to PCI memory, probably good to avoid it.) */
144 __raw_readw(0xa6800000);
145#else
146 /* CS0 will be mapped to flash, ROM etc so safe to access it. */
147 __raw_readw(0xa0000000);
148#endif
149}
150
151unsigned char microdev_inb(unsigned long port)
152{
153#ifdef CONFIG_PCI
154 if (port >= PCIBIOS_MIN_IO)
155 return microdev_pci_inb(port);
156#endif
157 return *(volatile unsigned char*)PORT2ADDR(port);
158}
159
160unsigned short microdev_inw(unsigned long port)
161{
162#ifdef CONFIG_PCI
163 if (port >= PCIBIOS_MIN_IO)
164 return microdev_pci_inw(port);
165#endif
166 return *(volatile unsigned short*)PORT2ADDR(port);
167}
168
169unsigned int microdev_inl(unsigned long port)
170{
171#ifdef CONFIG_PCI
172 if (port >= PCIBIOS_MIN_IO)
173 return microdev_pci_inl(port);
174#endif
175 return *(volatile unsigned int*)PORT2ADDR(port);
176}
177
178void microdev_outw(unsigned short b, unsigned long port)
179{
180#ifdef CONFIG_PCI
181 if (port >= PCIBIOS_MIN_IO) {
182 microdev_pci_outw(b, port);
183 return;
184 }
185#endif
186 *(volatile unsigned short*)PORT2ADDR(port) = b;
187}
188
189void microdev_outb(unsigned char b, unsigned long port)
190{
191#ifdef CONFIG_PCI
192 if (port >= PCIBIOS_MIN_IO) {
193 microdev_pci_outb(b, port);
194 return;
195 }
196#endif
197
198 /*
199 * There is a board feature with the current SH4-202 MicroDev in
200 * that the 2 byte enables (nBE0 and nBE1) are tied together (and
201 * to the Chip Select Line (Ethernet_CS)). Due to this connectivity,
202 * it is not possible to safely perform 8-bit writes to the
203 * Ethernet registers, as 16-bits will be consumed from the Data
204 * lines (corrupting the other byte). Hence, this function is
205 * written to implement 16-bit read/modify/write for all byte-wide
206 * accesses.
207 *
208 * Note: there is no problem with byte READS (even or odd).
209 *
210 * Sean McGoogan - 16th June 2003.
211 */
212 if ((port >= IO_LAN91C111_BASE) &&
213 (port < IO_LAN91C111_BASE + IO_LAN91C111_EXTENT)) {
214 /*
215 * Then are trying to perform a byte-write to the
216 * LAN91C111. This needs special care.
217 */
218 if (port % 2 == 1) { /* is the port odd ? */
219 /* unset bit-0, i.e. make even */
220 const unsigned long evenPort = port-1;
221 unsigned short word;
222
223 /*
224 * do a 16-bit read/write to write to 'port',
225 * preserving even byte.
226 *
227 * Even addresses are bits 0-7
228 * Odd addresses are bits 8-15
229 */
230 word = microdev_inw(evenPort);
231 word = (word & 0xffu) | (b << 8);
232 microdev_outw(word, evenPort);
233 } else {
234 /* else, we are trying to do an even byte write */
235 unsigned short word;
236
237 /*
238 * do a 16-bit read/write to write to 'port',
239 * preserving odd byte.
240 *
241 * Even addresses are bits 0-7
242 * Odd addresses are bits 8-15
243 */
244 word = microdev_inw(port);
245 word = (word & 0xff00u) | (b);
246 microdev_outw(word, port);
247 }
248 } else {
249 *(volatile unsigned char*)PORT2ADDR(port) = b;
250 }
251}
252
253void microdev_outl(unsigned int b, unsigned long port)
254{
255#ifdef CONFIG_PCI
256 if (port >= PCIBIOS_MIN_IO) {
257 microdev_pci_outl(b, port);
258 return;
259 }
260#endif
261 *(volatile unsigned int*)PORT2ADDR(port) = b;
262}
263
264unsigned char microdev_inb_p(unsigned long port)
265{
266 unsigned char v = microdev_inb(port);
267 delay();
268 return v;
269}
270
271unsigned short microdev_inw_p(unsigned long port)
272{
273 unsigned short v = microdev_inw(port);
274 delay();
275 return v;
276}
277
278unsigned int microdev_inl_p(unsigned long port)
279{
280 unsigned int v = microdev_inl(port);
281 delay();
282 return v;
283}
284
285void microdev_outb_p(unsigned char b, unsigned long port)
286{
287 microdev_outb(b, port);
288 delay();
289}
290
291void microdev_outw_p(unsigned short b, unsigned long port)
292{
293 microdev_outw(b, port);
294 delay();
295}
296
297void microdev_outl_p(unsigned int b, unsigned long port)
298{
299 microdev_outl(b, port);
300 delay();
301}
302
303void microdev_insb(unsigned long port, void *buffer, unsigned long count)
304{
305 volatile unsigned char *port_addr;
306 unsigned char *buf = buffer;
307
308 port_addr = (volatile unsigned char *)PORT2ADDR(port);
309
310 while (count--)
311 *buf++ = *port_addr;
312}
313
314void microdev_insw(unsigned long port, void *buffer, unsigned long count)
315{
316 volatile unsigned short *port_addr;
317 unsigned short *buf = buffer;
318
319 port_addr = (volatile unsigned short *)PORT2ADDR(port);
320
321 while (count--)
322 *buf++ = *port_addr;
323}
324
325void microdev_insl(unsigned long port, void *buffer, unsigned long count)
326{
327 volatile unsigned long *port_addr;
328 unsigned int *buf = buffer;
329
330 port_addr = (volatile unsigned long *)PORT2ADDR(port);
331
332 while (count--)
333 *buf++ = *port_addr;
334}
335
336void microdev_outsb(unsigned long port, const void *buffer, unsigned long count)
337{
338 volatile unsigned char *port_addr;
339 const unsigned char *buf = buffer;
340
341 port_addr = (volatile unsigned char *)PORT2ADDR(port);
342
343 while (count--)
344 *port_addr = *buf++;
345}
346
347void microdev_outsw(unsigned long port, const void *buffer, unsigned long count)
348{
349 volatile unsigned short *port_addr;
350 const unsigned short *buf = buffer;
351
352 port_addr = (volatile unsigned short *)PORT2ADDR(port);
353
354 while (count--)
355 *port_addr = *buf++;
356}
357
358void microdev_outsl(unsigned long port, const void *buffer, unsigned long count)
359{
360 volatile unsigned long *port_addr;
361 const unsigned int *buf = buffer;
362
363 port_addr = (volatile unsigned long *)PORT2ADDR(port);
364
365 while (count--)
366 *port_addr = *buf++;
367} 125}
diff --git a/arch/sh/boards/mach-microdev/setup.c b/arch/sh/boards/mach-microdev/setup.c
index d1df2a4fb9b..d8a747291e0 100644
--- a/arch/sh/boards/mach-microdev/setup.c
+++ b/arch/sh/boards/mach-microdev/setup.c
@@ -195,27 +195,6 @@ device_initcall(microdev_devices_setup);
195static struct sh_machine_vector mv_sh4202_microdev __initmv = { 195static struct sh_machine_vector mv_sh4202_microdev __initmv = {
196 .mv_name = "SH4-202 MicroDev", 196 .mv_name = "SH4-202 MicroDev",
197 .mv_nr_irqs = 72, 197 .mv_nr_irqs = 72,
198 198 .mv_ioport_map = microdev_ioport_map,
199 .mv_inb = microdev_inb,
200 .mv_inw = microdev_inw,
201 .mv_inl = microdev_inl,
202 .mv_outb = microdev_outb,
203 .mv_outw = microdev_outw,
204 .mv_outl = microdev_outl,
205
206 .mv_inb_p = microdev_inb_p,
207 .mv_inw_p = microdev_inw_p,
208 .mv_inl_p = microdev_inl_p,
209 .mv_outb_p = microdev_outb_p,
210 .mv_outw_p = microdev_outw_p,
211 .mv_outl_p = microdev_outl_p,
212
213 .mv_insb = microdev_insb,
214 .mv_insw = microdev_insw,
215 .mv_insl = microdev_insl,
216 .mv_outsb = microdev_outsb,
217 .mv_outsw = microdev_outsw,
218 .mv_outsl = microdev_outsl,
219
220 .mv_init_irq = init_microdev_irq, 199 .mv_init_irq = init_microdev_irq,
221}; 200};
diff --git a/arch/sh/boards/mach-se/7206/Makefile b/arch/sh/boards/mach-se/7206/Makefile
index 63e7ed699f3..5c9eaa0535b 100644
--- a/arch/sh/boards/mach-se/7206/Makefile
+++ b/arch/sh/boards/mach-se/7206/Makefile
@@ -2,4 +2,4 @@
 # Makefile for the 7206 SolutionEngine specific parts of the kernel
 #
 
-obj-y	 := setup.o io.o irq.o
+obj-y	 := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7206/io.c b/arch/sh/boards/mach-se/7206/io.c
deleted file mode 100644
index adadc77532e..00000000000
--- a/arch/sh/boards/mach-se/7206/io.c
+++ /dev/null
@@ -1,104 +0,0 @@
1/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $
2 *
3 * linux/arch/sh/boards/se/7206/io.c
4 *
5 * Copyright (C) 2006 Yoshinori Sato
6 *
7 * I/O routine for Hitachi 7206 SolutionEngine.
8 *
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <asm/io.h>
14#include <mach-se/mach/se7206.h>
15
16
17static inline void delay(void)
18{
19 __raw_readw(0x20000000); /* P2 ROM Area */
20}
21
22/* MS7750 requires special versions of in*, out* routines, since
23 PC-like io ports are located at upper half byte of 16-bit word which
24 can be accessed only with 16-bit wide. */
25
26static inline volatile __u16 *
27port2adr(unsigned int port)
28{
29 if (port >= 0x2000 && port < 0x2020)
30 return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
31 else if (port >= 0x300 && port < 0x310)
32 return (volatile __u16 *) (PA_SMSC + (port - 0x300));
33
34 return (volatile __u16 *)port;
35}
36
37unsigned char se7206_inb(unsigned long port)
38{
39 return (*port2adr(port)) & 0xff;
40}
41
42unsigned char se7206_inb_p(unsigned long port)
43{
44 unsigned long v;
45
46 v = (*port2adr(port)) & 0xff;
47 delay();
48 return v;
49}
50
51unsigned short se7206_inw(unsigned long port)
52{
53 return *port2adr(port);
54}
55
56void se7206_outb(unsigned char value, unsigned long port)
57{
58 *(port2adr(port)) = value;
59}
60
61void se7206_outb_p(unsigned char value, unsigned long port)
62{
63 *(port2adr(port)) = value;
64 delay();
65}
66
67void se7206_outw(unsigned short value, unsigned long port)
68{
69 *port2adr(port) = value;
70}
71
72void se7206_insb(unsigned long port, void *addr, unsigned long count)
73{
74 volatile __u16 *p = port2adr(port);
75 __u8 *ap = addr;
76
77 while (count--)
78 *ap++ = *p;
79}
80
81void se7206_insw(unsigned long port, void *addr, unsigned long count)
82{
83 volatile __u16 *p = port2adr(port);
84 __u16 *ap = addr;
85 while (count--)
86 *ap++ = *p;
87}
88
89void se7206_outsb(unsigned long port, const void *addr, unsigned long count)
90{
91 volatile __u16 *p = port2adr(port);
92 const __u8 *ap = addr;
93
94 while (count--)
95 *p = *ap++;
96}
97
98void se7206_outsw(unsigned long port, const void *addr, unsigned long count)
99{
100 volatile __u16 *p = port2adr(port);
101 const __u16 *ap = addr;
102 while (count--)
103 *p = *ap++;
104}
diff --git a/arch/sh/boards/mach-se/7206/irq.c b/arch/sh/boards/mach-se/7206/irq.c
index 883b21eacaa..d961949600f 100644
--- a/arch/sh/boards/mach-se/7206/irq.c
+++ b/arch/sh/boards/mach-se/7206/irq.c
@@ -139,11 +139,13 @@ void __init init_se7206_IRQ(void)
 	make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
 	make_se7206_irq(IRQ1_IRQ); /* ATA */
 	make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
-	__raw_writew(inw(INTC_ICR1) | 0x000b ,INTC_ICR1 ) ; /* ICR1 */
+
+	__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR); /* ICR1 */
 
 	/* FPGA System register setup*/
 	__raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */
 	__raw_writew(0x0000,INTSTS1); /* Clear INTSTS1 */
+
 	/* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
 	__raw_writew(0x0001,INTSEL);
 }
diff --git a/arch/sh/boards/mach-se/7206/setup.c b/arch/sh/boards/mach-se/7206/setup.c
index 8f5c65d43d1..7f4871c71a0 100644
--- a/arch/sh/boards/mach-se/7206/setup.c
+++ b/arch/sh/boards/mach-se/7206/setup.c
@@ -86,20 +86,5 @@ __initcall(se7206_devices_setup);
 static struct sh_machine_vector mv_se __initmv = {
 	.mv_name		= "SolutionEngine",
 	.mv_nr_irqs		= 256,
-	.mv_inb			= se7206_inb,
-	.mv_inw			= se7206_inw,
-	.mv_outb		= se7206_outb,
-	.mv_outw		= se7206_outw,
-
-	.mv_inb_p		= se7206_inb_p,
-	.mv_inw_p		= se7206_inw,
-	.mv_outb_p		= se7206_outb_p,
-	.mv_outw_p		= se7206_outw,
-
-	.mv_insb		= se7206_insb,
-	.mv_insw		= se7206_insw,
-	.mv_outsb		= se7206_outsb,
-	.mv_outsw		= se7206_outsw,
-
 	.mv_init_irq		= init_se7206_IRQ,
 };
diff --git a/arch/sh/boards/mach-se/770x/Makefile b/arch/sh/boards/mach-se/770x/Makefile
index 8e624b06d5e..43ea14feef5 100644
--- a/arch/sh/boards/mach-se/770x/Makefile
+++ b/arch/sh/boards/mach-se/770x/Makefile
@@ -2,4 +2,4 @@
 # Makefile for the 770x SolutionEngine specific parts of the kernel
 #
 
-obj-y	 := setup.o io.o irq.o
+obj-y	 := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/770x/io.c b/arch/sh/boards/mach-se/770x/io.c
deleted file mode 100644
index 28833c8786e..00000000000
--- a/arch/sh/boards/mach-se/770x/io.c
+++ /dev/null
@@ -1,156 +0,0 @@
1/*
2 * Copyright (C) 2000 Kazumoto Kojima
3 *
4 * I/O routine for Hitachi SolutionEngine.
5 */
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <asm/io.h>
9#include <mach-se/mach/se.h>
10
11/* MS7750 requires special versions of in*, out* routines, since
12 PC-like io ports are located at upper half byte of 16-bit word which
13 can be accessed only with 16-bit wide. */
14
15static inline volatile __u16 *
16port2adr(unsigned int port)
17{
18 if (port & 0xff000000)
19 return ( volatile __u16 *) port;
20 if (port >= 0x2000)
21 return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
22 else if (port >= 0x1000)
23 return (volatile __u16 *) (PA_83902 + (port << 1));
24 else
25 return (volatile __u16 *) (PA_SUPERIO + (port << 1));
26}
27
28static inline int
29shifted_port(unsigned long port)
30{
31 /* For IDE registers, value is not shifted */
32 if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
33 return 0;
34 else
35 return 1;
36}
37
38unsigned char se_inb(unsigned long port)
39{
40 if (shifted_port(port))
41 return (*port2adr(port) >> 8);
42 else
43 return (*port2adr(port))&0xff;
44}
45
46unsigned char se_inb_p(unsigned long port)
47{
48 unsigned long v;
49
50 if (shifted_port(port))
51 v = (*port2adr(port) >> 8);
52 else
53 v = (*port2adr(port))&0xff;
54 ctrl_delay();
55 return v;
56}
57
58unsigned short se_inw(unsigned long port)
59{
60 if (port >= 0x2000)
61 return *port2adr(port);
62 else
63 maybebadio(port);
64 return 0;
65}
66
67unsigned int se_inl(unsigned long port)
68{
69 maybebadio(port);
70 return 0;
71}
72
73void se_outb(unsigned char value, unsigned long port)
74{
75 if (shifted_port(port))
76 *(port2adr(port)) = value << 8;
77 else
78 *(port2adr(port)) = value;
79}
80
81void se_outb_p(unsigned char value, unsigned long port)
82{
83 if (shifted_port(port))
84 *(port2adr(port)) = value << 8;
85 else
86 *(port2adr(port)) = value;
87 ctrl_delay();
88}
89
90void se_outw(unsigned short value, unsigned long port)
91{
92 if (port >= 0x2000)
93 *port2adr(port) = value;
94 else
95 maybebadio(port);
96}
97
98void se_outl(unsigned int value, unsigned long port)
99{
100 maybebadio(port);
101}
102
103void se_insb(unsigned long port, void *addr, unsigned long count)
104{
105 volatile __u16 *p = port2adr(port);
106 __u8 *ap = addr;
107
108 if (shifted_port(port)) {
109 while (count--)
110 *ap++ = *p >> 8;
111 } else {
112 while (count--)
113 *ap++ = *p;
114 }
115}
116
117void se_insw(unsigned long port, void *addr, unsigned long count)
118{
119 volatile __u16 *p = port2adr(port);
120 __u16 *ap = addr;
121 while (count--)
122 *ap++ = *p;
123}
124
125void se_insl(unsigned long port, void *addr, unsigned long count)
126{
127 maybebadio(port);
128}
129
130void se_outsb(unsigned long port, const void *addr, unsigned long count)
131{
132 volatile __u16 *p = port2adr(port);
133 const __u8 *ap = addr;
134
135 if (shifted_port(port)) {
136 while (count--)
137 *p = *ap++ << 8;
138 } else {
139 while (count--)
140 *p = *ap++;
141 }
142}
143
144void se_outsw(unsigned long port, const void *addr, unsigned long count)
145{
146 volatile __u16 *p = port2adr(port);
147 const __u16 *ap = addr;
148
149 while (count--)
150 *p = *ap++;
151}
152
153void se_outsl(unsigned long port, const void *addr, unsigned long count)
154{
155 maybebadio(port);
156}
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 66d39d1b090..31330c65c0c 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -195,27 +195,5 @@ static struct sh_machine_vector mv_se __initmv = {
 #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
 	.mv_nr_irqs		= 104,
 #endif
-
-	.mv_inb			= se_inb,
-	.mv_inw			= se_inw,
-	.mv_inl			= se_inl,
-	.mv_outb		= se_outb,
-	.mv_outw		= se_outw,
-	.mv_outl		= se_outl,
-
-	.mv_inb_p		= se_inb_p,
-	.mv_inw_p		= se_inw,
-	.mv_inl_p		= se_inl,
-	.mv_outb_p		= se_outb_p,
-	.mv_outw_p		= se_outw,
-	.mv_outl_p		= se_outl,
-
-	.mv_insb		= se_insb,
-	.mv_insw		= se_insw,
-	.mv_insl		= se_insl,
-	.mv_outsb		= se_outsb,
-	.mv_outsw		= se_outsw,
-	.mv_outsl		= se_outsl,
-
 	.mv_init_irq		= init_se_IRQ,
 };
diff --git a/arch/sh/boards/mach-se/7751/Makefile b/arch/sh/boards/mach-se/7751/Makefile
index e6f4341bfe6..a338fd9d503 100644
--- a/arch/sh/boards/mach-se/7751/Makefile
+++ b/arch/sh/boards/mach-se/7751/Makefile
@@ -2,4 +2,4 @@
 # Makefile for the 7751 SolutionEngine specific parts of the kernel
 #
 
-obj-y	 := setup.o io.o irq.o
+obj-y	 := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7751/io.c b/arch/sh/boards/mach-se/7751/io.c
deleted file mode 100644
index 6e75bd4459e..00000000000
--- a/arch/sh/boards/mach-se/7751/io.c
+++ /dev/null
@@ -1,119 +0,0 @@
1/*
2 * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
3 * Based largely on io_se.c.
4 *
5 * I/O routine for Hitachi 7751 SolutionEngine.
6 *
7 * Initial version only to support LAN access; some
8 * placeholder code from io_se.c left in with the
9 * expectation of later SuperIO and PCMCIA access.
10 */
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/pci.h>
14#include <asm/io.h>
15#include <mach-se/mach/se7751.h>
16#include <asm/addrspace.h>
17
18static inline volatile u16 *port2adr(unsigned int port)
19{
20 if (port >= 0x2000)
21 return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
22 maybebadio((unsigned long)port);
23 return (volatile __u16*)port;
24}
25
26/*
27 * General outline: remap really low stuff [eventually] to SuperIO,
28 * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
29 * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
30 * should be way beyond the window, and is used w/o translation for
31 * compatibility.
32 */
33unsigned char sh7751se_inb(unsigned long port)
34{
35 if (PXSEG(port))
36 return *(volatile unsigned char *)port;
37 else
38 return (*port2adr(port)) & 0xff;
39}
40
41unsigned char sh7751se_inb_p(unsigned long port)
42{
43 unsigned char v;
44
45 if (PXSEG(port))
46 v = *(volatile unsigned char *)port;
47 else
48 v = (*port2adr(port)) & 0xff;
49 ctrl_delay();
50 return v;
51}
52
53unsigned short sh7751se_inw(unsigned long port)
54{
55 if (PXSEG(port))
56 return *(volatile unsigned short *)port;
57 else if (port >= 0x2000)
58 return *port2adr(port);
59 else
60 maybebadio(port);
61 return 0;
62}
63
64unsigned int sh7751se_inl(unsigned long port)
65{
66 if (PXSEG(port))
67 return *(volatile unsigned long *)port;
68 else if (port >= 0x2000)
69 return *port2adr(port);
70 else
71 maybebadio(port);
72 return 0;
73}
74
75void sh7751se_outb(unsigned char value, unsigned long port)
76{
77
78 if (PXSEG(port))
79 *(volatile unsigned char *)port = value;
80 else
81 *(port2adr(port)) = value;
82}
83
84void sh7751se_outb_p(unsigned char value, unsigned long port)
85{
86 if (PXSEG(port))
87 *(volatile unsigned char *)port = value;
88 else
89 *(port2adr(port)) = value;
90 ctrl_delay();
91}
92
93void sh7751se_outw(unsigned short value, unsigned long port)
94{
95 if (PXSEG(port))
96 *(volatile unsigned short *)port = value;
97 else if (port >= 0x2000)
98 *port2adr(port) = value;
99 else
100 maybebadio(port);
101}
102
103void sh7751se_outl(unsigned int value, unsigned long port)
104{
105 if (PXSEG(port))
106 *(volatile unsigned long *)port = value;
107 else
108 maybebadio(port);
109}
110
111void sh7751se_insl(unsigned long port, void *addr, unsigned long count)
112{
113 maybebadio(port);
114}
115
116void sh7751se_outsl(unsigned long port, const void *addr, unsigned long count)
117{
118 maybebadio(port);
119}
diff --git a/arch/sh/boards/mach-se/7751/setup.c b/arch/sh/boards/mach-se/7751/setup.c
index 50572512e3e..9fbc51beb18 100644
--- a/arch/sh/boards/mach-se/7751/setup.c
+++ b/arch/sh/boards/mach-se/7751/setup.c
@@ -56,23 +56,5 @@ __initcall(se7751_devices_setup);
 static struct sh_machine_vector mv_7751se __initmv = {
 	.mv_name		= "7751 SolutionEngine",
 	.mv_nr_irqs		= 72,
-
-	.mv_inb			= sh7751se_inb,
-	.mv_inw			= sh7751se_inw,
-	.mv_inl			= sh7751se_inl,
-	.mv_outb		= sh7751se_outb,
-	.mv_outw		= sh7751se_outw,
-	.mv_outl		= sh7751se_outl,
-
-	.mv_inb_p		= sh7751se_inb_p,
-	.mv_inw_p		= sh7751se_inw,
-	.mv_inl_p		= sh7751se_inl,
-	.mv_outb_p		= sh7751se_outb_p,
-	.mv_outw_p		= sh7751se_outw,
-	.mv_outl_p		= sh7751se_outl,
-
-	.mv_insl		= sh7751se_insl,
-	.mv_outsl		= sh7751se_outsl,
-
 	.mv_init_irq		= init_7751se_IRQ,
 };
diff --git a/arch/sh/boards/mach-snapgear/Makefile b/arch/sh/boards/mach-snapgear/Makefile
deleted file mode 100644
index d2d2f4b6a50..00000000000
--- a/arch/sh/boards/mach-snapgear/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the SnapGear specific parts of the kernel
-#
-
-obj-y	 := setup.o io.o
diff --git a/arch/sh/boards/mach-snapgear/io.c b/arch/sh/boards/mach-snapgear/io.c
deleted file mode 100644
index 476650e42db..00000000000
--- a/arch/sh/boards/mach-snapgear/io.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * Copyright (C) 2002 David McCullough <davidm@snapgear.com>
3 * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
4 * Based largely on io_se.c.
5 *
6 * I/O routine for Hitachi 7751 SolutionEngine.
7 *
8 * Initial version only to support LAN access; some
9 * placeholder code from io_se.c left in with the
10 * expectation of later SuperIO and PCMCIA access.
11 */
12#include <linux/kernel.h>
13#include <linux/types.h>
14#include <linux/pci.h>
15#include <asm/io.h>
16#include <asm/addrspace.h>
17
18#ifdef CONFIG_SH_SECUREEDGE5410
19unsigned short secureedge5410_ioport;
20#endif
21
22static inline volatile __u16 *port2adr(unsigned int port)
23{
24 maybebadio((unsigned long)port);
25 return (volatile __u16*)port;
26}
27
28/*
29 * General outline: remap really low stuff [eventually] to SuperIO,
30 * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
31 * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
32 * should be way beyond the window, and is used w/o translation for
33 * compatibility.
34 */
35unsigned char snapgear_inb(unsigned long port)
36{
37 if (PXSEG(port))
38 return *(volatile unsigned char *)port;
39 else
40 return (*port2adr(port)) & 0xff;
41}
42
43unsigned char snapgear_inb_p(unsigned long port)
44{
45 unsigned char v;
46
47 if (PXSEG(port))
48 v = *(volatile unsigned char *)port;
49 else
50 v = (*port2adr(port))&0xff;
51 ctrl_delay();
52 return v;
53}
54
55unsigned short snapgear_inw(unsigned long port)
56{
57 if (PXSEG(port))
58 return *(volatile unsigned short *)port;
59 else if (port >= 0x2000)
60 return *port2adr(port);
61 else
62 maybebadio(port);
63 return 0;
64}
65
66unsigned int snapgear_inl(unsigned long port)
67{
68 if (PXSEG(port))
69 return *(volatile unsigned long *)port;
70 else if (port >= 0x2000)
71 return *port2adr(port);
72 else
73 maybebadio(port);
74 return 0;
75}
76
77void snapgear_outb(unsigned char value, unsigned long port)
78{
79
80 if (PXSEG(port))
81 *(volatile unsigned char *)port = value;
82 else
83 *(port2adr(port)) = value;
84}
85
86void snapgear_outb_p(unsigned char value, unsigned long port)
87{
88 if (PXSEG(port))
89 *(volatile unsigned char *)port = value;
90 else
91 *(port2adr(port)) = value;
92 ctrl_delay();
93}
94
95void snapgear_outw(unsigned short value, unsigned long port)
96{
97 if (PXSEG(port))
98 *(volatile unsigned short *)port = value;
99 else if (port >= 0x2000)
100 *port2adr(port) = value;
101 else
102 maybebadio(port);
103}
104
105void snapgear_outl(unsigned int value, unsigned long port)
106{
107 if (PXSEG(port))
108 *(volatile unsigned long *)port = value;
109 else
110 maybebadio(port);
111}
112
113void snapgear_insl(unsigned long port, void *addr, unsigned long count)
114{
115 maybebadio(port);
116}
117
118void snapgear_outsl(unsigned long port, const void *addr, unsigned long count)
119{
120 maybebadio(port);
121}
diff --git a/arch/sh/boards/mach-systemh/Makefile b/arch/sh/boards/mach-systemh/Makefile
deleted file mode 100644
index 2cc6a23d9d3..00000000000
--- a/arch/sh/boards/mach-systemh/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Makefile for the SystemH specific parts of the kernel
-#
-
-obj-y	 := setup.o irq.o io.o
-
-# XXX: This wants to be consolidated in arch/sh/drivers/pci, and more
-# importantly, with the generic sh7751_pcic_init() code. For now, we'll
-# just abuse the hell out of kbuild, because we can..
-
-obj-$(CONFIG_PCI) += pci.o
-pci-y := ../../se/7751/pci.o
-
diff --git a/arch/sh/boards/mach-systemh/io.c b/arch/sh/boards/mach-systemh/io.c
deleted file mode 100644
index 15577ff1f71..00000000000
--- a/arch/sh/boards/mach-systemh/io.c
+++ /dev/null
@@ -1,158 +0,0 @@
1/*
2 * linux/arch/sh/boards/renesas/systemh/io.c
3 *
4 * Copyright (C) 2001 Ian da Silva, Jeremy Siegel
5 * Based largely on io_se.c.
6 *
7 * I/O routine for Hitachi 7751 Systemh.
8 */
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/pci.h>
12#include <mach/systemh7751.h>
13#include <asm/addrspace.h>
14#include <asm/io.h>
15
16#define ETHER_IOMAP(adr) (0xB3000000 + (adr)) /*map to 16bits access area
17 of smc lan chip*/
18static inline volatile __u16 *
19port2adr(unsigned int port)
20{
21 if (port >= 0x2000)
22 return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
23 maybebadio((unsigned long)port);
24 return (volatile __u16*)port;
25}
26
27/*
28 * General outline: remap really low stuff [eventually] to SuperIO,
29 * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
30 * is mapped through the PCI IO window. Stuff with high bits (PXSEG)
31 * should be way beyond the window, and is used w/o translation for
32 * compatibility.
33 */
34unsigned char sh7751systemh_inb(unsigned long port)
35{
36 if (PXSEG(port))
37 return *(volatile unsigned char *)port;
38 else if (port <= 0x3F1)
39 return *(volatile unsigned char *)ETHER_IOMAP(port);
40 else
41 return (*port2adr(port))&0xff;
42}
43
44unsigned char sh7751systemh_inb_p(unsigned long port)
45{
46 unsigned char v;
47
48 if (PXSEG(port))
49 v = *(volatile unsigned char *)port;
50 else if (port <= 0x3F1)
51 v = *(volatile unsigned char *)ETHER_IOMAP(port);
52 else
53 v = (*port2adr(port))&0xff;
54 ctrl_delay();
55 return v;
56}
57
58unsigned short sh7751systemh_inw(unsigned long port)
59{
60 if (PXSEG(port))
61 return *(volatile unsigned short *)port;
62 else if (port >= 0x2000)
63 return *port2adr(port);
64 else if (port <= 0x3F1)
65 return *(volatile unsigned int *)ETHER_IOMAP(port);
66 else
67 maybebadio(port);
68 return 0;
69}
70
71unsigned int sh7751systemh_inl(unsigned long port)
72{
73 if (PXSEG(port))
74 return *(volatile unsigned long *)port;
75 else if (port >= 0x2000)
76 return *port2adr(port);
77 else if (port <= 0x3F1)
78 return *(volatile unsigned int *)ETHER_IOMAP(port);
79 else
80 maybebadio(port);
81 return 0;
82}
83
84void sh7751systemh_outb(unsigned char value, unsigned long port)
85{
86
87 if (PXSEG(port))
88 *(volatile unsigned char *)port = value;
89 else if (port <= 0x3F1)
90 *(volatile unsigned char *)ETHER_IOMAP(port) = value;
91 else
92 *(port2adr(port)) = value;
93}
94
95void sh7751systemh_outb_p(unsigned char value, unsigned long port)
96{
97 if (PXSEG(port))
98 *(volatile unsigned char *)port = value;
99 else if (port <= 0x3F1)
100 *(volatile unsigned char *)ETHER_IOMAP(port) = value;
101 else
102 *(port2adr(port)) = value;
103 ctrl_delay();
104}
105
106void sh7751systemh_outw(unsigned short value, unsigned long port)
107{
108 if (PXSEG(port))
109 *(volatile unsigned short *)port = value;
110 else if (port >= 0x2000)
111 *port2adr(port) = value;
112 else if (port <= 0x3F1)
113 *(volatile unsigned short *)ETHER_IOMAP(port) = value;
114 else
115 maybebadio(port);
116}
117
118void sh7751systemh_outl(unsigned int value, unsigned long port)
119{
120 if (PXSEG(port))
121 *(volatile unsigned long *)port = value;
122 else
123 maybebadio(port);
124}
125
126void sh7751systemh_insb(unsigned long port, void *addr, unsigned long count)
127{
128 unsigned char *p = addr;
129 while (count--) *p++ = sh7751systemh_inb(port);
130}
131
132void sh7751systemh_insw(unsigned long port, void *addr, unsigned long count)
133{
134 unsigned short *p = addr;
135 while (count--) *p++ = sh7751systemh_inw(port);
136}
137
138void sh7751systemh_insl(unsigned long port, void *addr, unsigned long count)
139{
140 maybebadio(port);
141}
142
143void sh7751systemh_outsb(unsigned long port, const void *addr, unsigned long count)
144{
145 unsigned char *p = (unsigned char*)addr;
146 while (count--) sh7751systemh_outb(*p++, port);
147}
148
149void sh7751systemh_outsw(unsigned long port, const void *addr, unsigned long count)
150{
151 unsigned short *p = (unsigned short*)addr;
152 while (count--) sh7751systemh_outw(*p++, port);
153}
154
155void sh7751systemh_outsl(unsigned long port, const void *addr, unsigned long count)
156{
157 maybebadio(port);
158}
diff --git a/arch/sh/boards/mach-systemh/irq.c b/arch/sh/boards/mach-systemh/irq.c
deleted file mode 100644
index e5ee13adeff..00000000000
--- a/arch/sh/boards/mach-systemh/irq.c
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * linux/arch/sh/boards/renesas/systemh/irq.c
3 *
4 * Copyright (C) 2000 Kazumoto Kojima
5 *
6 * Hitachi SystemH Support.
7 *
8 * Modified for 7751 SystemH by
9 * Jonathan Short.
10 */
11
12#include <linux/init.h>
13#include <linux/irq.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16
17#include <mach/systemh7751.h>
18#include <asm/smc37c93x.h>
19
20/* address of external interrupt mask register
21 * address must be set prior to use these (maybe in init_XXX_irq())
22 * XXX : is it better to use .config than specifying it in code? */
23static unsigned long *systemh_irq_mask_register = (unsigned long *)0xB3F10004;
24static unsigned long *systemh_irq_request_register = (unsigned long *)0xB3F10000;
25
26static void disable_systemh_irq(struct irq_data *data)
27{
28 unsigned long val, mask = 0x01 << 1;
29
30 /* Clear the "irq"th bit in the mask and set it in the request */
31 val = __raw_readl((unsigned long)systemh_irq_mask_register);
32 val &= ~mask;
33 __raw_writel(val, (unsigned long)systemh_irq_mask_register);
34
35 val = __raw_readl((unsigned long)systemh_irq_request_register);
36 val |= mask;
37 __raw_writel(val, (unsigned long)systemh_irq_request_register);
38}
39
40static void enable_systemh_irq(struct irq_data *data)
41{
42 unsigned long val, mask = 0x01 << 1;
43
44 /* Set "irq"th bit in the mask register */
45 val = __raw_readl((unsigned long)systemh_irq_mask_register);
46 val |= mask;
47 __raw_writel(val, (unsigned long)systemh_irq_mask_register);
48}
49
50static struct irq_chip systemh_irq_type = {
51 .name = "SystemH Register",
52 .irq_unmask = enable_systemh_irq,
53 .irq_mask = disable_systemh_irq,
54};
55
56void make_systemh_irq(unsigned int irq)
57{
58 disable_irq_nosync(irq);
59 set_irq_chip_and_handler(irq, &systemh_irq_type, handle_level_irq);
60 disable_systemh_irq(irq_get_irq_data(irq));
61}
diff --git a/arch/sh/boards/mach-systemh/setup.c b/arch/sh/boards/mach-systemh/setup.c
deleted file mode 100644
index 219fd800a43..00000000000
--- a/arch/sh/boards/mach-systemh/setup.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * linux/arch/sh/boards/renesas/systemh/setup.c
3 *
4 * Copyright (C) 2000 Kazumoto Kojima
5 * Copyright (C) 2003 Paul Mundt
6 *
7 * Hitachi SystemH Support.
8 *
9 * Modified for 7751 SystemH by Jonathan Short.
10 *
11 * Rewritten for 2.6 by Paul Mundt.
12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
16 */
17#include <linux/init.h>
18#include <asm/machvec.h>
19#include <mach/systemh7751.h>
20
21extern void make_systemh_irq(unsigned int irq);
22
23/*
24 * Initialize IRQ setting
25 */
26static void __init sh7751systemh_init_irq(void)
27{
28 make_systemh_irq(0xb); /* Ethernet interrupt */
29}
30
31static struct sh_machine_vector mv_7751systemh __initmv = {
32 .mv_name = "7751 SystemH",
33 .mv_nr_irqs = 72,
34
35 .mv_inb = sh7751systemh_inb,
36 .mv_inw = sh7751systemh_inw,
37 .mv_inl = sh7751systemh_inl,
38 .mv_outb = sh7751systemh_outb,
39 .mv_outw = sh7751systemh_outw,
40 .mv_outl = sh7751systemh_outl,
41
42 .mv_inb_p = sh7751systemh_inb_p,
43 .mv_inw_p = sh7751systemh_inw,
44 .mv_inl_p = sh7751systemh_inl,
45 .mv_outb_p = sh7751systemh_outb_p,
46 .mv_outw_p = sh7751systemh_outw,
47 .mv_outl_p = sh7751systemh_outl,
48
49 .mv_insb = sh7751systemh_insb,
50 .mv_insw = sh7751systemh_insw,
51 .mv_insl = sh7751systemh_insl,
52 .mv_outsb = sh7751systemh_outsb,
53 .mv_outsw = sh7751systemh_outsw,
54 .mv_outsl = sh7751systemh_outsl,
55
56 .mv_init_irq = sh7751systemh_init_irq,
57};
diff --git a/arch/sh/configs/snapgear_defconfig b/arch/sh/configs/secureedge5410_defconfig
index 7eae4e59d7f..7eae4e59d7f 100644
--- a/arch/sh/configs/snapgear_defconfig
+++ b/arch/sh/configs/secureedge5410_defconfig
diff --git a/arch/sh/configs/systemh_defconfig b/arch/sh/configs/systemh_defconfig
deleted file mode 100644
index b58dfc505ef..00000000000
--- a/arch/sh/configs/systemh_defconfig
+++ /dev/null
@@ -1,28 +0,0 @@
1CONFIG_EXPERIMENTAL=y
2CONFIG_LOG_BUF_SHIFT=14
3CONFIG_BLK_DEV_INITRD=y
4# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
5# CONFIG_SYSCTL_SYSCALL is not set
6# CONFIG_HOTPLUG is not set
7CONFIG_SLAB=y
8CONFIG_MODULES=y
9CONFIG_MODULE_UNLOAD=y
10# CONFIG_BLK_DEV_BSG is not set
11CONFIG_CPU_SUBTYPE_SH7751R=y
12CONFIG_MEMORY_START=0x0c000000
13CONFIG_MEMORY_SIZE=0x00400000
14CONFIG_FLATMEM_MANUAL=y
15CONFIG_SH_7751_SYSTEMH=y
16CONFIG_PREEMPT=y
17# CONFIG_STANDALONE is not set
18CONFIG_BLK_DEV_RAM=y
19CONFIG_BLK_DEV_RAM_SIZE=1024
20# CONFIG_INPUT is not set
21# CONFIG_SERIO_SERPORT is not set
22# CONFIG_VT is not set
23CONFIG_HW_RANDOM=y
24CONFIG_PROC_KCORE=y
25CONFIG_TMPFS=y
26CONFIG_CRAMFS=y
27CONFIG_ROMFS_FS=y
28# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 446b3831c21..3d1ae2bfaa6 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -44,10 +44,10 @@
 /*
  * These will never work in 32-bit, don't even bother.
  */
-#define P1SEGADDR(a)	__futile_remapping_attempt
-#define P2SEGADDR(a)	__futile_remapping_attempt
-#define P3SEGADDR(a)	__futile_remapping_attempt
-#define P4SEGADDR(a)	__futile_remapping_attempt
+#define P1SEGADDR(a)	({ (void)(a); BUG(); NULL; })
+#define P2SEGADDR(a)	({ (void)(a); BUG(); NULL; })
+#define P3SEGADDR(a)	({ (void)(a); BUG(); NULL; })
+#define P4SEGADDR(a)	({ (void)(a); BUG(); NULL; })
 #endif
 #endif /* P1SEG */
 
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index a15f1058bbf..083ea068e81 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -66,7 +66,6 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
 #define PHYS_ADDR_MASK29		0x1fffffff
 #define PHYS_ADDR_MASK32		0xffffffff
 
-#ifdef CONFIG_PMB
 static inline unsigned long phys_addr_mask(void)
 {
 	/* Is the MMU in 29bit mode? */
@@ -75,17 +74,6 @@ static inline unsigned long phys_addr_mask(void)
 
 	return PHYS_ADDR_MASK32;
 }
-#elif defined(CONFIG_32BIT)
-static inline unsigned long phys_addr_mask(void)
-{
-	return PHYS_ADDR_MASK32;
-}
-#else
-static inline unsigned long phys_addr_mask(void)
-{
-	return PHYS_ADDR_MASK29;
-}
-#endif
 
 #define PTE_PHYS_MASK		(phys_addr_mask() & PAGE_MASK)
 #define PTE_FLAGS_MASK		(~(PTE_PHYS_MASK) << PAGE_SHIFT)
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index 1f1af5afff0..10c8b1823a1 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <asm/types.h>
+#include <asm/uncached.h>
 
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
@@ -137,9 +138,6 @@ extern unsigned int instruction_size(unsigned int insn);
 #define instruction_size(insn)	(4)
 #endif
 
-extern unsigned long cached_to_uncached;
-extern unsigned long uncached_size;
-
 void per_cpu_trap_init(void);
 void default_idle(void);
 void cpu_idle_wait(void);
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index c941b273940..a4ad1cd9bc4 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -145,42 +145,6 @@ do { \
 	__restore_dsp(prev);				\
 } while (0)
 
-/*
- * Jump to uncached area.
- * When handling TLB or caches, we need to do it from an uncached area.
- */
-#define jump_to_uncached()			\
-do {						\
-	unsigned long __dummy;			\
-						\
-	__asm__ __volatile__(			\
-		"mova	1f, %0\n\t"		\
-		"add	%1, %0\n\t"		\
-		"jmp	@%0\n\t"		\
-		" nop\n\t"			\
-		".balign 4\n"			\
-		"1:"				\
-		: "=&z" (__dummy)		\
-		: "r" (cached_to_uncached));	\
-} while (0)
-
-/*
- * Back to cached area.
- */
-#define back_to_cached()			\
-do {						\
-	unsigned long __dummy;			\
-	ctrl_barrier();				\
-	__asm__ __volatile__(			\
-		"mov.l	1f, %0\n\t"		\
-		"jmp	@%0\n\t"		\
-		" nop\n\t"			\
-		".balign 4\n"			\
-		"1:	.long 2f\n"		\
-		"2:"				\
-		: "=&r" (__dummy));		\
-} while (0)
-
 #ifdef CONFIG_CPU_HAS_SR_RB
 #define lookup_exception_vector()	\
 ({ \
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 36338646dfc..8593bc8d1a4 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -34,9 +34,6 @@ do { \
 			 &next->thread);			\
 } while (0)
 
-#define jump_to_uncached()	do { } while (0)
-#define back_to_cached()	do { } while (0)
-
 #define __icbi(addr)	__asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
 #define __ocbp(addr)	__asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
 #define __ocbi(addr)	__asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
index e3419f96626..6f8816b79cf 100644
--- a/arch/sh/include/asm/uncached.h
+++ b/arch/sh/include/asm/uncached.h
@@ -4,15 +4,55 @@
 #include <linux/bug.h>
 
 #ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long cached_to_uncached;
+extern unsigned long uncached_size;
 extern unsigned long uncached_start, uncached_end;
 
 extern int virt_addr_uncached(unsigned long kaddr);
 extern void uncached_init(void);
 extern void uncached_resize(unsigned long size);
+
+/*
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
+ */
+#define jump_to_uncached()			\
+do {						\
+	unsigned long __dummy;			\
+						\
+	__asm__ __volatile__(			\
+		"mova	1f, %0\n\t"		\
+		"add	%1, %0\n\t"		\
+		"jmp	@%0\n\t"		\
+		" nop\n\t"			\
+		".balign 4\n"			\
+		"1:"				\
+		: "=&z" (__dummy)		\
+		: "r" (cached_to_uncached));	\
+} while (0)
+
+/*
+ * Back to cached area.
+ */
+#define back_to_cached()			\
+do {						\
+	unsigned long __dummy;			\
+	ctrl_barrier();				\
+	__asm__ __volatile__(			\
+		"mov.l	1f, %0\n\t"		\
+		"jmp	@%0\n\t"		\
+		" nop\n\t"			\
+		".balign 4\n"			\
+		"1:	.long 2f\n"		\
+		"2:"				\
+		: "=&r" (__dummy));		\
+} while (0)
 #else
 #define virt_addr_uncached(kaddr)	(0)
 #define uncached_init()			do { } while (0)
 #define uncached_resize(size)		BUG()
+#define jump_to_uncached()		do { } while (0)
+#define back_to_cached()		do { } while (0)
 #endif
 
 #endif /* __ASM_SH_UNCACHED_H */
diff --git a/arch/sh/include/mach-common/mach/edosk7705.h b/arch/sh/include/mach-common/mach/edosk7705.h
deleted file mode 100644
index efc43b32346..00000000000
--- a/arch/sh/include/mach-common/mach/edosk7705.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SH_EDOSK7705_H
-#define __ASM_SH_EDOSK7705_H
-
-#define __IO_PREFIX sh_edosk7705
-#include <asm/io_generic.h>
-
-#endif /* __ASM_SH_EDOSK7705_H */
diff --git a/arch/sh/include/mach-common/mach/microdev.h b/arch/sh/include/mach-common/mach/microdev.h
index 1aed15856e1..dcb05fa8c16 100644
--- a/arch/sh/include/mach-common/mach/microdev.h
+++ b/arch/sh/include/mach-common/mach/microdev.h
@@ -68,13 +68,4 @@ extern void microdev_print_fpga_intc_status(void);
 #define __IO_PREFIX microdev
 #include <asm/io_generic.h>
 
-#if defined(CONFIG_PCI)
-unsigned char microdev_pci_inb(unsigned long port);
-unsigned short microdev_pci_inw(unsigned long port);
-unsigned long microdev_pci_inl(unsigned long port);
-void microdev_pci_outb(unsigned char data, unsigned long port);
-void microdev_pci_outw(unsigned short data, unsigned long port);
-void microdev_pci_outl(unsigned long data, unsigned long port);
-#endif
-
 #endif /* __ASM_SH_MICRODEV_H */
diff --git a/arch/sh/include/mach-common/mach/snapgear.h b/arch/sh/include/mach-common/mach/secureedge5410.h
index 042d95f51c4..3653b9a4bac 100644
--- a/arch/sh/include/mach-common/mach/snapgear.h
+++ b/arch/sh/include/mach-common/mach/secureedge5410.h
@@ -12,30 +12,9 @@
 #ifndef _ASM_SH_IO_SNAPGEAR_H
 #define _ASM_SH_IO_SNAPGEAR_H
 
-#if defined(CONFIG_CPU_SH4)
-/*
- * The external interrupt lines, these take up ints 0 - 15 inclusive
- * depending on the priority for the interrupt. In fact the priority
- * is the interrupt :-)
- */
-
-#define IRL0_IRQ	2
-#define IRL0_PRIORITY	13
-
-#define IRL1_IRQ	5
-#define IRL1_PRIORITY	10
-
-#define IRL2_IRQ	8
-#define IRL2_PRIORITY	7
-
-#define IRL3_IRQ	11
-#define IRL3_PRIORITY	4
-#endif
-
 #define __IO_PREFIX	snapgear
 #include <asm/io_generic.h>
 
-#ifdef CONFIG_SH_SECUREEDGE5410
 /*
  * We need to remember what was written to the ioport as some bits
  * are shared with other functions and you cannot read back what was
@@ -66,6 +45,5 @@ extern unsigned short secureedge5410_ioport;
 			     ((secureedge5410_ioport & ~(mask)) | ((val) & (mask)))))
 #define SECUREEDGE_READ_IOPORT() \
  ((*SECUREEDGE_IOPORT_ADDR&0x0817) | (secureedge5410_ioport&~0x0817))
-#endif
 
 #endif /* _ASM_SH_IO_SNAPGEAR_H */
diff --git a/arch/sh/include/mach-common/mach/systemh7751.h b/arch/sh/include/mach-common/mach/systemh7751.h
deleted file mode 100644
index 4161122c84e..00000000000
--- a/arch/sh/include/mach-common/mach/systemh7751.h
+++ /dev/null
@@ -1,71 +0,0 @@
1#ifndef __ASM_SH_SYSTEMH_7751SYSTEMH_H
2#define __ASM_SH_SYSTEMH_7751SYSTEMH_H
3
4/*
5 * linux/include/asm-sh/systemh/7751systemh.h
6 *
7 * Copyright (C) 2000 Kazumoto Kojima
8 *
9 * Hitachi SystemH support
10
11 * Modified for 7751 SystemH by
12 * Jonathan Short, 2002.
13 */
14
15/* Box specific addresses. */
16
17#define PA_ROM 0x00000000 /* EPROM */
18#define PA_ROM_SIZE 0x00400000 /* EPROM size 4M byte */
19#define PA_FROM 0x01000000 /* EPROM */
20#define PA_FROM_SIZE 0x00400000 /* EPROM size 4M byte */
21#define PA_EXT1 0x04000000
22#define PA_EXT1_SIZE 0x04000000
23#define PA_EXT2 0x08000000
24#define PA_EXT2_SIZE 0x04000000
25#define PA_SDRAM 0x0c000000
26#define PA_SDRAM_SIZE 0x04000000
27
28#define PA_EXT4 0x12000000
29#define PA_EXT4_SIZE 0x02000000
30#define PA_EXT5 0x14000000
31#define PA_EXT5_SIZE 0x04000000
32#define PA_PCIC 0x18000000 /* MR-SHPC-01 PCMCIA */
33
34#define PA_DIPSW0 0xb9000000 /* Dip switch 5,6 */
35#define PA_DIPSW1 0xb9000002 /* Dip switch 7,8 */
36#define PA_LED 0xba000000 /* LED */
37#define PA_BCR 0xbb000000 /* FPGA on the MS7751SE01 */
38
39#define PA_MRSHPC 0xb83fffe0 /* MR-SHPC-01 PCMCIA controller */
40#define PA_MRSHPC_MW1 0xb8400000 /* MR-SHPC-01 memory window base */
41#define PA_MRSHPC_MW2 0xb8500000 /* MR-SHPC-01 attribute window base */
42#define PA_MRSHPC_IO 0xb8600000 /* MR-SHPC-01 I/O window base */
43#define MRSHPC_MODE (PA_MRSHPC + 4)
44#define MRSHPC_OPTION (PA_MRSHPC + 6)
45#define MRSHPC_CSR (PA_MRSHPC + 8)
46#define MRSHPC_ISR (PA_MRSHPC + 10)
47#define MRSHPC_ICR (PA_MRSHPC + 12)
48#define MRSHPC_CPWCR (PA_MRSHPC + 14)
49#define MRSHPC_MW0CR1 (PA_MRSHPC + 16)
50#define MRSHPC_MW1CR1 (PA_MRSHPC + 18)
51#define MRSHPC_IOWCR1 (PA_MRSHPC + 20)
52#define MRSHPC_MW0CR2 (PA_MRSHPC + 22)
53#define MRSHPC_MW1CR2 (PA_MRSHPC + 24)
54#define MRSHPC_IOWCR2 (PA_MRSHPC + 26)
55#define MRSHPC_CDCR (PA_MRSHPC + 28)
56#define MRSHPC_PCIC_INFO (PA_MRSHPC + 30)
57
58#define BCR_ILCRA (PA_BCR + 0)
59#define BCR_ILCRB (PA_BCR + 2)
60#define BCR_ILCRC (PA_BCR + 4)
61#define BCR_ILCRD (PA_BCR + 6)
62#define BCR_ILCRE (PA_BCR + 8)
63#define BCR_ILCRF (PA_BCR + 10)
64#define BCR_ILCRG (PA_BCR + 12)
65
66#define IRQ_79C973 13
67
68#define __IO_PREFIX sh7751systemh
69#include <asm/io_generic.h>
70
71#endif /* __ASM_SH_SYSTEMH_7751SYSTEMH_H */
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 2d9700c6b53..0fe2e9329cb 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -48,7 +48,7 @@ static struct clk r_clk = {
  * Default rate for the root input clock, reset this with clk_set_rate()
  * from the platform code.
  */
-struct clk extal_clk = {
+static struct clk extal_clk = {
 	.rate		= 33333333,
 };
 
@@ -111,7 +111,7 @@ static struct clk div3_clk = {
 	.parent		= &pll_clk,
 };
 
-struct clk *main_clks[] = {
+static struct clk *main_clks[] = {
 	&r_clk,
 	&extal_clk,
 	&fll_clk,
@@ -156,7 +156,7 @@ struct clk div4_clks[DIV4_NR] = {
 
 enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR };
 
-struct clk div6_clks[DIV6_NR] = {
+static struct clk div6_clks[DIV6_NR] = {
 	[DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
 	[DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0),
 	[DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0),
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 09370392aff..c3e61b36649 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -79,7 +79,7 @@ config 29BIT
 
 config 32BIT
 	bool
-	default y if CPU_SH5
+	default y if CPU_SH5 || !MMU
 
 config PMB
 	bool "Support 32-bit physical addressing through PMB"
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 03879328699..40733a95240 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -79,21 +79,20 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
-#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
-	void *p1addr = vaddr;
-#else
-	void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
-#endif
+	void *addr;
+
+	addr = __in_29bit_mode() ?
+	       (void *)P1SEGADDR((unsigned long)vaddr) : vaddr;
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		__flush_invalidate_region(p1addr, size);
+		__flush_invalidate_region(addr, size);
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		__flush_wback_region(p1addr, size);
+		__flush_wback_region(addr, size);
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		__flush_purge_region(p1addr, size);
+		__flush_purge_region(addr, size);
 		break;
 	default:
 		BUG();
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index 8a4eca551fc..a7767da815e 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(virt_addr_uncached);
 
 void __init uncached_init(void)
 {
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || !defined(CONFIG_MMU)
 	uncached_start = P2SEG;
 #else
 	uncached_start = memory_end;
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 9f56eb97802..0e68465e7b5 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -26,7 +26,6 @@ HD64461 HD64461
 7724SE			SH_7724_SOLUTION_ENGINE
 7751SE			SH_7751_SOLUTION_ENGINE
 7780SE			SH_7780_SOLUTION_ENGINE
-7751SYSTEMH		SH_7751_SYSTEMH
 HP6XX			SH_HP6XX
 DREAMCAST		SH_DREAMCAST
 SNAPGEAR		SH_SECUREEDGE5410
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index e0f7ee18672..b2a6c5de79a 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -23,7 +23,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h
index 1480106d1c0..3d0f2024626 100644
--- a/arch/tile/include/asm/kmap_types.h
+++ b/arch/tile/include/asm/kmap_types.h
@@ -16,28 +16,42 @@
 #define _ASM_TILE_KMAP_TYPES_H
 
 /*
- * In TILE Linux each set of four of these uses another 16MB chunk of
- * address space, given 64 tiles and 64KB pages, so we only enable
- * ones that are required by the kernel configuration.
+ * In 32-bit TILE Linux we have to balance the desire to have a lot of
+ * nested atomic mappings with the fact that large page sizes and many
+ * processors chew up address space quickly. In a typical
+ * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
+ * adds 4MB of required address-space. For now we leave KM_TYPE_NR
+ * set to depth 8.
  */
 enum km_type {
+	KM_TYPE_NR = 8
+};
+
+/*
+ * We provide dummy definitions of all the stray values that used to be
+ * required for kmap_atomic() and no longer are.
+ */
+enum {
 	KM_BOUNCE_READ,
 	KM_SKB_SUNRPC_DATA,
 	KM_SKB_DATA_SOFTIRQ,
 	KM_USER0,
 	KM_USER1,
 	KM_BIO_SRC_IRQ,
+	KM_BIO_DST_IRQ,
+	KM_PTE0,
+	KM_PTE1,
 	KM_IRQ0,
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
-	KM_MEMCPY0,
-	KM_MEMCPY1,
-#if defined(CONFIG_HIGHPTE)
-	KM_PTE0,
-	KM_PTE1,
-#endif
-	KM_TYPE_NR
+	KM_SYNC_ICACHE,
+	KM_SYNC_DCACHE,
+	KM_UML_USERCOPY,
+	KM_IRQ_PTE,
+	KM_NMI,
+	KM_NMI_PTE,
+	KM_KDB
 };
 
 #endif /* _ASM_TILE_KMAP_TYPES_H */
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index dc4ccdd855b..a6604e9485d 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -344,10 +344,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 #if defined(CONFIG_HIGHPTE)
-extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
-#define pte_offset_map(dir, address) \
-	_pte_offset_map(dir, address, KM_PTE0)
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte) do { } while (0)
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h
index 3dc90fa92c7..b16e5db8f0e 100644
--- a/arch/tile/include/asm/stat.h
+++ b/arch/tile/include/asm/stat.h
@@ -1 +1,4 @@
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_STAT64	/* Used for compat_sys_stat64() etc. */
+#endif
 #include <asm-generic/stat.h>
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f2e3ff48533..b35c2db7119 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -41,6 +41,7 @@ __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr)
 #ifdef CONFIG_COMPAT
 #define __ARCH_WANT_SYS_LLSEEK
 #endif
+#define __ARCH_WANT_SYS_NEWFSTATAT
 #endif
 
 #endif /* _ASM_TILE_UNISTD_H */
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 77739cdd946..67617a05e60 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -148,11 +148,11 @@ long tile_compat_sys_msgrcv(int msqid,
 #define compat_sys_readahead sys32_readahead
 #define compat_sys_sync_file_range compat_sys_sync_file_range2
 
-/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
-#define compat_sys_stat64 sys_newstat
-#define compat_sys_lstat64 sys_newlstat
-#define compat_sys_fstat64 sys_newfstat
-#define compat_sys_fstatat64 sys_newfstatat
+/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
+#define compat_sys_stat64 sys_stat64
+#define compat_sys_lstat64 sys_lstat64
+#define compat_sys_fstat64 sys_fstat64
+#define compat_sys_fstatat64 sys_fstatat64
 
 /* The native sys_ptrace dynamically handles compat binaries. */
 #define compat_sys_ptrace sys_ptrace
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index 2c54fd43a8a..493a0e66d91 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@ void early_printk(const char *fmt, ...)
 void early_panic(const char *fmt, ...)
 {
 	va_list ap;
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	va_start(ap, fmt);
 	early_printk("Kernel panic - not syncing: ");
 	early_vprintk(fmt, ap);
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 1e54a784341..e910530436e 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@ enum direction_protect {
 
 static void enable_firewall_interrupts(void)
 {
-	raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+	arch_local_irq_unmask_now(INT_UDN_FIREWALL);
 }
 
 static void disable_firewall_interrupts(void)
 {
-	raw_local_irq_mask_now(INT_UDN_FIREWALL);
+	arch_local_irq_mask_now(INT_UDN_FIREWALL);
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,13 +768,13 @@ static int hardwall_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dev_hardwall_fops = {
+	.open           = nonseekable_open,
 	.unlocked_ioctl = hardwall_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl   = hardwall_compat_ioctl,
 #endif
 	.flush          = hardwall_flush,
 	.release        = hardwall_release,
-	.llseek         = noop_llseek,
 };
 
 static struct cdev hardwall_dev;
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index e63917687e9..128805ef8f2 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
 #define IS_HW_CLEARED 1
 
 /*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
  * This is initialized to have just a single interrupt that the kernel
  * doesn't actually use as a sentinel.  During kernel init,
  * interrupts are added as the kernel gets prepared to support them.
@@ -225,7 +225,7 @@ void __cpuinit setup_irq_regs(void)
 	/* Enable interrupt delivery. */
 	unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-	raw_local_irq_unmask(INT_IPI_K);
+	arch_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ba7a265d617..0d8b9e93348 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -182,13 +182,13 @@ static void kexec_find_and_set_command_line(struct kimage *image)
 
 		if ((entry & IND_SOURCE)) {
 			void *va =
-				kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
+				kmap_atomic_pfn(entry >> PAGE_SHIFT);
 			r = kexec_bn2cl(va);
 			if (r) {
 				command_line = r;
 				break;
 			}
-			kunmap_atomic(va, KM_USER0);
+			kunmap_atomic(va);
 		}
 	}
 
@@ -198,7 +198,7 @@ static void kexec_find_and_set_command_line(struct kimage *image)
 
 		hverr = hv_set_command_line(
 			(HV_VirtAddr) command_line, strlen(command_line));
-		kunmap_atomic(command_line, KM_USER0);
+		kunmap_atomic(command_line);
 	} else {
 		pr_info("%s: no command line found; making empty\n",
 			__func__);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 997e3933f72..0858ee6b520 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@ void __cpuinit init_messaging(void)
 		panic("hv_register_message_state: error %d", rc);
 
 	/* Make sure downcall interrupts will be enabled. */
-	raw_local_irq_unmask(INT_INTCTRL_K);
+	arch_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 9cd29884c09..e92e40527d6 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -50,10 +50,10 @@ long arch_ptrace(struct task_struct *child, long request,
 {
 	unsigned long __user *datap = (long __user __force *)data;
 	unsigned long tmp;
-	int i;
 	long ret = -EIO;
-	unsigned long *childregs;
 	char *childreg;
+	struct pt_regs copyregs;
+	int ex1_offset;
 
 	switch (request) {
 
@@ -80,6 +80,16 @@ long arch_ptrace(struct task_struct *child, long request,
 		if (addr >= PTREGS_SIZE)
 			break;
 		childreg = (char *)task_pt_regs(child) + addr;
+
+		/* Guard against overwrites of the privilege level. */
+		ex1_offset = PTREGS_OFFSET_EX1;
+#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
+		if (is_compat_task())   /* point at low word */
+			ex1_offset += sizeof(compat_long_t);
+#endif
+		if (addr == ex1_offset)
+			data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
+
 #ifdef CONFIG_COMPAT
 		if (is_compat_task()) {
 			if (addr & (sizeof(compat_long_t)-1))
@@ -96,26 +106,19 @@ long arch_ptrace(struct task_struct *child, long request,
 				break;
 
 	case PTRACE_GETREGS:  /* Get all registers from the child. */
-		if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
-			break;
-		childregs = (long *)task_pt_regs(child);
-		for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-		     ++i) {
-			ret = __put_user(childregs[i], &datap[i]);
-			if (ret != 0)
-				break;
+		if (copy_to_user(datap, task_pt_regs(child),
+				 sizeof(struct pt_regs)) == 0) {
+			ret = 0;
 		}
 		break;
 
 	case PTRACE_SETREGS:  /* Set all registers in the child. */
-		if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
-			break;
-		childregs = (long *)task_pt_regs(child);
-		for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-		     ++i) {
-			ret = __get_user(childregs[i], &datap[i]);
-			if (ret != 0)
-				break;
+		if (copy_from_user(&copyregs, datap,
+				   sizeof(struct pt_regs)) == 0) {
+			copyregs.ex1 =
+				PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
+			*task_pt_regs(child) = copyregs;
+			ret = 0;
 		}
 		break;
 
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index acd86d20beb..baa3d905fee 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
 void machine_halt(void)
 {
 	warn_early_printk();
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_halt();
 }
@@ -35,14 +35,14 @@ void machine_halt(void)
 void machine_power_off(void)
 {
 	warn_early_printk();
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_power_off();
 }
 
 void machine_restart(char *cmd)
 {
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
 }
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index ae51cad12da..fb0b3cbeae1 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -868,14 +868,14 @@ void __cpuinit setup_cpu(int boot)
 
 	/* Allow asynchronous TLB interrupts. */
 #if CHIP_HAS_TILE_DMA()
-	raw_local_irq_unmask(INT_DMATLB_MISS);
-	raw_local_irq_unmask(INT_DMATLB_ACCESS);
+	arch_local_irq_unmask(INT_DMATLB_MISS);
+	arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
 #if CHIP_HAS_SN_PROC()
-	raw_local_irq_unmask(INT_SNITLB_MISS);
+	arch_local_irq_unmask(INT_SNITLB_MISS);
 #endif
 #ifdef __tilegx__
-	raw_local_irq_unmask(INT_SINGLE_STEP_K);
+	arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
 
 	/*
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index fb28e85ae3a..687719d4abd 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -71,6 +71,9 @@ int restore_sigcontext(struct pt_regs *regs,
71 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) 71 for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
72 err |= __get_user(regs->regs[i], &sc->gregs[i]); 72 err |= __get_user(regs->regs[i], &sc->gregs[i]);
73 73
74 /* Ensure that the PL is always set to USER_PL. */
75 regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
76
74 regs->faultnum = INT_SWINT_1_SIGRETURN; 77 regs->faultnum = INT_SWINT_1_SIGRETURN;
75 78
76 err |= __get_user(*pr0, &sc->gregs[0]); 79 err |= __get_user(*pr0, &sc->gregs[0]);
@@ -330,7 +333,7 @@ void do_signal(struct pt_regs *regs)
330 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 333 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
331 } 334 }
332 335
333 return; 336 goto done;
334 } 337 }
335 338
336 /* Did we come from a system call? */ 339 /* Did we come from a system call? */
@@ -358,4 +361,8 @@ void do_signal(struct pt_regs *regs)
358 current_thread_info()->status &= ~TS_RESTORE_SIGMASK; 361 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
359 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 362 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
360 } 363 }
364
365done:
366 /* Avoid double syscall restart if there are nested signals. */
367 regs->faultnum = INT_SWINT_1_SIGRETURN;
361} 368}
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 75255d90aff..9575b37a8b7 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@ static void smp_start_cpu_interrupt(void)
115static void smp_stop_cpu_interrupt(void) 115static void smp_stop_cpu_interrupt(void)
116{ 116{
117 set_cpu_online(smp_processor_id(), 0); 117 set_cpu_online(smp_processor_id(), 0);
118 raw_local_irq_disable_all(); 118 arch_local_irq_disable_all();
119 for (;;) 119 for (;;)
120 asm("nap"); 120 asm("nap");
121} 121}
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 6bed820e142..f2e156e4469 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -132,7 +132,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
132{ 132{
133 BUG_ON(ticks > MAX_TICK); 133 BUG_ON(ticks > MAX_TICK);
134 __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks); 134 __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
135 raw_local_irq_unmask_now(INT_TILE_TIMER); 135 arch_local_irq_unmask_now(INT_TILE_TIMER);
136 return 0; 136 return 0;
137} 137}
138 138
@@ -143,7 +143,7 @@ static int tile_timer_set_next_event(unsigned long ticks,
143static void tile_timer_set_mode(enum clock_event_mode mode, 143static void tile_timer_set_mode(enum clock_event_mode mode,
144 struct clock_event_device *evt) 144 struct clock_event_device *evt)
145{ 145{
146 raw_local_irq_mask_now(INT_TILE_TIMER); 146 arch_local_irq_mask_now(INT_TILE_TIMER);
147} 147}
148 148
149/* 149/*
@@ -172,7 +172,7 @@ void __cpuinit setup_tile_timer(void)
172 evt->cpumask = cpumask_of(smp_processor_id()); 172 evt->cpumask = cpumask_of(smp_processor_id());
173 173
174 /* Start out with timer not firing. */ 174 /* Start out with timer not firing. */
175 raw_local_irq_mask_now(INT_TILE_TIMER); 175 arch_local_irq_mask_now(INT_TILE_TIMER);
176 176
177 /* Register tile timer. */ 177 /* Register tile timer. */
178 clockevents_register_device(evt); 178 clockevents_register_device(evt);
@@ -188,7 +188,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
188 * Mask the timer interrupt here, since we are a oneshot timer 188 * Mask the timer interrupt here, since we are a oneshot timer
189 * and there are now by definition no events pending. 189 * and there are now by definition no events pending.
190 */ 190 */
191 raw_local_irq_mask(INT_TILE_TIMER); 191 arch_local_irq_mask(INT_TILE_TIMER);
192 192
193 /* Track time spent here in an interrupt context */ 193 /* Track time spent here in an interrupt context */
194 irq_enter(); 194 irq_enter();
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index dfedea7b266..f7d4a6ad61e 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -54,7 +54,7 @@ typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long);
54 * we must run with interrupts disabled to avoid the risk of some 54 * we must run with interrupts disabled to avoid the risk of some
55 * other code seeing the incoherent data in our cache. (Recall that 55 * other code seeing the incoherent data in our cache. (Recall that
56 * our cache is indexed by PA, so even if the other code doesn't use 56 * our cache is indexed by PA, so even if the other code doesn't use
57 * our KM_MEMCPY virtual addresses, they'll still hit in cache using 57 * our kmap_atomic virtual addresses, they'll still hit in cache using
58 * the normal VAs that aren't supposed to hit in cache.) 58 * the normal VAs that aren't supposed to hit in cache.)
59 */ 59 */
60static void memcpy_multicache(void *dest, const void *source, 60static void memcpy_multicache(void *dest, const void *source,
@@ -64,6 +64,7 @@ static void memcpy_multicache(void *dest, const void *source,
64 unsigned long flags, newsrc, newdst; 64 unsigned long flags, newsrc, newdst;
65 pmd_t *pmdp; 65 pmd_t *pmdp;
66 pte_t *ptep; 66 pte_t *ptep;
67 int type0, type1;
67 int cpu = get_cpu(); 68 int cpu = get_cpu();
68 69
69 /* 70 /*
@@ -77,7 +78,8 @@ static void memcpy_multicache(void *dest, const void *source,
77 sim_allow_multiple_caching(1); 78 sim_allow_multiple_caching(1);
78 79
79 /* Set up the new dest mapping */ 80 /* Set up the new dest mapping */
80 idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0; 81 type0 = kmap_atomic_idx_push();
82 idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
81 newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1)); 83 newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
82 pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst); 84 pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
83 ptep = pte_offset_kernel(pmdp, newdst); 85 ptep = pte_offset_kernel(pmdp, newdst);
@@ -87,7 +89,8 @@ static void memcpy_multicache(void *dest, const void *source,
87 } 89 }
88 90
89 /* Set up the new source mapping */ 91 /* Set up the new source mapping */
90 idx += (KM_MEMCPY0 - KM_MEMCPY1); 92 type1 = kmap_atomic_idx_push();
93 idx += (type0 - type1);
91 src_pte = hv_pte_set_nc(src_pte); 94 src_pte = hv_pte_set_nc(src_pte);
92 src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */ 95 src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */
93 newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1)); 96 newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
@@ -119,6 +122,8 @@ static void memcpy_multicache(void *dest, const void *source,
119 * We're done: notify the simulator that all is back to normal, 122 * We're done: notify the simulator that all is back to normal,
120 * and re-enable interrupts and pre-emption. 123 * and re-enable interrupts and pre-emption.
121 */ 124 */
125 kmap_atomic_idx_pop();
126 kmap_atomic_idx_pop();
122 sim_allow_multiple_caching(0); 127 sim_allow_multiple_caching(0);
123 local_irq_restore(flags); 128 local_irq_restore(flags);
124 put_cpu(); 129 put_cpu();
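
The memcpy_tile64.c change above replaces the fixed KM_MEMCPY0/KM_MEMCPY1 fixmap slots with the stack-based kmap_atomic_idx_push()/kmap_atomic_idx_pop() API. A rough sketch of the pairing the code relies on, using the same index formula as the hunk (PTE setup and TLB flushing omitted; this is an illustration, not the full implementation):

	int type = kmap_atomic_idx_push();	/* reserve a per-cpu slot */
	unsigned long idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type;
	unsigned long va = __fix_to_virt(idx);
	/* ... install a temporary PTE at va and use the mapping ... */
	kmap_atomic_idx_pop();			/* slots are released in LIFO order */
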
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index abb57331cf6..31dbbd9afe4 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -227,7 +227,7 @@ EXPORT_SYMBOL(kmap_atomic_prot);
227void *__kmap_atomic(struct page *page) 227void *__kmap_atomic(struct page *page)
228{ 228{
229 /* PAGE_NONE is a magic value that tells us to check immutability. */ 229 /* PAGE_NONE is a magic value that tells us to check immutability. */
230 return kmap_atomic_prot(page, type, PAGE_NONE); 230 return kmap_atomic_prot(page, PAGE_NONE);
231} 231}
232EXPORT_SYMBOL(__kmap_atomic); 232EXPORT_SYMBOL(__kmap_atomic);
233 233
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 78e1982cb6c..0b9ce69b0ee 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -988,8 +988,12 @@ static long __write_once initfree = 1;
988/* Select whether to free (1) or mark unusable (0) the __init pages. */ 988/* Select whether to free (1) or mark unusable (0) the __init pages. */
989static int __init set_initfree(char *str) 989static int __init set_initfree(char *str)
990{ 990{
991 strict_strtol(str, 0, &initfree); 991 long val;
992 pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't"); 992 if (strict_strtol(str, 0, &val)) {
993 initfree = val;
994 pr_info("initfree: %s free init pages\n",
995 initfree ? "will" : "won't");
996 }
993 return 1; 997 return 1;
994} 998}
995__setup("initfree=", set_initfree); 999__setup("initfree=", set_initfree);
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 335c24621c4..1f5430c53d0 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -134,9 +134,9 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
134} 134}
135 135
136#if defined(CONFIG_HIGHPTE) 136#if defined(CONFIG_HIGHPTE)
137pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type) 137pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
138{ 138{
139 pte_t *pte = kmap_atomic(pmd_page(*dir), type) + 139 pte_t *pte = kmap_atomic(pmd_page(*dir)) +
140 (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK; 140 (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
141 return &pte[pte_index(address)]; 141 return &pte[pte_index(address)];
142} 142}
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index 2cd899f75a3..b7c5bab9bd7 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -38,8 +38,8 @@ struct pt_regs {
38 38
39struct task_struct; 39struct task_struct;
40 40
41extern long subarch_ptrace(struct task_struct *child, long request, long addr, 41extern long subarch_ptrace(struct task_struct *child, long request,
42 long data); 42 unsigned long addr, unsigned long data);
43extern unsigned long getreg(struct task_struct *child, int regno); 43extern unsigned long getreg(struct task_struct *child, int regno);
44extern int putreg(struct task_struct *child, int regno, unsigned long value); 44extern int putreg(struct task_struct *child, int regno, unsigned long value);
45extern int get_fpregs(struct user_i387_struct __user *buf, 45extern int get_fpregs(struct user_i387_struct __user *buf,
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index a5e33f29bbe..701b672c112 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -122,7 +122,7 @@ long arch_ptrace(struct task_struct *child, long request,
122 break; 122 break;
123 123
124 case PTRACE_SET_THREAD_AREA: 124 case PTRACE_SET_THREAD_AREA:
125 ret = ptrace_set_thread_area(child, addr, datavp); 125 ret = ptrace_set_thread_area(child, addr, vp);
126 break; 126 break;
127 127
128 case PTRACE_FAULTINFO: { 128 case PTRACE_FAULTINFO: {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 286de34b0ed..f6ce0bda3b9 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -141,13 +141,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
141 141
142static inline u32 native_apic_msr_read(u32 reg) 142static inline u32 native_apic_msr_read(u32 reg)
143{ 143{
144 u32 low, high; 144 u64 msr;
145 145
146 if (reg == APIC_DFR) 146 if (reg == APIC_DFR)
147 return -1; 147 return -1;
148 148
149 rdmsr(APIC_BASE_MSR + (reg >> 4), low, high); 149 rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
150 return low; 150 return (u32)msr;
151} 151}
152 152
153static inline void native_x2apic_wait_icr_idle(void) 153static inline void native_x2apic_wait_icr_idle(void)
@@ -181,12 +181,12 @@ extern void enable_x2apic(void);
181extern void x2apic_icr_write(u32 low, u32 id); 181extern void x2apic_icr_write(u32 low, u32 id);
182static inline int x2apic_enabled(void) 182static inline int x2apic_enabled(void)
183{ 183{
184 int msr, msr2; 184 u64 msr;
185 185
186 if (!cpu_has_x2apic) 186 if (!cpu_has_x2apic)
187 return 0; 187 return 0;
188 188
189 rdmsr(MSR_IA32_APICBASE, msr, msr2); 189 rdmsrl(MSR_IA32_APICBASE, msr);
190 if (msr & X2APIC_ENABLE) 190 if (msr & X2APIC_ENABLE)
191 return 1; 191 return 1;
192 return 0; 192 return 0;
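
Both apic.h hunks swap the split rdmsr(reg, lo, hi) form for the 64-bit rdmsrl(reg, val) form, which avoids a second temporary when only the low bits matter. The two reads are equivalent:

	u32 lo, hi;
	u64 val;

	rdmsr(MSR_IA32_APICBASE, lo, hi);	/* two 32-bit halves */
	rdmsrl(MSR_IA32_APICBASE, val);		/* one 64-bit value  */
	/* lo == (u32)val and hi == (u32)(val >> 32) */
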
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index b2f2d2e05ce..6d90adf4428 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -806,6 +806,78 @@ union uvh_node_present_table_u {
806}; 806};
807 807
808/* ========================================================================= */ 808/* ========================================================================= */
809/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
810/* ========================================================================= */
811#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
812
813#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
814#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
815#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
816#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
817#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
818#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
819
820union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
821 unsigned long v;
822 struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
823 unsigned long rsvd_0_23: 24; /* */
824 unsigned long base : 8; /* RW */
825 unsigned long rsvd_32_47: 16; /* */
826 unsigned long m_alias : 5; /* RW */
827 unsigned long rsvd_53_62: 10; /* */
828 unsigned long enable : 1; /* RW */
829 } s;
830};
831
832/* ========================================================================= */
833/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
834/* ========================================================================= */
835#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
836
837#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
838#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
839#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
840#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
841#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
842#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
843
844union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
845 unsigned long v;
846 struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
847 unsigned long rsvd_0_23: 24; /* */
848 unsigned long base : 8; /* RW */
849 unsigned long rsvd_32_47: 16; /* */
850 unsigned long m_alias : 5; /* RW */
851 unsigned long rsvd_53_62: 10; /* */
852 unsigned long enable : 1; /* RW */
853 } s;
854};
855
856/* ========================================================================= */
857/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
858/* ========================================================================= */
859#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
860
861#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
862#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
863#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
864#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
865#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
866#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
867
868union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
869 unsigned long v;
870 struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
871 unsigned long rsvd_0_23: 24; /* */
872 unsigned long base : 8; /* RW */
873 unsigned long rsvd_32_47: 16; /* */
874 unsigned long m_alias : 5; /* RW */
875 unsigned long rsvd_53_62: 10; /* */
876 unsigned long enable : 1; /* RW */
877 } s;
878};
879
880/* ========================================================================= */
809/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ 881/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
810/* ========================================================================= */ 882/* ========================================================================= */
811#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL 883#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
@@ -857,6 +929,29 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
857}; 929};
858 930
859/* ========================================================================= */ 931/* ========================================================================= */
932/* UVH_RH_GAM_CONFIG_MMR */
933/* ========================================================================= */
934#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
935
936#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
937#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
938#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
939#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
940#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
941#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
942
943union uvh_rh_gam_config_mmr_u {
944 unsigned long v;
945 struct uvh_rh_gam_config_mmr_s {
946 unsigned long m_skt : 6; /* RW */
947 unsigned long n_skt : 4; /* RW */
948 unsigned long rsvd_10_11: 2; /* */
949 unsigned long mmiol_cfg : 1; /* RW */
950 unsigned long rsvd_13_63: 51; /* */
951 } s;
952};
953
954/* ========================================================================= */
860/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ 955/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
861/* ========================================================================= */ 956/* ========================================================================= */
862#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL 957#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -987,97 +1082,5 @@ union uvh_rtc1_int_config_u {
987 } s; 1082 } s;
988}; 1083};
989 1084
990/* ========================================================================= */
991/* UVH_SI_ADDR_MAP_CONFIG */
992/* ========================================================================= */
993#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
994
995#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
996#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
997#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
998#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
999
1000union uvh_si_addr_map_config_u {
1001 unsigned long v;
1002 struct uvh_si_addr_map_config_s {
1003 unsigned long m_skt : 6; /* RW */
1004 unsigned long rsvd_6_7: 2; /* */
1005 unsigned long n_skt : 4; /* RW */
1006 unsigned long rsvd_12_63: 52; /* */
1007 } s;
1008};
1009
1010/* ========================================================================= */
1011/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
1012/* ========================================================================= */
1013#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
1014
1015#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
1016#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1017#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1018#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1019#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
1020#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1021
1022union uvh_si_alias0_overlay_config_u {
1023 unsigned long v;
1024 struct uvh_si_alias0_overlay_config_s {
1025 unsigned long rsvd_0_23: 24; /* */
1026 unsigned long base : 8; /* RW */
1027 unsigned long rsvd_32_47: 16; /* */
1028 unsigned long m_alias : 5; /* RW */
1029 unsigned long rsvd_53_62: 10; /* */
1030 unsigned long enable : 1; /* RW */
1031 } s;
1032};
1033
1034/* ========================================================================= */
1035/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
1036/* ========================================================================= */
1037#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
1038
1039#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
1040#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1041#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1042#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1043#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
1044#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1045
1046union uvh_si_alias1_overlay_config_u {
1047 unsigned long v;
1048 struct uvh_si_alias1_overlay_config_s {
1049 unsigned long rsvd_0_23: 24; /* */
1050 unsigned long base : 8; /* RW */
1051 unsigned long rsvd_32_47: 16; /* */
1052 unsigned long m_alias : 5; /* RW */
1053 unsigned long rsvd_53_62: 10; /* */
1054 unsigned long enable : 1; /* RW */
1055 } s;
1056};
1057
1058/* ========================================================================= */
1059/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
1060/* ========================================================================= */
1061#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
1062
1063#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
1064#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1065#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1066#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1067#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
1068#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1069
1070union uvh_si_alias2_overlay_config_u {
1071 unsigned long v;
1072 struct uvh_si_alias2_overlay_config_s {
1073 unsigned long rsvd_0_23: 24; /* */
1074 unsigned long base : 8; /* RW */
1075 unsigned long rsvd_32_47: 16; /* */
1076 unsigned long m_alias : 5; /* RW */
1077 unsigned long rsvd_53_62: 10; /* */
1078 unsigned long enable : 1; /* RW */
1079 } s;
1080};
1081
1082 1085
1083#endif /* _ASM_X86_UV_UV_MMRS_H */ 1086#endif /* __ASM_UV_MMRS_X86_H__ */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 850657d1b0e..3f838d53739 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -52,7 +52,6 @@
52#include <asm/mce.h> 52#include <asm/mce.h>
53#include <asm/kvm_para.h> 53#include <asm/kvm_para.h>
54#include <asm/tsc.h> 54#include <asm/tsc.h>
55#include <asm/atomic.h>
56 55
57unsigned int num_processors; 56unsigned int num_processors;
58 57
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ed4118de249..194539aea17 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -379,14 +379,14 @@ struct redir_addr {
379#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 379#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
380 380
381static __initdata struct redir_addr redir_addrs[] = { 381static __initdata struct redir_addr redir_addrs[] = {
382 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, 382 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
383 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, 383 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
384 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, 384 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
385}; 385};
386 386
387static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) 387static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
388{ 388{
389 union uvh_si_alias0_overlay_config_u alias; 389 union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
390 union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; 390 union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
391 int i; 391 int i;
392 392
@@ -660,7 +660,7 @@ void uv_nmi_init(void)
660 660
661void __init uv_system_init(void) 661void __init uv_system_init(void)
662{ 662{
663 union uvh_si_addr_map_config_u m_n_config; 663 union uvh_rh_gam_config_mmr_u m_n_config;
664 union uvh_node_id_u node_id; 664 union uvh_node_id_u node_id;
665 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; 665 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
666 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; 666 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
@@ -670,7 +670,7 @@ void __init uv_system_init(void)
670 670
671 map_low_mmrs(); 671 map_low_mmrs();
672 672
673 m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); 673 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
674 m_val = m_n_config.s.m_skt; 674 m_val = m_n_config.s.m_skt;
675 n_val = m_n_config.s.n_skt; 675 n_val = m_n_config.s.n_skt;
676 mmr_base = 676 mmr_base =
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 46d58448c3a..e421b8cd694 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -280,11 +280,11 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
280 struct amd_nb *nb; 280 struct amd_nb *nb;
281 int i; 281 int i;
282 282
283 nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL); 283 nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
284 cpu_to_node(cpu));
284 if (!nb) 285 if (!nb)
285 return NULL; 286 return NULL;
286 287
287 memset(nb, 0, sizeof(*nb));
288 nb->nb_id = nb_id; 288 nb->nb_id = nb_id;
289 289
290 /* 290 /*
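
The perf_event_amd.c hunk folds the kmalloc() + memset() pair into a single zeroed, node-local allocation. Passing __GFP_ZERO to kmalloc_node() has the same effect as using kzalloc_node():

	nb = kmalloc_node(sizeof(*nb), GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
	nb = kzalloc_node(sizeof(*nb), GFP_KERNEL, cpu_to_node(cpu));	/* equivalent */
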
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index e1af7c055c7..ce0cb4721c9 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -212,7 +212,7 @@ static int install_equiv_cpu_table(const u8 *buf)
212 return 0; 212 return 0;
213 } 213 }
214 214
215 equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size); 215 equiv_cpu_table = vmalloc(size);
216 if (!equiv_cpu_table) { 216 if (!equiv_cpu_table) {
217 pr_err("failed to allocate equivalent CPU table\n"); 217 pr_err("failed to allocate equivalent CPU table\n");
218 return 0; 218 return 0;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 71825806cd4..6da143c2a6b 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -217,13 +217,13 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
217 wrmsrl(address, val); 217 wrmsrl(address, val);
218} 218}
219 219
220static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d) 220static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
221{ 221{
222 pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF; 222 pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
223 return 0; 223 return 0;
224} 224}
225 225
226static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = { 226static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
227 { 227 {
228 .callback = set_check_enable_amd_mmconf, 228 .callback = set_check_enable_amd_mmconf,
229 .ident = "Sun Microsystems Machine", 229 .ident = "Sun Microsystems Machine",
@@ -234,7 +234,8 @@ static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
234 {} 234 {}
235}; 235};
236 236
237void __cpuinit check_enable_amd_mmconf_dmi(void) 237/* Called from a __cpuinit function, but only on the BSP. */
238void __ref check_enable_amd_mmconf_dmi(void)
238{ 239{
239 dmi_check_system(mmconf_dmi_table); 240 dmi_check_system(mmconf_dmi_table);
240} 241}
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index bab3b9e6f66..008b91eefa1 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -41,44 +41,6 @@ void pvclock_set_flags(u8 flags)
41 valid_flags = flags; 41 valid_flags = flags;
42} 42}
43 43
44/*
45 * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
46 * yielding a 64-bit result.
47 */
48static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
49{
50 u64 product;
51#ifdef __i386__
52 u32 tmp1, tmp2;
53#endif
54
55 if (shift < 0)
56 delta >>= -shift;
57 else
58 delta <<= shift;
59
60#ifdef __i386__
61 __asm__ (
62 "mul %5 ; "
63 "mov %4,%%eax ; "
64 "mov %%edx,%4 ; "
65 "mul %5 ; "
66 "xor %5,%5 ; "
67 "add %4,%%eax ; "
68 "adc %5,%%edx ; "
69 : "=A" (product), "=r" (tmp1), "=r" (tmp2)
70 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
71#elif defined(__x86_64__)
72 __asm__ (
73 "mul %%rdx ; shrd $32,%%rdx,%%rax"
74 : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
75#else
76#error implement me!
77#endif
78
79 return product;
80}
81
82static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow) 44static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
83{ 45{
84 u64 delta = native_read_tsc() - shadow->tsc_timestamp; 46 u64 delta = native_read_tsc() - shadow->tsc_timestamp;
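
The scale_delta() helper removed above scales a TSC delta by a binary shift and then multiplies by a 32.32 fixed-point fraction, keeping the upper 64 bits of the 96-bit product. A portable sketch of the same computation, assuming a compiler with 128-bit integer support (the inline asm versions avoid that dependency):

	static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
	{
		if (shift < 0)
			delta >>= -shift;
		else
			delta <<= shift;
		return (u64)(((unsigned __int128)delta * mul_frac) >> 32);
	}
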
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea5464a5..fb8b376bf28 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
720 } 720 }
721} 721}
722 722
723static void set_spte_track_bits(u64 *sptep, u64 new_spte) 723static int set_spte_track_bits(u64 *sptep, u64 new_spte)
724{ 724{
725 pfn_t pfn; 725 pfn_t pfn;
726 u64 old_spte = *sptep; 726 u64 old_spte = *sptep;
@@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
731 old_spte = __xchg_spte(sptep, new_spte); 731 old_spte = __xchg_spte(sptep, new_spte);
732 732
733 if (!is_rmap_spte(old_spte)) 733 if (!is_rmap_spte(old_spte))
734 return; 734 return 0;
735 735
736 pfn = spte_to_pfn(old_spte); 736 pfn = spte_to_pfn(old_spte);
737 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) 737 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
738 kvm_set_pfn_accessed(pfn); 738 kvm_set_pfn_accessed(pfn);
739 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) 739 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
740 kvm_set_pfn_dirty(pfn); 740 kvm_set_pfn_dirty(pfn);
741 return 1;
741} 742}
742 743
743static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte) 744static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
744{ 745{
745 set_spte_track_bits(sptep, new_spte); 746 if (set_spte_track_bits(sptep, new_spte))
746 rmap_remove(kvm, sptep); 747 rmap_remove(kvm, sptep);
747} 748}
748 749
749static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte) 750static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2288ad829b3..cdac9e592aa 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2560 !kvm_exception_is_soft(vcpu->arch.exception.nr); 2560 !kvm_exception_is_soft(vcpu->arch.exception.nr);
2561 events->exception.nr = vcpu->arch.exception.nr; 2561 events->exception.nr = vcpu->arch.exception.nr;
2562 events->exception.has_error_code = vcpu->arch.exception.has_error_code; 2562 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2563 events->exception.pad = 0;
2563 events->exception.error_code = vcpu->arch.exception.error_code; 2564 events->exception.error_code = vcpu->arch.exception.error_code;
2564 2565
2565 events->interrupt.injected = 2566 events->interrupt.injected =
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2573 events->nmi.injected = vcpu->arch.nmi_injected; 2574 events->nmi.injected = vcpu->arch.nmi_injected;
2574 events->nmi.pending = vcpu->arch.nmi_pending; 2575 events->nmi.pending = vcpu->arch.nmi_pending;
2575 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); 2576 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2577 events->nmi.pad = 0;
2576 2578
2577 events->sipi_vector = vcpu->arch.sipi_vector; 2579 events->sipi_vector = vcpu->arch.sipi_vector;
2578 2580
2579 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING 2581 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2580 | KVM_VCPUEVENT_VALID_SIPI_VECTOR 2582 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2581 | KVM_VCPUEVENT_VALID_SHADOW); 2583 | KVM_VCPUEVENT_VALID_SHADOW);
2584 memset(&events->reserved, 0, sizeof(events->reserved));
2582} 2585}
2583 2586
2584static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, 2587static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2623 dbgregs->dr6 = vcpu->arch.dr6; 2626 dbgregs->dr6 = vcpu->arch.dr6;
2624 dbgregs->dr7 = vcpu->arch.dr7; 2627 dbgregs->dr7 = vcpu->arch.dr7;
2625 dbgregs->flags = 0; 2628 dbgregs->flags = 0;
2629 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2626} 2630}
2627 2631
2628static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, 2632static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3106 sizeof(ps->channels)); 3110 sizeof(ps->channels));
3107 ps->flags = kvm->arch.vpit->pit_state.flags; 3111 ps->flags = kvm->arch.vpit->pit_state.flags;
3108 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 3112 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3113 memset(&ps->reserved, 0, sizeof(ps->reserved));
3109 return r; 3114 return r;
3110} 3115}
3111 3116
@@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
3169 struct kvm_memslots *slots, *old_slots; 3174 struct kvm_memslots *slots, *old_slots;
3170 unsigned long *dirty_bitmap; 3175 unsigned long *dirty_bitmap;
3171 3176
3172 spin_lock(&kvm->mmu_lock);
3173 kvm_mmu_slot_remove_write_access(kvm, log->slot);
3174 spin_unlock(&kvm->mmu_lock);
3175
3176 r = -ENOMEM; 3177 r = -ENOMEM;
3177 dirty_bitmap = vmalloc(n); 3178 dirty_bitmap = vmalloc(n);
3178 if (!dirty_bitmap) 3179 if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
3194 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; 3195 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
3195 kfree(old_slots); 3196 kfree(old_slots);
3196 3197
3198 spin_lock(&kvm->mmu_lock);
3199 kvm_mmu_slot_remove_write_access(kvm, log->slot);
3200 spin_unlock(&kvm->mmu_lock);
3201
3197 r = -EFAULT; 3202 r = -EFAULT;
3198 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) { 3203 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
3199 vfree(dirty_bitmap); 3204 vfree(dirty_bitmap);
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
3486 user_ns.clock = kvm->arch.kvmclock_offset + now_ns; 3491 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3487 local_irq_enable(); 3492 local_irq_enable();
3488 user_ns.flags = 0; 3493 user_ns.flags = 0;
3494 memset(&user_ns.pad, 0, sizeof(user_ns.pad));
3489 3495
3490 r = -EFAULT; 3496 r = -EFAULT;
3491 if (copy_to_user(argp, &user_ns, sizeof(user_ns))) 3497 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
3972 return X86EMUL_CONTINUE; 3978 return X86EMUL_CONTINUE;
3973 3979
3974 if (kvm_x86_ops->has_wbinvd_exit()) { 3980 if (kvm_x86_ops->has_wbinvd_exit()) {
3981 preempt_disable();
3975 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, 3982 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
3976 wbinvd_ipi, NULL, 1); 3983 wbinvd_ipi, NULL, 1);
3984 preempt_enable();
3977 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); 3985 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
3978 } 3986 }
3979 wbinvd(); 3987 wbinvd();
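
The kvm_emulate_wbinvd() hunk wraps smp_call_function_many() in preempt_disable()/preempt_enable(): the cross-call path uses the current CPU's per-cpu data, so the caller must not be migrated while the IPIs are issued. The resulting pattern is simply:

	preempt_disable();
	smp_call_function_many(mask, wbinvd_ipi, NULL, 1);	/* wait for completion */
	preempt_enable();
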
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 49358481c73..12cdbb17ad1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -251,7 +251,7 @@ static void __cpuinit calculate_tlb_offset(void)
251 } 251 }
252} 252}
253 253
254static int tlb_cpuhp_notify(struct notifier_block *n, 254static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
255 unsigned long action, void *hcpu) 255 unsigned long action, void *hcpu)
256{ 256{
257 switch (action & 0xf) { 257 switch (action & 0xf) {
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 15466c096ba..0972315c386 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -138,7 +138,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
138 struct acpi_resource_address64 addr; 138 struct acpi_resource_address64 addr;
139 acpi_status status; 139 acpi_status status;
140 unsigned long flags; 140 unsigned long flags;
141 struct resource *root, *conflict;
142 u64 start, end; 141 u64 start, end;
143 142
144 status = resource_to_addr(acpi_res, &addr); 143 status = resource_to_addr(acpi_res, &addr);
@@ -146,12 +145,10 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
146 return AE_OK; 145 return AE_OK;
147 146
148 if (addr.resource_type == ACPI_MEMORY_RANGE) { 147 if (addr.resource_type == ACPI_MEMORY_RANGE) {
149 root = &iomem_resource;
150 flags = IORESOURCE_MEM; 148 flags = IORESOURCE_MEM;
151 if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY) 149 if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
152 flags |= IORESOURCE_PREFETCH; 150 flags |= IORESOURCE_PREFETCH;
153 } else if (addr.resource_type == ACPI_IO_RANGE) { 151 } else if (addr.resource_type == ACPI_IO_RANGE) {
154 root = &ioport_resource;
155 flags = IORESOURCE_IO; 152 flags = IORESOURCE_IO;
156 } else 153 } else
157 return AE_OK; 154 return AE_OK;
@@ -172,25 +169,90 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
172 return AE_OK; 169 return AE_OK;
173 } 170 }
174 171
175 conflict = insert_resource_conflict(root, res); 172 info->res_num++;
176 if (conflict) { 173 if (addr.translation_offset)
177 dev_err(&info->bridge->dev, 174 dev_info(&info->bridge->dev, "host bridge window %pR "
178 "address space collision: host bridge window %pR " 175 "(PCI address [%#llx-%#llx])\n",
179 "conflicts with %s %pR\n", 176 res, res->start - addr.translation_offset,
180 res, conflict->name, conflict); 177 res->end - addr.translation_offset);
181 } else { 178 else
182 pci_bus_add_resource(info->bus, res, 0); 179 dev_info(&info->bridge->dev, "host bridge window %pR\n", res);
183 info->res_num++; 180
184 if (addr.translation_offset) 181 return AE_OK;
185 dev_info(&info->bridge->dev, "host bridge window %pR " 182}
186 "(PCI address [%#llx-%#llx])\n", 183
187 res, res->start - addr.translation_offset, 184static bool resource_contains(struct resource *res, resource_size_t point)
188 res->end - addr.translation_offset); 185{
186 if (res->start <= point && point <= res->end)
187 return true;
188 return false;
189}
190
191static void coalesce_windows(struct pci_root_info *info, int type)
192{
193 int i, j;
194 struct resource *res1, *res2;
195
196 for (i = 0; i < info->res_num; i++) {
197 res1 = &info->res[i];
198 if (!(res1->flags & type))
199 continue;
200
201 for (j = i + 1; j < info->res_num; j++) {
202 res2 = &info->res[j];
203 if (!(res2->flags & type))
204 continue;
205
206 /*
207 * I don't like throwing away windows because then
208 * our resources no longer match the ACPI _CRS, but
209 * the kernel resource tree doesn't allow overlaps.
210 */
211 if (resource_contains(res1, res2->start) ||
212 resource_contains(res1, res2->end) ||
213 resource_contains(res2, res1->start) ||
214 resource_contains(res2, res1->end)) {
215 res1->start = min(res1->start, res2->start);
216 res1->end = max(res1->end, res2->end);
217 dev_info(&info->bridge->dev,
218 "host bridge window expanded to %pR; %pR ignored\n",
219 res1, res2);
220 res2->flags = 0;
221 }
222 }
223 }
224}
225
226static void add_resources(struct pci_root_info *info)
227{
228 int i;
229 struct resource *res, *root, *conflict;
230
231 if (!pci_use_crs)
232 return;
233
234 coalesce_windows(info, IORESOURCE_MEM);
235 coalesce_windows(info, IORESOURCE_IO);
236
237 for (i = 0; i < info->res_num; i++) {
238 res = &info->res[i];
239
240 if (res->flags & IORESOURCE_MEM)
241 root = &iomem_resource;
242 else if (res->flags & IORESOURCE_IO)
243 root = &ioport_resource;
189 else 244 else
190 dev_info(&info->bridge->dev, 245 continue;
191 "host bridge window %pR\n", res); 246
247 conflict = insert_resource_conflict(root, res);
248 if (conflict)
249 dev_err(&info->bridge->dev,
250 "address space collision: host bridge window %pR "
251 "conflicts with %s %pR\n",
252 res, conflict->name, conflict);
253 else
254 pci_bus_add_resource(info->bus, res, 0);
192 } 255 }
193 return AE_OK;
194} 256}
195 257
196static void 258static void
@@ -224,6 +286,7 @@ get_current_resources(struct acpi_device *device, int busnum,
224 acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, 286 acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
225 &info); 287 &info);
226 288
289 add_resources(&info);
227 return; 290 return;
228 291
229name_alloc_fail: 292name_alloc_fail:
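
In coalesce_windows() above, the four resource_contains() tests reduce to the usual closed-interval overlap check; a hedged restatement of the same condition as a standalone helper (name made up for illustration):

	static bool windows_overlap(const struct resource *a, const struct resource *b)
	{
		/* Closed intervals overlap iff neither ends before the other begins. */
		return a->start <= b->end && b->start <= a->end;
	}
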
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 117f5b8daf7..d7b5109f7a9 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -147,8 +147,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
147 irq = xen_allocate_pirq(v[i], 0, /* not sharable */ 147 irq = xen_allocate_pirq(v[i], 0, /* not sharable */
148 (type == PCI_CAP_ID_MSIX) ? 148 (type == PCI_CAP_ID_MSIX) ?
149 "pcifront-msi-x" : "pcifront-msi"); 149 "pcifront-msi-x" : "pcifront-msi");
150 if (irq < 0) 150 if (irq < 0) {
151 return -1; 151 ret = -1;
152 goto free;
153 }
152 154
153 ret = set_irq_msi(irq, msidesc); 155 ret = set_irq_msi(irq, msidesc);
154 if (ret) 156 if (ret)
@@ -164,7 +166,7 @@ error:
164 if (ret == -ENODEV) 166 if (ret == -ENODEV)
165 dev_err(&dev->dev, "Xen PCI frontend has not registered" \ 167 dev_err(&dev->dev, "Xen PCI frontend has not registered" \
166 " MSI/MSI-X support!\n"); 168 " MSI/MSI-X support!\n");
167 169free:
168 kfree(v); 170 kfree(v);
169 return ret; 171 return ret;
170} 172}
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 20ea20a39e2..a318194002b 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1343,8 +1343,8 @@ uv_activation_descriptor_init(int node, int pnode)
1343 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) 1343 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
1344 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub 1344 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
1345 */ 1345 */
1346 bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* 1346 bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
1347 UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); 1347 * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
1348 BUG_ON(!bau_desc); 1348 BUG_ON(!bau_desc);
1349 1349
1350 pa = uv_gpa(bau_desc); /* need the real nasid*/ 1350 pa = uv_gpa(bau_desc); /* need the real nasid*/
@@ -1402,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode)
1402 struct bau_payload_queue_entry *pqp_malloc; 1402 struct bau_payload_queue_entry *pqp_malloc;
1403 struct bau_control *bcp; 1403 struct bau_control *bcp;
1404 1404
1405 pqp = (struct bau_payload_queue_entry *) kmalloc_node( 1405 pqp = kmalloc_node((DEST_Q_SIZE + 1)
1406 (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), 1406 * sizeof(struct bau_payload_queue_entry),
1407 GFP_KERNEL, node); 1407 GFP_KERNEL, node);
1408 BUG_ON(!pqp); 1408 BUG_ON(!pqp);
1409 pqp_malloc = pqp; 1409 pqp_malloc = pqp;
1410 1410
@@ -1520,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
1520 1520
1521 timeout_us = calculate_destination_timeout(); 1521 timeout_us = calculate_destination_timeout();
1522 1522
1523 uvhub_descs = (struct uvhub_desc *) 1523 uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1524 kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1525 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); 1524 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
1526 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); 1525 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
1527 for_each_present_cpu(cpu) { 1526 for_each_present_cpu(cpu) {
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c237b810b03..21ed8d7f75a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2126,7 +2126,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
2126{ 2126{
2127 pmd_t *kernel_pmd; 2127 pmd_t *kernel_pmd;
2128 2128
2129 level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE); 2129 level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2130 2130
2131 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + 2131 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2132 xen_start_info->nr_pt_frames * PAGE_SIZE + 2132 xen_start_info->nr_pt_frames * PAGE_SIZE +
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b1dbdaa23ec..769c4b01fa3 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
118 const struct e820map *e820) 118 const struct e820map *e820)
119{ 119{
120 phys_addr_t max_addr = PFN_PHYS(max_pfn); 120 phys_addr_t max_addr = PFN_PHYS(max_pfn);
121 phys_addr_t last_end = 0; 121 phys_addr_t last_end = ISA_END_ADDRESS;
122 unsigned long released = 0; 122 unsigned long released = 0;
123 int i; 123 int i;
124 124
125 /* Free any unused memory above the low 1Mbyte. */
125 for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { 126 for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
126 phys_addr_t end = e820->map[i].addr; 127 phys_addr_t end = e820->map[i].addr;
127 end = min(max_addr, end); 128 end = min(max_addr, end);
128 129
129 released += xen_release_chunk(last_end, end); 130 if (last_end < end)
130 last_end = e820->map[i].addr + e820->map[i].size; 131 released += xen_release_chunk(last_end, end);
132 last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
131 } 133 }
132 134
133 if (last_end < max_addr) 135 if (last_end < max_addr)
@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void)
164 XENMEM_memory_map; 166 XENMEM_memory_map;
165 rc = HYPERVISOR_memory_op(op, &memmap); 167 rc = HYPERVISOR_memory_op(op, &memmap);
166 if (rc == -ENOSYS) { 168 if (rc == -ENOSYS) {
169 BUG_ON(xen_initial_domain());
167 memmap.nr_entries = 1; 170 memmap.nr_entries = 1;
168 map[0].addr = 0ULL; 171 map[0].addr = 0ULL;
169 map[0].size = mem_end; 172 map[0].size = mem_end;
@@ -201,12 +204,13 @@ char * __init xen_memory_setup(void)
201 } 204 }
202 205
203 /* 206 /*
204 * Even though this is normal, usable memory under Xen, reserve 207 * In domU, the ISA region is normal, usable memory, but we
205 * ISA memory anyway because too many things think they can poke 208 * reserve ISA memory anyway because too many things poke
206 * about in there. 209 * about in there.
207 * 210 *
208 * In a dom0 kernel, this region is identity mapped with the 211 * In Dom0, the host E820 information can leave gaps in the
209 * hardware ISA area, so it really is out of bounds. 212 * ISA range, which would cause us to release those pages. To
213 * avoid this, we unconditionally reserve them here.
210 */ 214 */
211 e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, 215 e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
212 E820_RESERVED); 216 E820_RESERVED);
diff --git a/block/blk-core.c b/block/blk-core.c
index f0834e2f572..4ce953f1b39 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1194 int where = ELEVATOR_INSERT_SORT; 1194 int where = ELEVATOR_INSERT_SORT;
1195 int rw_flags; 1195 int rw_flags;
1196 1196
1197 /* REQ_HARDBARRIER is no more */
1198 if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
1199 "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
1200 bio_endio(bio, -EOPNOTSUPP);
1201 return 0;
1202 }
1203
1204 /* 1197 /*
1205 * low level driver can indicate that it wants pages above a 1198 * low level driver can indicate that it wants pages above a
1206 * certain limit bounced to low memory (ie for highmem, or even 1199 * certain limit bounced to low memory (ie for highmem, or even
@@ -1351,7 +1344,7 @@ static void handle_bad_sector(struct bio *bio)
1351 bdevname(bio->bi_bdev, b), 1344 bdevname(bio->bi_bdev, b),
1352 bio->bi_rw, 1345 bio->bi_rw,
1353 (unsigned long long)bio->bi_sector + bio_sectors(bio), 1346 (unsigned long long)bio->bi_sector + bio_sectors(bio),
1354 (long long)(bio->bi_bdev->bd_inode->i_size >> 9)); 1347 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1355 1348
1356 set_bit(BIO_EOF, &bio->bi_flags); 1349 set_bit(BIO_EOF, &bio->bi_flags);
1357} 1350}
@@ -1404,7 +1397,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1404 return 0; 1397 return 0;
1405 1398
1406 /* Test device or partition size, when known. */ 1399 /* Test device or partition size, when known. */
1407 maxsector = bio->bi_bdev->bd_inode->i_size >> 9; 1400 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1408 if (maxsector) { 1401 if (maxsector) {
1409 sector_t sector = bio->bi_sector; 1402 sector_t sector = bio->bi_sector;
1410 1403
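
The blk-core.c hunks replace direct reads of bd_inode->i_size with i_size_read(), which uses a seqcount on 32-bit SMP/PREEMPT builds so the 64-bit size is never seen half-updated. Typical use, as in the hunk:

	loff_t size = i_size_read(bdev->bd_inode);	/* tear-free read of i_size */
	sector_t maxsector = size >> 9;			/* size in 512-byte sectors */
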
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index d22c4c55c40..3c7a339fe38 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -153,20 +153,6 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
153} 153}
154EXPORT_SYMBOL(get_io_context); 154EXPORT_SYMBOL(get_io_context);
155 155
156void copy_io_context(struct io_context **pdst, struct io_context **psrc)
157{
158 struct io_context *src = *psrc;
159 struct io_context *dst = *pdst;
160
161 if (src) {
162 BUG_ON(atomic_long_read(&src->refcount) == 0);
163 atomic_long_inc(&src->refcount);
164 put_io_context(dst);
165 *pdst = src;
166 }
167}
168EXPORT_SYMBOL(copy_io_context);
169
170static int __init blk_ioc_init(void) 156static int __init blk_ioc_init(void)
171{ 157{
172 iocontext_cachep = kmem_cache_create("blkdev_ioc", 158 iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-map.c b/block/blk-map.c
index d4a586d8691..5d5dbe47c22 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
205 unaligned = 1; 205 unaligned = 1;
206 break; 206 break;
207 } 207 }
208 if (!iov[i].iov_len)
209 return -EINVAL;
208 } 210 }
209 211
210 if (unaligned || (q->dma_pad_mask & len) || map_data) 212 if (unaligned || (q->dma_pad_mask & len) || map_data)
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 119f07b74dc..58c6ee5b010 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -744,13 +744,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
744 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; 744 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
745 return 0; 745 return 0;
746 case BLKGETSIZE: 746 case BLKGETSIZE:
747 size = bdev->bd_inode->i_size; 747 size = i_size_read(bdev->bd_inode);
748 if ((size >> 9) > ~0UL) 748 if ((size >> 9) > ~0UL)
749 return -EFBIG; 749 return -EFBIG;
750 return compat_put_ulong(arg, size >> 9); 750 return compat_put_ulong(arg, size >> 9);
751 751
752 case BLKGETSIZE64_32: 752 case BLKGETSIZE64_32:
753 return compat_put_u64(arg, bdev->bd_inode->i_size); 753 return compat_put_u64(arg, i_size_read(bdev->bd_inode));
754 754
755 case BLKTRACESETUP32: 755 case BLKTRACESETUP32:
756 case BLKTRACESTART: /* compatible */ 756 case BLKTRACESTART: /* compatible */
diff --git a/block/elevator.c b/block/elevator.c
index 282e8308f7e..2569512830d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
429 q->nr_sorted--; 429 q->nr_sorted--;
430 430
431 boundary = q->end_sector; 431 boundary = q->end_sector;
432 stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED; 432 stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
433 list_for_each_prev(entry, &q->queue_head) { 433 list_for_each_prev(entry, &q->queue_head) {
434 struct request *pos = list_entry_rq(entry); 434 struct request *pos = list_entry_rq(entry);
435 435
@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
691void __elv_add_request(struct request_queue *q, struct request *rq, int where, 691void __elv_add_request(struct request_queue *q, struct request *rq, int where,
692 int plug) 692 int plug)
693{ 693{
694 if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { 694 if (rq->cmd_flags & REQ_SOFTBARRIER) {
695 /* barriers are scheduling boundary, update end_sector */ 695 /* barriers are scheduling boundary, update end_sector */
696 if (rq->cmd_type == REQ_TYPE_FS || 696 if (rq->cmd_type == REQ_TYPE_FS ||
697 (rq->cmd_flags & REQ_DISCARD)) { 697 (rq->cmd_flags & REQ_DISCARD)) {
diff --git a/block/ioctl.c b/block/ioctl.c
index d724ceb1d46..3d866d0037f 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -125,7 +125,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
125 start >>= 9; 125 start >>= 9;
126 len >>= 9; 126 len >>= 9;
127 127
128 if (start + len > (bdev->bd_inode->i_size >> 9)) 128 if (start + len > (i_size_read(bdev->bd_inode) >> 9))
129 return -EINVAL; 129 return -EINVAL;
130 if (secure) 130 if (secure)
131 flags |= BLKDEV_DISCARD_SECURE; 131 flags |= BLKDEV_DISCARD_SECURE;
@@ -242,6 +242,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
242 * We need to set the startsect first, the driver may 242 * We need to set the startsect first, the driver may
243 * want to override it. 243 * want to override it.
244 */ 244 */
245 memset(&geo, 0, sizeof(geo));
245 geo.start = get_start_sect(bdev); 246 geo.start = get_start_sect(bdev);
246 ret = disk->fops->getgeo(bdev, &geo); 247 ret = disk->fops->getgeo(bdev, &geo);
247 if (ret) 248 if (ret)
@@ -307,12 +308,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
307 ret = blkdev_reread_part(bdev); 308 ret = blkdev_reread_part(bdev);
308 break; 309 break;
309 case BLKGETSIZE: 310 case BLKGETSIZE:
310 size = bdev->bd_inode->i_size; 311 size = i_size_read(bdev->bd_inode);
311 if ((size >> 9) > ~0UL) 312 if ((size >> 9) > ~0UL)
312 return -EFBIG; 313 return -EFBIG;
313 return put_ulong(arg, size >> 9); 314 return put_ulong(arg, size >> 9);
314 case BLKGETSIZE64: 315 case BLKGETSIZE64:
315 return put_u64(arg, bdev->bd_inode->i_size); 316 return put_u64(arg, i_size_read(bdev->bd_inode));
316 case BLKTRACESTART: 317 case BLKTRACESTART:
317 case BLKTRACESTOP: 318 case BLKTRACESTOP:
318 case BLKTRACESETUP: 319 case BLKTRACESETUP:
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a8b5a10eb5b..4f4230b79bb 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
321 if (hdr->iovec_count) { 321 if (hdr->iovec_count) {
322 const int size = sizeof(struct sg_iovec) * hdr->iovec_count; 322 const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
323 size_t iov_data_len; 323 size_t iov_data_len;
324 struct sg_iovec *iov; 324 struct sg_iovec *sg_iov;
325 struct iovec *iov;
326 int i;
325 327
326 iov = kmalloc(size, GFP_KERNEL); 328 sg_iov = kmalloc(size, GFP_KERNEL);
327 if (!iov) { 329 if (!sg_iov) {
328 ret = -ENOMEM; 330 ret = -ENOMEM;
329 goto out; 331 goto out;
330 } 332 }
331 333
332 if (copy_from_user(iov, hdr->dxferp, size)) { 334 if (copy_from_user(sg_iov, hdr->dxferp, size)) {
333 kfree(iov); 335 kfree(sg_iov);
334 ret = -EFAULT; 336 ret = -EFAULT;
335 goto out; 337 goto out;
336 } 338 }
337 339
340 /*
341 * Sum up the vecs, making sure they don't overflow
342 */
343 iov = (struct iovec *) sg_iov;
344 iov_data_len = 0;
345 for (i = 0; i < hdr->iovec_count; i++) {
346 if (iov_data_len + iov[i].iov_len < iov_data_len) {
347 kfree(sg_iov);
348 ret = -EINVAL;
349 goto out;
350 }
351 iov_data_len += iov[i].iov_len;
352 }
353
338 /* SG_IO howto says that the shorter of the two wins */ 354 /* SG_IO howto says that the shorter of the two wins */
339 iov_data_len = iov_length((struct iovec *)iov,
340 hdr->iovec_count);
341 if (hdr->dxfer_len < iov_data_len) { 355 if (hdr->dxfer_len < iov_data_len) {
342 hdr->iovec_count = iov_shorten((struct iovec *)iov, 356 hdr->iovec_count = iov_shorten(iov,
343 hdr->iovec_count, 357 hdr->iovec_count,
344 hdr->dxfer_len); 358 hdr->dxfer_len);
345 iov_data_len = hdr->dxfer_len; 359 iov_data_len = hdr->dxfer_len;
346 } 360 }
347 361
348 ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count, 362 ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
349 iov_data_len, GFP_KERNEL); 363 iov_data_len, GFP_KERNEL);
350 kfree(iov); 364 kfree(sg_iov);
351 } else if (hdr->dxfer_len) 365 } else if (hdr->dxfer_len)
352 ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, 366 ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
353 GFP_KERNEL); 367 GFP_KERNEL);
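
sg_io() no longer trusts iov_length() on the user-supplied vector: the new loop sums the iov_len fields itself and fails with -EINVAL if the running total wraps, so a crafted iovec cannot understate the data length that is later compared against hdr->dxfer_len. A userspace sketch of the same wraparound check, using plain struct iovec in place of struct sg_iovec:

	#include <stddef.h>
	#include <errno.h>
	#include <sys/uio.h>

	/* Sum untrusted iovec lengths, failing on unsigned overflow. */
	static int sum_iov_lengths(const struct iovec *iov, int count,
				   size_t *total)
	{
		size_t len = 0;
		int i;

		for (i = 0; i < count; i++) {
			if (len + iov[i].iov_len < len)	/* wrapped */
				return -EINVAL;
			len += iov[i].iov_len;
		}
		*total = len;
		return 0;
	}
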
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index de3078215fe..75586f1f86e 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -504,7 +504,6 @@ err:
504 504
505static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) 505static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
506{ 506{
507 kobject_put(&pcrypt->pinst->kobj);
508 free_cpumask_var(pcrypt->cb_cpumask->mask); 507 free_cpumask_var(pcrypt->cb_cpumask->mask);
509 kfree(pcrypt->cb_cpumask); 508 kfree(pcrypt->cb_cpumask);
510 509
diff --git a/drivers/Makefile b/drivers/Makefile
index 14cf9077bb2..f3ebb30f1b7 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_REGULATOR) += regulator/
26 26
27# char/ comes before serial/ etc so that the VT console is the boot-time 27# char/ comes before serial/ etc so that the VT console is the boot-time
28# default. 28# default.
29obj-y += tty/
29obj-y += char/ 30obj-y += char/
30 31
31# gpu/ comes after char for AGP vs DRM startup 32# gpu/ comes after char for AGP vs DRM startup
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 6355b575ee5..5df67f1d6c6 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -80,7 +80,7 @@ int __init acpi_debugfs_init(void)
80 if (!acpi_dir) 80 if (!acpi_dir)
81 goto err; 81 goto err;
82 82
83 cm_dentry = debugfs_create_file("custom_method", S_IWUGO, 83 cm_dentry = debugfs_create_file("custom_method", S_IWUSR,
84 acpi_dir, NULL, &cm_fops); 84 acpi_dir, NULL, &cm_fops);
85 if (!cm_dentry) 85 if (!cm_dentry)
86 goto err; 86 goto err;
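
The acpi change narrows the debugfs custom_method file from S_IWUGO (write for user, group and other, octal 0222) to S_IWUSR (owner write only, 0200), so only root can feed AML into the interpreter through it.
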
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d050e073e57..3f91c01c217 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2552,8 +2552,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2552 * 2552 *
2553 * If door lock fails, always clear sdev->locked to 2553 * If door lock fails, always clear sdev->locked to
2554 * avoid this infinite loop. 2554 * avoid this infinite loop.
2555 *
2556 * This may happen before SCSI scan is complete. Make
2557 * sure qc->dev->sdev isn't NULL before dereferencing.
2555 */ 2558 */
2556 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) 2559 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
2557 qc->dev->sdev->locked = 0; 2560 qc->dev->sdev->locked = 0;
2558 2561
2559 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; 2562 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index eaf194138f2..6bd9425ba5a 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -142,7 +142,7 @@ static int autospeed; /* Chip present which snoops speed changes */
142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ 142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ 143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
144 144
145#ifdef PATA_WINBOND_VLB_MODULE 145#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
146static int winbond = 1; /* Set to probe Winbond controllers, 146static int winbond = 1; /* Set to probe Winbond controllers,
147 give I/O port if non standard */ 147 give I/O port if non standard */
148#else 148#else
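
The pata_legacy change is a Kconfig-prefix fix: for a tristate option built as a module, Kbuild defines CONFIG_<option>_MODULE, so the old #ifdef PATA_WINBOND_VLB_MODULE could never be true and the winbond probe default stayed off even in modular builds.
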
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 74b82981789..fa1b95a9a7f 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -653,8 +653,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
653 653
654 ap = host->ports[i]; 654 ap = host->ports[i];
655 ocd = ap->dev->platform_data; 655 ocd = ap->dev->platform_data;
656
657 ocd = ap->dev->platform_data;
658 cf_port = ap->private_data; 656 cf_port = ap->private_data;
659 dma_int.u64 = 657 dma_int.u64 =
660 cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine)); 658 cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c
index 1a9332e4efe..9a676ee3082 100644
--- a/drivers/atm/solos-attrlist.c
+++ b/drivers/atm/solos-attrlist.c
@@ -1,6 +1,7 @@
1SOLOS_ATTR_RO(DriverVersion) 1SOLOS_ATTR_RO(DriverVersion)
2SOLOS_ATTR_RO(APIVersion) 2SOLOS_ATTR_RO(APIVersion)
3SOLOS_ATTR_RO(FirmwareVersion) 3SOLOS_ATTR_RO(FirmwareVersion)
4SOLOS_ATTR_RO(Version)
4// SOLOS_ATTR_RO(DspVersion) 5// SOLOS_ATTR_RO(DspVersion)
5// SOLOS_ATTR_RO(CommonHandshake) 6// SOLOS_ATTR_RO(CommonHandshake)
6SOLOS_ATTR_RO(Connected) 7SOLOS_ATTR_RO(Connected)
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index f46138ab38b..2e08c996fd3 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1161,6 +1161,14 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
1161 dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n", 1161 dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
1162 major_ver, minor_ver, fpga_ver); 1162 major_ver, minor_ver, fpga_ver);
1163 1163
1164 if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade ||
1165 db_fpga_upgrade || db_firmware_upgrade)) {
1166 dev_warn(&dev->dev,
1167 "FPGA too old; cannot upgrade flash. Use JTAG.\n");
1168 fpga_upgrade = firmware_upgrade = 0;
1169 db_fpga_upgrade = db_firmware_upgrade = 0;
1170 }
1171
1164 if (card->fpga_version >= DMA_SUPPORTED){ 1172 if (card->fpga_version >= DMA_SUPPORTED){
1165 card->using_dma = 1; 1173 card->using_dma = 1;
1166 } else { 1174 } else {
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 541e1887996..528f6318ded 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
180 BUG(); 180 BUG();
181 bio_endio(bio, -ENXIO); 181 bio_endio(bio, -ENXIO);
182 return 0; 182 return 0;
183 } else if (bio->bi_rw & REQ_HARDBARRIER) {
184 bio_endio(bio, -EOPNOTSUPP);
185 return 0;
186 } else if (bio->bi_io_vec == NULL) { 183 } else if (bio->bi_io_vec == NULL) {
187 printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); 184 printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
188 BUG(); 185 BUG();
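
The aoeblk hunk drops the explicit rejection of REQ_HARDBARRIER bios; with this series the block layer stops issuing barrier requests in favour of the FLUSH/FUA machinery, so drivers no longer see that flag and no longer need to refuse it.
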
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 2cc4dda4627..a67d0a611a8 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -113,6 +113,8 @@ static struct board_type products[] = {
113 {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, 113 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
114 {0x40910E11, "Smart Array 6i", &SA5_access}, 114 {0x40910E11, "Smart Array 6i", &SA5_access},
115 {0x3225103C, "Smart Array P600", &SA5_access}, 115 {0x3225103C, "Smart Array P600", &SA5_access},
116 {0x3223103C, "Smart Array P800", &SA5_access},
117 {0x3234103C, "Smart Array P400", &SA5_access},
116 {0x3235103C, "Smart Array P400i", &SA5_access}, 118 {0x3235103C, "Smart Array P400i", &SA5_access},
117 {0x3211103C, "Smart Array E200i", &SA5_access}, 119 {0x3211103C, "Smart Array E200i", &SA5_access},
118 {0x3212103C, "Smart Array E200", &SA5_access}, 120 {0x3212103C, "Smart Array E200", &SA5_access},
@@ -3753,7 +3755,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
3753 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 3755 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3754 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) 3756 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3755 break; 3757 break;
3756 msleep(10); 3758 usleep_range(10000, 20000);
3757 } 3759 }
3758} 3760}
3759 3761
@@ -3937,10 +3939,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3937 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 3939 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3938 subsystem_vendor_id; 3940 subsystem_vendor_id;
3939 3941
3940 for (i = 0; i < ARRAY_SIZE(products); i++) { 3942 for (i = 0; i < ARRAY_SIZE(products); i++)
3941 if (*board_id == products[i].board_id) 3943 if (*board_id == products[i].board_id)
3942 return i; 3944 return i;
3943 }
3944 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", 3945 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
3945 *board_id); 3946 *board_id);
3946 return -ENODEV; 3947 return -ENODEV;
@@ -3971,18 +3972,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
3971 return -ENODEV; 3972 return -ENODEV;
3972} 3973}
3973 3974
3974static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h) 3975static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
3976 void __iomem *vaddr, int wait_for_ready)
3977#define BOARD_READY 1
3978#define BOARD_NOT_READY 0
3975{ 3979{
3976 int i; 3980 int i, iterations;
3977 u32 scratchpad; 3981 u32 scratchpad;
3978 3982
3979 for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) { 3983 if (wait_for_ready)
3980 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 3984 iterations = CCISS_BOARD_READY_ITERATIONS;
3981 if (scratchpad == CCISS_FIRMWARE_READY) 3985 else
3982 return 0; 3986 iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
3987
3988 for (i = 0; i < iterations; i++) {
3989 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
3990 if (wait_for_ready) {
3991 if (scratchpad == CCISS_FIRMWARE_READY)
3992 return 0;
3993 } else {
3994 if (scratchpad != CCISS_FIRMWARE_READY)
3995 return 0;
3996 }
3983 msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); 3997 msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
3984 } 3998 }
3985 dev_warn(&h->pdev->dev, "board not ready, timed out.\n"); 3999 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3986 return -ENODEV; 4000 return -ENODEV;
3987} 4001}
3988 4002
@@ -4031,6 +4045,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
4031static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h) 4045static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
4032{ 4046{
4033 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 4047 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
4048
4049 /* Limit commands in memory limited kdump scenario. */
4050 if (reset_devices && h->max_commands > 32)
4051 h->max_commands = 32;
4052
4034 if (h->max_commands < 16) { 4053 if (h->max_commands < 16) {
4035 dev_warn(&h->pdev->dev, "Controller reports " 4054 dev_warn(&h->pdev->dev, "Controller reports "
4036 "max supported commands of %d, an obvious lie. " 4055 "max supported commands of %d, an obvious lie. "
@@ -4148,7 +4167,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
4148 err = -ENOMEM; 4167 err = -ENOMEM;
4149 goto err_out_free_res; 4168 goto err_out_free_res;
4150 } 4169 }
4151 err = cciss_wait_for_board_ready(h); 4170 err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
4152 if (err) 4171 if (err)
4153 goto err_out_free_res; 4172 goto err_out_free_res;
4154 err = cciss_find_cfgtables(h); 4173 err = cciss_find_cfgtables(h);
@@ -4313,36 +4332,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
4313#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0) 4332#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
4314#define cciss_noop(p) cciss_message(p, 3, 0) 4333#define cciss_noop(p) cciss_message(p, 3, 0)
4315 4334
4316static __devinit int cciss_reset_msi(struct pci_dev *pdev)
4317{
4318/* the #defines are stolen from drivers/pci/msi.h. */
4319#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
4320#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
4321
4322 int pos;
4323 u16 control = 0;
4324
4325 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
4326 if (pos) {
4327 pci_read_config_word(pdev, msi_control_reg(pos), &control);
4328 if (control & PCI_MSI_FLAGS_ENABLE) {
4329 dev_info(&pdev->dev, "resetting MSI\n");
4330 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
4331 }
4332 }
4333
4334 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
4335 if (pos) {
4336 pci_read_config_word(pdev, msi_control_reg(pos), &control);
4337 if (control & PCI_MSIX_FLAGS_ENABLE) {
4338 dev_info(&pdev->dev, "resetting MSI-X\n");
4339 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
4340 }
4341 }
4342
4343 return 0;
4344}
4345
4346static int cciss_controller_hard_reset(struct pci_dev *pdev, 4335static int cciss_controller_hard_reset(struct pci_dev *pdev,
4347 void * __iomem vaddr, bool use_doorbell) 4336 void * __iomem vaddr, bool use_doorbell)
4348{ 4337{
@@ -4397,17 +4386,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
4397 * states or using the doorbell register. */ 4386 * states or using the doorbell register. */
4398static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) 4387static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4399{ 4388{
4400 u16 saved_config_space[32];
4401 u64 cfg_offset; 4389 u64 cfg_offset;
4402 u32 cfg_base_addr; 4390 u32 cfg_base_addr;
4403 u64 cfg_base_addr_index; 4391 u64 cfg_base_addr_index;
4404 void __iomem *vaddr; 4392 void __iomem *vaddr;
4405 unsigned long paddr; 4393 unsigned long paddr;
4406 u32 misc_fw_support, active_transport; 4394 u32 misc_fw_support, active_transport;
4407 int rc, i; 4395 int rc;
4408 CfgTable_struct __iomem *cfgtable; 4396 CfgTable_struct __iomem *cfgtable;
4409 bool use_doorbell; 4397 bool use_doorbell;
4410 u32 board_id; 4398 u32 board_id;
4399 u16 command_register;
4411 4400
4412 /* For controllers as old as the p600, this is very nearly 4401
4413 * the same thing as 4402 * the same thing as
@@ -4417,14 +4406,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4417 * pci_set_power_state(pci_dev, PCI_D0); 4406 * pci_set_power_state(pci_dev, PCI_D0);
4418 * pci_restore_state(pci_dev); 4407 * pci_restore_state(pci_dev);
4419 * 4408 *
4420 * but we can't use these nice canned kernel routines on
4421 * kexec, because they also check the MSI/MSI-X state in PCI
4422 * configuration space and do the wrong thing when it is
4423 * set/cleared. Also, the pci_save/restore_state functions
4424 * violate the ordering requirements for restoring the
4425 * configuration space from the CCISS document (see the
4426 * comment below). So we roll our own ....
4427 *
4428 * For controllers newer than the P600, the pci power state 4409 * For controllers newer than the P600, the pci power state
4429 * method of resetting doesn't work so we have another way 4410 * method of resetting doesn't work so we have another way
4430 * using the doorbell register. 4411 * using the doorbell register.
@@ -4443,8 +4424,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4443 return -ENODEV; 4424 return -ENODEV;
4444 } 4425 }
4445 4426
4446 for (i = 0; i < 32; i++) 4427 /* Save the PCI command register */
4447 pci_read_config_word(pdev, 2*i, &saved_config_space[i]); 4428 pci_read_config_word(pdev, 4, &command_register);
4429 /* Turn the board off. This is so that later pci_restore_state()
4430 * won't turn the board on before the rest of config space is ready.
4431 */
4432 pci_disable_device(pdev);
4433 pci_save_state(pdev);
4448 4434
4449 /* find the first memory BAR, so we can find the cfg table */ 4435 /* find the first memory BAR, so we can find the cfg table */
4450 rc = cciss_pci_find_memory_BAR(pdev, &paddr); 4436 rc = cciss_pci_find_memory_BAR(pdev, &paddr);
@@ -4479,26 +4465,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4479 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); 4465 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
4480 if (rc) 4466 if (rc)
4481 goto unmap_cfgtable; 4467 goto unmap_cfgtable;
4482 4468 pci_restore_state(pdev);
4483 /* Restore the PCI configuration space. The Open CISS 4469 rc = pci_enable_device(pdev);
4484 * Specification says, "Restore the PCI Configuration 4470 if (rc) {
4485 * Registers, offsets 00h through 60h. It is important to 4471 dev_warn(&pdev->dev, "failed to enable device.\n");
4486 * restore the command register, 16-bits at offset 04h, 4472 goto unmap_cfgtable;
4487 * last. Do not restore the configuration status register,
4488 * 16-bits at offset 06h." Note that the offset is 2*i.
4489 */
4490 for (i = 0; i < 32; i++) {
4491 if (i == 2 || i == 3)
4492 continue;
4493 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
4494 } 4473 }
4495 wmb(); 4474 pci_write_config_word(pdev, 4, command_register);
4496 pci_write_config_word(pdev, 4, saved_config_space[2]);
4497 4475
4498 /* Some devices (notably the HP Smart Array 5i Controller) 4476 /* Some devices (notably the HP Smart Array 5i Controller)
4499 need a little pause here */ 4477 need a little pause here */
4500 msleep(CCISS_POST_RESET_PAUSE_MSECS); 4478 msleep(CCISS_POST_RESET_PAUSE_MSECS);
4501 4479
4480 /* Wait for board to become not ready, then ready. */
4481 dev_info(&pdev->dev, "Waiting for board to become ready.\n");
4482 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
4483 if (rc) /* Don't bail, might be E500, etc. which can't be reset */
4484 dev_warn(&pdev->dev,
4485 "failed waiting for board to become not ready\n");
4486 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
4487 if (rc) {
4488 dev_warn(&pdev->dev,
4489 "failed waiting for board to become ready\n");
4490 goto unmap_cfgtable;
4491 }
4492 dev_info(&pdev->dev, "board ready.\n");
4493
4502 /* Controller should be in simple mode at this point. If it's not, 4494 /* Controller should be in simple mode at this point. If it's not,
4503 * It means we're on one of those controllers which doesn't support 4495 * It means we're on one of those controllers which doesn't support
4504 * the doorbell reset method and on which the PCI power management reset 4496 * the doorbell reset method and on which the PCI power management reset
@@ -4539,8 +4531,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
4539 return 0; /* just try to do the kdump anyhow. */ 4531 return 0; /* just try to do the kdump anyhow. */
4540 if (rc) 4532 if (rc)
4541 return -ENODEV; 4533 return -ENODEV;
4542 if (cciss_reset_msi(pdev))
4543 return -ENODEV;
4544 4534
4545 /* Now try to get the controller to respond to a no-op */ 4535 /* Now try to get the controller to respond to a no-op */
4546 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { 4536 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
@@ -4936,7 +4926,8 @@ static void __exit cciss_cleanup(void)
4936 } 4926 }
4937 } 4927 }
4938 kthread_stop(cciss_scan_thread); 4928 kthread_stop(cciss_scan_thread);
4939 remove_proc_entry("driver/cciss", NULL); 4929 if (proc_cciss)
4930 remove_proc_entry("driver/cciss", NULL);
4940 bus_unregister(&cciss_bus_type); 4931 bus_unregister(&cciss_bus_type);
4941} 4932}
4942 4933
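
In cciss, cciss_wait_for_board_ready() becomes cciss_wait_for_board_state(), one poll loop that can wait either for the scratchpad register to report firmware-ready or for it to leave that state; the kdump reset path uses both directions to confirm the controller actually went through a reset, and it now saves the PCI command register and relies on pci_save_state()/pci_restore_state() instead of hand-rolling the config-space restore. A self-contained sketch of the dual-direction poll, with the register access passed in as a callback and the constant name as a stand-in for the driver's:

	#include <stdbool.h>
	#include <unistd.h>

	#define FW_READY	0xffff0000u	/* stand-in for CCISS_FIRMWARE_READY */

	/* Poll until the scratchpad matches (or stops matching) FW_READY. */
	static int wait_for_state(unsigned int (*read_scratchpad)(void *ctx),
				  void *ctx, bool wait_for_ready,
				  int iterations, int poll_ms)
	{
		int i;

		for (i = 0; i < iterations; i++) {
			bool ready = (read_scratchpad(ctx) == FW_READY);

			if (ready == wait_for_ready)
				return 0;	/* requested state reached */
			usleep(poll_ms * 1000);
		}
		return -1;			/* timed out */
	}
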
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index ae340ffc8f8..4b8933d778f 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -200,10 +200,14 @@ struct ctlr_info
200 * the above. 200 * the above.
201 */ 201 */
202#define CCISS_BOARD_READY_WAIT_SECS (120) 202#define CCISS_BOARD_READY_WAIT_SECS (120)
203#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
203#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) 204#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
204#define CCISS_BOARD_READY_ITERATIONS \ 205#define CCISS_BOARD_READY_ITERATIONS \
205 ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ 206 ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
206 CCISS_BOARD_READY_POLL_INTERVAL_MSECS) 207 CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
208#define CCISS_BOARD_NOT_READY_ITERATIONS \
209 ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
210 CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
207#define CCISS_POST_RESET_PAUSE_MSECS (3000) 211#define CCISS_POST_RESET_PAUSE_MSECS (3000)
208#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000) 212#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
209#define CCISS_POST_RESET_NOOP_RETRIES (12) 213#define CCISS_POST_RESET_NOOP_RETRIES (12)
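
At the 100 ms poll interval these budgets work out to (120 * 1000) / 100 = 1200 iterations for the ready wait and (10 * 1000) / 100 = 100 iterations for the new not-ready wait, i.e. up to 120 s and 10 s of polling respectively.
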
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ac04ef97eac..ba95cba192b 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
78 init_completion(&md_io.event); 78 init_completion(&md_io.event);
79 md_io.error = 0; 79 md_io.error = 0;
80 80
81 if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags)) 81 if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
82 rw |= REQ_HARDBARRIER; 82 rw |= REQ_FUA;
83 rw |= REQ_UNPLUG | REQ_SYNC; 83 rw |= REQ_UNPLUG | REQ_SYNC;
84 84
85 retry:
86 bio = bio_alloc(GFP_NOIO, 1); 85 bio = bio_alloc(GFP_NOIO, 1);
87 bio->bi_bdev = bdev->md_bdev; 86 bio->bi_bdev = bdev->md_bdev;
88 bio->bi_sector = sector; 87 bio->bi_sector = sector;
@@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
100 wait_for_completion(&md_io.event); 99 wait_for_completion(&md_io.event);
101 ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0; 100 ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
102 101
103 /* check for unsupported barrier op.
104 * would rather check on EOPNOTSUPP, but that is not reliable.
105 * don't try again for ANY return value != 0 */
106 if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
107 /* Try again with no barrier */
108 dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
109 set_bit(MD_NO_BARRIER, &mdev->flags);
110 rw &= ~REQ_HARDBARRIER;
111 bio_put(bio);
112 goto retry;
113 }
114 out: 102 out:
115 bio_put(bio); 103 bio_put(bio);
116 return ok; 104 return ok;
@@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
284 u32 xor_sum = 0; 272 u32 xor_sum = 0;
285 273
286 if (!get_ldev(mdev)) { 274 if (!get_ldev(mdev)) {
287 dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n"); 275 dev_err(DEV,
276 "disk is %s, cannot start al transaction (-%d +%d)\n",
277 drbd_disk_str(mdev->state.disk), evicted, new_enr);
288 complete(&((struct update_al_work *)w)->event); 278 complete(&((struct update_al_work *)w)->event);
289 return 1; 279 return 1;
290 } 280 }
291 /* do we have to do a bitmap write, first? 281 /* do we have to do a bitmap write, first?
292 * TODO reduce maximum latency: 282 * TODO reduce maximum latency:
293 * submit both bios, then wait for both, 283 * submit both bios, then wait for both,
294 * instead of doing two synchronous sector writes. */ 284 * instead of doing two synchronous sector writes.
285 * For now, we must not write the transaction,
286 * if we cannot write out the bitmap of the evicted extent. */
295 if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE) 287 if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
296 drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT); 288 drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
297 289
298 mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */ 290 /* The bitmap write may have failed, causing a state change. */
291 if (mdev->state.disk < D_INCONSISTENT) {
292 dev_err(DEV,
293 "disk is %s, cannot write al transaction (-%d +%d)\n",
294 drbd_disk_str(mdev->state.disk), evicted, new_enr);
295 complete(&((struct update_al_work *)w)->event);
296 put_ldev(mdev);
297 return 1;
298 }
299
300 mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
299 buffer = (struct al_transaction *)page_address(mdev->md_io_page); 301 buffer = (struct al_transaction *)page_address(mdev->md_io_page);
300 302
301 buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC); 303 buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
@@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
739 unsigned int enr; 741 unsigned int enr;
740 unsigned long add = 0; 742 unsigned long add = 0;
741 char ppb[10]; 743 char ppb[10];
742 int i; 744 int i, tmp;
743 745
744 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); 746 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
745 747
@@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
747 enr = lc_element_by_index(mdev->act_log, i)->lc_number; 749 enr = lc_element_by_index(mdev->act_log, i)->lc_number;
748 if (enr == LC_FREE) 750 if (enr == LC_FREE)
749 continue; 751 continue;
750 add += drbd_bm_ALe_set_all(mdev, enr); 752 tmp = drbd_bm_ALe_set_all(mdev, enr);
753 dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
754 add += tmp;
751 } 755 }
752 756
753 lc_unlock(mdev->act_log); 757 lc_unlock(mdev->act_log);
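
With barriers going away, drbd meta-data IO asks for REQ_FUA (gated by the renamed MD_NO_FUA flag) instead of REQ_HARDBARRIER, and the retry-without-barrier path is deleted: unlike the old empty-barrier requests, FLUSH/FUA requests are emulated by the block layer when the device lacks the capability, so they should not come back with an "unsupported" error. A sketch of the resulting flag selection; the bit values here are illustrative, not the kernel's:

	#include <stdbool.h>

	#define RW_WRITE	(1u << 0)	/* illustrative bits only */
	#define RW_FUA		(1u << 1)
	#define RW_SYNC		(1u << 2)
	#define RW_UNPLUG	(1u << 3)

	/* Build request flags for a drbd-style meta-data write. */
	static unsigned int md_io_flags(bool write, bool md_no_fua)
	{
		unsigned int rw = write ? RW_WRITE : 0;

		if (write && !md_no_fua)
			rw |= RW_FUA;	/* forced unit access unless disabled */
		return rw | RW_UNPLUG | RW_SYNC;
	}
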
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9bdcf4393c0..1ea1a34e78b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -114,11 +114,11 @@ struct drbd_conf;
114#define D_ASSERT(exp) if (!(exp)) \ 114#define D_ASSERT(exp) if (!(exp)) \
115 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__) 115 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
116 116
117#define ERR_IF(exp) if (({ \ 117#define ERR_IF(exp) if (({ \
118 int _b = (exp) != 0; \ 118 int _b = (exp) != 0; \
119 if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n", \ 119 if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
120 __func__, #exp, __FILE__, __LINE__); \ 120 __func__, #exp, __FILE__, __LINE__); \
121 _b; \ 121 _b; \
122 })) 122 }))
123 123
124/* Defines to control fault insertion */ 124/* Defines to control fault insertion */
@@ -749,17 +749,12 @@ struct drbd_epoch {
749 749
750/* drbd_epoch flag bits */ 750/* drbd_epoch flag bits */
751enum { 751enum {
752 DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
753 DE_BARRIER_IN_NEXT_EPOCH_DONE,
754 DE_CONTAINS_A_BARRIER,
755 DE_HAVE_BARRIER_NUMBER, 752 DE_HAVE_BARRIER_NUMBER,
756 DE_IS_FINISHING,
757}; 753};
758 754
759enum epoch_event { 755enum epoch_event {
760 EV_PUT, 756 EV_PUT,
761 EV_GOT_BARRIER_NR, 757 EV_GOT_BARRIER_NR,
762 EV_BARRIER_DONE,
763 EV_BECAME_LAST, 758 EV_BECAME_LAST,
764 EV_CLEANUP = 32, /* used as flag */ 759 EV_CLEANUP = 32, /* used as flag */
765}; 760};
@@ -801,11 +796,6 @@ enum {
801 __EE_CALL_AL_COMPLETE_IO, 796 __EE_CALL_AL_COMPLETE_IO,
802 __EE_MAY_SET_IN_SYNC, 797 __EE_MAY_SET_IN_SYNC,
803 798
804 /* This epoch entry closes an epoch using a barrier.
805 * On sucessful completion, the epoch is released,
806 * and the P_BARRIER_ACK send. */
807 __EE_IS_BARRIER,
808
809 /* In case a barrier failed, 799 /* In case a barrier failed,
810 * we need to resubmit without the barrier flag. */ 800 * we need to resubmit without the barrier flag. */
811 __EE_RESUBMITTED, 801 __EE_RESUBMITTED,
@@ -820,7 +810,6 @@ enum {
820}; 810};
821#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) 811#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
822#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) 812#define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
823#define EE_IS_BARRIER (1<<__EE_IS_BARRIER)
824#define EE_RESUBMITTED (1<<__EE_RESUBMITTED) 813#define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
825#define EE_WAS_ERROR (1<<__EE_WAS_ERROR) 814#define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
826#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST) 815#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
@@ -843,16 +832,15 @@ enum {
843 * Gets cleared when the state.conn 832 * Gets cleared when the state.conn
844 * goes into C_CONNECTED state. */ 833 * goes into C_CONNECTED state. */
845 WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */ 834 WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */
846 NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
847 CONSIDER_RESYNC, 835 CONSIDER_RESYNC,
848 836
849 MD_NO_BARRIER, /* meta data device does not support barriers, 837 MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */
850 so don't even try */
851 SUSPEND_IO, /* suspend application io */ 838 SUSPEND_IO, /* suspend application io */
852 BITMAP_IO, /* suspend application io; 839 BITMAP_IO, /* suspend application io;
853 once no more io in flight, start bitmap io */ 840 once no more io in flight, start bitmap io */
854 BITMAP_IO_QUEUED, /* Started bitmap IO */ 841 BITMAP_IO_QUEUED, /* Started bitmap IO */
855 GO_DISKLESS, /* Disk failed, local_cnt reached zero, we are going diskless */ 842 GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
843 WAS_IO_ERROR, /* Local disk failed returned IO error */
856 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ 844 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
857 NET_CONGESTED, /* The data socket is congested */ 845 NET_CONGESTED, /* The data socket is congested */
858 846
@@ -947,7 +935,6 @@ enum write_ordering_e {
947 WO_none, 935 WO_none,
948 WO_drain_io, 936 WO_drain_io,
949 WO_bdev_flush, 937 WO_bdev_flush,
950 WO_bio_barrier
951}; 938};
952 939
953struct fifo_buffer { 940struct fifo_buffer {
@@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
1281extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); 1268extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
1282extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); 1269extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
1283extern void drbd_go_diskless(struct drbd_conf *mdev); 1270extern void drbd_go_diskless(struct drbd_conf *mdev);
1271extern void drbd_ldev_destroy(struct drbd_conf *mdev);
1284 1272
1285 1273
1286/* Meta data layout 1274/* Meta data layout
@@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
1798 case EP_PASS_ON: 1786 case EP_PASS_ON:
1799 if (!forcedetach) { 1787 if (!forcedetach) {
1800 if (__ratelimit(&drbd_ratelimit_state)) 1788 if (__ratelimit(&drbd_ratelimit_state))
1801 dev_err(DEV, "Local IO failed in %s." 1789 dev_err(DEV, "Local IO failed in %s.\n", where);
1802 "Passing error on...\n", where);
1803 break; 1790 break;
1804 } 1791 }
1805 /* NOTE fall through to detach case if forcedetach set */ 1792 /* NOTE fall through to detach case if forcedetach set */
1806 case EP_DETACH: 1793 case EP_DETACH:
1807 case EP_CALL_HELPER: 1794 case EP_CALL_HELPER:
1795 set_bit(WAS_IO_ERROR, &mdev->flags);
1808 if (mdev->state.disk > D_FAILED) { 1796 if (mdev->state.disk > D_FAILED) {
1809 _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); 1797 _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
1810 dev_err(DEV, "Local IO failed in %s." 1798 dev_err(DEV,
1811 "Detaching...\n", where); 1799 "Local IO failed in %s. Detaching...\n", where);
1812 } 1800 }
1813 break; 1801 break;
1814 } 1802 }
@@ -1874,7 +1862,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1874static inline sector_t drbd_get_capacity(struct block_device *bdev) 1862static inline sector_t drbd_get_capacity(struct block_device *bdev)
1875{ 1863{
1876 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */ 1864 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
1877 return bdev ? bdev->bd_inode->i_size >> 9 : 0; 1865 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1878} 1866}
1879 1867
1880/** 1868/**
@@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
2127 __release(local); 2115 __release(local);
2128 D_ASSERT(i >= 0); 2116 D_ASSERT(i >= 0);
2129 if (i == 0) { 2117 if (i == 0) {
2118 if (mdev->state.disk == D_DISKLESS)
2119 /* even internal references gone, safe to destroy */
2120 drbd_ldev_destroy(mdev);
2130 if (mdev->state.disk == D_FAILED) 2121 if (mdev->state.disk == D_FAILED)
2122 /* all application IO references gone. */
2131 drbd_go_diskless(mdev); 2123 drbd_go_diskless(mdev);
2132 wake_up(&mdev->misc_wait); 2124 wake_up(&mdev->misc_wait);
2133 } 2125 }
@@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat
2138{ 2130{
2139 int io_allowed; 2131 int io_allowed;
2140 2132
2133 /* never get a reference while D_DISKLESS */
2134 if (mdev->state.disk == D_DISKLESS)
2135 return 0;
2136
2141 atomic_inc(&mdev->local_cnt); 2137 atomic_inc(&mdev->local_cnt);
2142 io_allowed = (mdev->state.disk >= mins); 2138 io_allowed = (mdev->state.disk >= mins);
2143 if (!io_allowed) 2139 if (!io_allowed)
@@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
2406{ 2402{
2407 int r; 2403 int r;
2408 2404
2409 if (test_bit(MD_NO_BARRIER, &mdev->flags)) 2405 if (test_bit(MD_NO_FUA, &mdev->flags))
2410 return; 2406 return;
2411 2407
2412 r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL); 2408 r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
2413 if (r) { 2409 if (r) {
2414 set_bit(MD_NO_BARRIER, &mdev->flags); 2410 set_bit(MD_NO_FUA, &mdev->flags);
2415 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); 2411 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
2416 } 2412 }
2417} 2413}
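
put_ldev() now carries the teardown decision: when the last local reference goes away, D_DISKLESS triggers the actual drbd_ldev_destroy(), D_FAILED queues the go-diskless work, and _get_ldev_if_state() refuses new references once the device is already diskless. A reduced sketch of that last-reference dispatch with a plain C11 atomic counter (drbd's flag bits, work queue and locking are left out):

	#include <stdatomic.h>

	enum disk_state { D_DISKLESS, D_FAILED, D_UP_TO_DATE };

	struct ldev_owner {
		atomic_int local_cnt;
		enum disk_state disk;
		void (*destroy)(struct ldev_owner *);     /* like drbd_ldev_destroy() */
		void (*go_diskless)(struct ldev_owner *); /* like drbd_go_diskless() */
	};

	/* Drop one reference; the last one triggers state-dependent cleanup. */
	static void put_local_ref(struct ldev_owner *o)
	{
		if (atomic_fetch_sub(&o->local_cnt, 1) == 1) {
			if (o->disk == D_DISKLESS)
				o->destroy(o);		/* even internal refs gone */
			else if (o->disk == D_FAILED)
				o->go_diskless(o);	/* app IO refs gone */
		}
	}
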
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 25c7a73c506..6be5401d0e8 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
835 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN) 835 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
836 ns.conn = os.conn; 836 ns.conn = os.conn;
837 837
838 /* we cannot fail (again) if we already detached */
839 if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
840 ns.disk = D_DISKLESS;
841
842 /* if we are only D_ATTACHING yet,
843 * we can (and should) go directly to D_DISKLESS. */
844 if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
845 ns.disk = D_DISKLESS;
846
838 /* After C_DISCONNECTING only C_STANDALONE may follow */ 847 /* After C_DISCONNECTING only C_STANDALONE may follow */
839 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE) 848 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
840 ns.conn = os.conn; 849 ns.conn = os.conn;
@@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev,
1056 !test_and_set_bit(CONFIG_PENDING, &mdev->flags)) 1065 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1057 set_bit(DEVICE_DYING, &mdev->flags); 1066 set_bit(DEVICE_DYING, &mdev->flags);
1058 1067
1059 mdev->state.i = ns.i; 1068 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1069 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1070 * drbd_ldev_destroy() won't happen before our corresponding
1071 * after_state_ch works run, where we put_ldev again. */
1072 if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1073 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1074 atomic_inc(&mdev->local_cnt);
1075
1076 mdev->state = ns;
1060 wake_up(&mdev->misc_wait); 1077 wake_up(&mdev->misc_wait);
1061 wake_up(&mdev->state_wait); 1078 wake_up(&mdev->state_wait);
1062 1079
@@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1268 if (test_bit(NEW_CUR_UUID, &mdev->flags)) { 1285 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1269 drbd_uuid_new_current(mdev); 1286 drbd_uuid_new_current(mdev);
1270 clear_bit(NEW_CUR_UUID, &mdev->flags); 1287 clear_bit(NEW_CUR_UUID, &mdev->flags);
1271 drbd_md_sync(mdev);
1272 } 1288 }
1273 spin_lock_irq(&mdev->req_lock); 1289 spin_lock_irq(&mdev->req_lock);
1274 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); 1290 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
@@ -1365,63 +1381,64 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1365 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) 1381 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1366 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); 1382 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1367 1383
1368 /* first half of local IO error */ 1384 /* first half of local IO error, failure to attach,
1369 if (os.disk > D_FAILED && ns.disk == D_FAILED) { 1385 * or administrative detach */
1370 enum drbd_io_error_p eh = EP_PASS_ON; 1386 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1387 enum drbd_io_error_p eh;
1388 int was_io_error;
1389 /* corresponding get_ldev was in __drbd_set_state, to serialize
1390 * our cleanup here with the transition to D_DISKLESS,
1391 * so it is safe to dereference ldev here. */
1392 eh = mdev->ldev->dc.on_io_error;
1393 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1394
1395 /* current state still has to be D_FAILED,
1396 * there is only one way out: to D_DISKLESS,
1397 * and that may only happen after our put_ldev below. */
1398 if (mdev->state.disk != D_FAILED)
1399 dev_err(DEV,
1400 "ASSERT FAILED: disk is %s during detach\n",
1401 drbd_disk_str(mdev->state.disk));
1371 1402
1372 if (drbd_send_state(mdev)) 1403 if (drbd_send_state(mdev))
1373 dev_warn(DEV, "Notified peer that my disk is broken.\n"); 1404 dev_warn(DEV, "Notified peer that I am detaching my disk\n");
1374 else 1405 else
1375 dev_err(DEV, "Sending state for drbd_io_error() failed\n"); 1406 dev_err(DEV, "Sending state for detaching disk failed\n");
1376 1407
1377 drbd_rs_cancel_all(mdev); 1408 drbd_rs_cancel_all(mdev);
1378 1409
1379 if (get_ldev_if_state(mdev, D_FAILED)) { 1410 /* In case we want to get something to stable storage still,
1380 eh = mdev->ldev->dc.on_io_error; 1411 * this may be the last chance.
1381 put_ldev(mdev); 1412 * Following put_ldev may transition to D_DISKLESS. */
1382 } 1413 drbd_md_sync(mdev);
1383 if (eh == EP_CALL_HELPER) 1414 put_ldev(mdev);
1415
1416 if (was_io_error && eh == EP_CALL_HELPER)
1384 drbd_khelper(mdev, "local-io-error"); 1417 drbd_khelper(mdev, "local-io-error");
1385 } 1418 }
1386 1419
1420 /* second half of local IO error, failure to attach,
1421 * or administrative detach,
1422 * after local_cnt references have reached zero again */
1423 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1424 /* We must still be diskless,
1425 * re-attach has to be serialized with this! */
1426 if (mdev->state.disk != D_DISKLESS)
1427 dev_err(DEV,
1428 "ASSERT FAILED: disk is %s while going diskless\n",
1429 drbd_disk_str(mdev->state.disk));
1387 1430
1388 /* second half of local IO error handling, 1431 mdev->rs_total = 0;
1389 * after local_cnt references have reached zero: */ 1432 mdev->rs_failed = 0;
1390 if (os.disk == D_FAILED && ns.disk == D_DISKLESS) { 1433 atomic_set(&mdev->rs_pending_cnt, 0);
1391 mdev->rs_total = 0;
1392 mdev->rs_failed = 0;
1393 atomic_set(&mdev->rs_pending_cnt, 0);
1394 }
1395
1396 if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
1397 /* We must still be diskless,
1398 * re-attach has to be serialized with this! */
1399 if (mdev->state.disk != D_DISKLESS)
1400 dev_err(DEV,
1401 "ASSERT FAILED: disk is %s while going diskless\n",
1402 drbd_disk_str(mdev->state.disk));
1403 1434
1404 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state
1405 * will inc/dec it frequently. Since we became D_DISKLESS, no
1406 * one has touched the protected members anymore, though, so we
1407 * are safe to free them here. */
1408 if (drbd_send_state(mdev)) 1435 if (drbd_send_state(mdev))
1409 dev_warn(DEV, "Notified peer that I detached my disk.\n"); 1436 dev_warn(DEV, "Notified peer that I'm now diskless.\n");
1410 else 1437 else
1411 dev_err(DEV, "Sending state for detach failed\n"); 1438 dev_err(DEV, "Sending state for being diskless failed\n");
1412 1439 /* corresponding get_ldev in __drbd_set_state
1413 lc_destroy(mdev->resync); 1440 * this may finally trigger drbd_ldev_destroy. */
1414 mdev->resync = NULL; 1441 put_ldev(mdev);
1415 lc_destroy(mdev->act_log);
1416 mdev->act_log = NULL;
1417 __no_warn(local,
1418 drbd_free_bc(mdev->ldev);
1419 mdev->ldev = NULL;);
1420
1421 if (mdev->md_io_tmpp) {
1422 __free_page(mdev->md_io_tmpp);
1423 mdev->md_io_tmpp = NULL;
1424 }
1425 } 1442 }
1426 1443
1427 /* Disks got bigger while they were detached */ 1444 /* Disks got bigger while they were detached */
@@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
2772 2789
2773 drbd_set_defaults(mdev); 2790 drbd_set_defaults(mdev);
2774 2791
2775 /* for now, we do NOT yet support it,
2776 * even though we start some framework
2777 * to eventually support barriers */
2778 set_bit(NO_BARRIER_SUPP, &mdev->flags);
2779
2780 atomic_set(&mdev->ap_bio_cnt, 0); 2792 atomic_set(&mdev->ap_bio_cnt, 0);
2781 atomic_set(&mdev->ap_pending_cnt, 0); 2793 atomic_set(&mdev->ap_pending_cnt, 0);
2782 atomic_set(&mdev->rs_pending_cnt, 0); 2794 atomic_set(&mdev->rs_pending_cnt, 0);
@@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
2842 drbd_thread_init(mdev, &mdev->asender, drbd_asender); 2854 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
2843 2855
2844 mdev->agreed_pro_version = PRO_VERSION_MAX; 2856 mdev->agreed_pro_version = PRO_VERSION_MAX;
2845 mdev->write_ordering = WO_bio_barrier; 2857 mdev->write_ordering = WO_bdev_flush;
2846 mdev->resync_wenr = LC_FREE; 2858 mdev->resync_wenr = LC_FREE;
2847} 2859}
2848 2860
@@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
2899 D_ASSERT(list_empty(&mdev->resync_work.list)); 2911 D_ASSERT(list_empty(&mdev->resync_work.list));
2900 D_ASSERT(list_empty(&mdev->unplug_work.list)); 2912 D_ASSERT(list_empty(&mdev->unplug_work.list));
2901 D_ASSERT(list_empty(&mdev->go_diskless.list)); 2913 D_ASSERT(list_empty(&mdev->go_diskless.list));
2902
2903} 2914}
2904 2915
2905 2916
@@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3660 3671
3661 get_random_bytes(&val, sizeof(u64)); 3672 get_random_bytes(&val, sizeof(u64));
3662 _drbd_uuid_set(mdev, UI_CURRENT, val); 3673 _drbd_uuid_set(mdev, UI_CURRENT, val);
3674 /* get it to stable storage _now_ */
3675 drbd_md_sync(mdev);
3663} 3676}
3664 3677
3665void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) 3678void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
@@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3756 return 1; 3769 return 1;
3757} 3770}
3758 3771
3772void drbd_ldev_destroy(struct drbd_conf *mdev)
3773{
3774 lc_destroy(mdev->resync);
3775 mdev->resync = NULL;
3776 lc_destroy(mdev->act_log);
3777 mdev->act_log = NULL;
3778 __no_warn(local,
3779 drbd_free_bc(mdev->ldev);
3780 mdev->ldev = NULL;);
3781
3782 if (mdev->md_io_tmpp) {
3783 __free_page(mdev->md_io_tmpp);
3784 mdev->md_io_tmpp = NULL;
3785 }
3786 clear_bit(GO_DISKLESS, &mdev->flags);
3787}
3788
3759static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused) 3789static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3760{ 3790{
3761 D_ASSERT(mdev->state.disk == D_FAILED); 3791 D_ASSERT(mdev->state.disk == D_FAILED);
3762 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will 3792 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3763 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch 3793 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3764 * the protected members anymore, though, so in the after_state_ch work 3794 * the protected members anymore, though, so once put_ldev reaches zero
3765 * it will be safe to free them. */ 3795 * again, it will be safe to free them. */
3766 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 3796 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3767 /* We need to wait for return of references checked out while we still
3768 * have been D_FAILED, though (drbd_md_sync, bitmap io). */
3769 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
3770
3771 clear_bit(GO_DISKLESS, &mdev->flags);
3772 return 1; 3797 return 1;
3773} 3798}
3774 3799
@@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev)
3777 D_ASSERT(mdev->state.disk == D_FAILED); 3802 D_ASSERT(mdev->state.disk == D_FAILED);
3778 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags)) 3803 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
3779 drbd_queue_work(&mdev->data.work, &mdev->go_diskless); 3804 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3780 /* don't drbd_queue_work_front,
3781 * we need to serialize with the after_state_ch work
3782 * of the -> D_FAILED transition. */
3783} 3805}
3784 3806
3785/** 3807/**
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 87925e97e61..29e5c70e4e2 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
870 retcode = ERR_DISK_CONFIGURED; 870 retcode = ERR_DISK_CONFIGURED;
871 goto fail; 871 goto fail;
872 } 872 }
873 /* It may just now have detached because of IO error. Make sure
874 * drbd_ldev_destroy is done already, we may end up here very fast,
875 * e.g. if someone calls attach from the on-io-error handler,
876 * to realize a "hot spare" feature (not that I'd recommend that) */
877 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
873 878
874 /* allocation not in the IO path, cqueue thread context */ 879 /* allocation not in the IO path, cqueue thread context */
875 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); 880 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1098 /* Reset the "barriers don't work" bits here, then force meta data to 1103 /* Reset the "barriers don't work" bits here, then force meta data to
1099 * be written, to ensure we determine if barriers are supported. */ 1104 * be written, to ensure we determine if barriers are supported. */
1100 if (nbc->dc.no_md_flush) 1105 if (nbc->dc.no_md_flush)
1101 set_bit(MD_NO_BARRIER, &mdev->flags); 1106 set_bit(MD_NO_FUA, &mdev->flags);
1102 else 1107 else
1103 clear_bit(MD_NO_BARRIER, &mdev->flags); 1108 clear_bit(MD_NO_FUA, &mdev->flags);
1104 1109
1105 /* Point of no return reached. 1110 /* Point of no return reached.
1106 * Devices and memory are no longer released by error cleanup below. 1111 * Devices and memory are no longer released by error cleanup below.
@@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1112 nbc = NULL; 1117 nbc = NULL;
1113 resync_lru = NULL; 1118 resync_lru = NULL;
1114 1119
1115 mdev->write_ordering = WO_bio_barrier; 1120 mdev->write_ordering = WO_bdev_flush;
1116 drbd_bump_write_ordering(mdev, WO_bio_barrier); 1121 drbd_bump_write_ordering(mdev, WO_bdev_flush);
1117 1122
1118 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) 1123 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1119 set_bit(CRASHED_PRIMARY, &mdev->flags); 1124 set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1262 force_diskless_dec: 1267 force_diskless_dec:
1263 put_ldev(mdev); 1268 put_ldev(mdev);
1264 force_diskless: 1269 force_diskless:
1265 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 1270 drbd_force_state(mdev, NS(disk, D_FAILED));
1266 drbd_md_sync(mdev); 1271 drbd_md_sync(mdev);
1267 release_bdev2_fail: 1272 release_bdev2_fail:
1268 if (nbc) 1273 if (nbc)
@@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1285 return 0; 1290 return 0;
1286} 1291}
1287 1292
1293/* Detaching the disk is a process in multiple stages. First we need to lock
1294 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1295 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1296 * internal references as well.
1297 * Only then have we finally detached. */
1288static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, 1298static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
1289 struct drbd_nl_cfg_reply *reply) 1299 struct drbd_nl_cfg_reply *reply)
1290{ 1300{
1301 drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1291 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); 1302 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
1303 if (mdev->state.disk == D_DISKLESS)
1304 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1305 drbd_resume_io(mdev);
1292 return 0; 1306 return 0;
1293} 1307}
1294 1308
@@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
1953 if (test_bit(NEW_CUR_UUID, &mdev->flags)) { 1967 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1954 drbd_uuid_new_current(mdev); 1968 drbd_uuid_new_current(mdev);
1955 clear_bit(NEW_CUR_UUID, &mdev->flags); 1969 clear_bit(NEW_CUR_UUID, &mdev->flags);
1956 drbd_md_sync(mdev);
1957 } 1970 }
1958 drbd_suspend_io(mdev); 1971 drbd_suspend_io(mdev);
1959 reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); 1972 reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
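
drbd_nl_detach() now follows the staged model its new comment describes: suspend application IO so nothing can block in drbd_al_begin_io(), request D_DISKLESS, wait for local_cnt to drain so drbd_ldev_destroy() has really run, then resume IO. drbd_nl_disk_conf() waits for the same drain before re-attaching, and a failed attach is forced to D_FAILED rather than straight to D_DISKLESS so it goes through the same cleanup path.
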
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index ad325c5d0ce..7e6ac307e2d 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
158 [WO_none] = 'n', 158 [WO_none] = 'n',
159 [WO_drain_io] = 'd', 159 [WO_drain_io] = 'd',
160 [WO_bdev_flush] = 'f', 160 [WO_bdev_flush] = 'f',
161 [WO_bio_barrier] = 'b',
162 }; 161 };
163 162
164 seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n", 163 seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index efd6169acf2..d299fe9e78c 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -49,11 +49,6 @@
49 49
50#include "drbd_vli.h" 50#include "drbd_vli.h"
51 51
52struct flush_work {
53 struct drbd_work w;
54 struct drbd_epoch *epoch;
55};
56
57enum finish_epoch { 52enum finish_epoch {
58 FE_STILL_LIVE, 53 FE_STILL_LIVE,
59 FE_DESTROYED, 54 FE_DESTROYED,
@@ -66,16 +61,6 @@ static int drbd_do_auth(struct drbd_conf *mdev);
66static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event); 61static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
67static int e_end_block(struct drbd_conf *, struct drbd_work *, int); 62static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
68 63
69static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
70{
71 struct drbd_epoch *prev;
72 spin_lock(&mdev->epoch_lock);
73 prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
74 if (prev == epoch || prev == mdev->current_epoch)
75 prev = NULL;
76 spin_unlock(&mdev->epoch_lock);
77 return prev;
78}
79 64
80#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) 65#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
81 66
@@ -981,7 +966,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
981 return TRUE; 966 return TRUE;
982} 967}
983 968
984static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch) 969static void drbd_flush(struct drbd_conf *mdev)
985{ 970{
986 int rv; 971 int rv;
987 972
@@ -997,24 +982,6 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
997 } 982 }
998 put_ldev(mdev); 983 put_ldev(mdev);
999 } 984 }
1000
1001 return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1002}
1003
1004static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1005{
1006 struct flush_work *fw = (struct flush_work *)w;
1007 struct drbd_epoch *epoch = fw->epoch;
1008
1009 kfree(w);
1010
1011 if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
1012 drbd_flush_after_epoch(mdev, epoch);
1013
1014 drbd_may_finish_epoch(mdev, epoch, EV_PUT |
1015 (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
1016
1017 return 1;
1018} 985}
1019 986
1020/** 987/**
@@ -1027,15 +994,13 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1027 struct drbd_epoch *epoch, 994 struct drbd_epoch *epoch,
1028 enum epoch_event ev) 995 enum epoch_event ev)
1029{ 996{
1030 int finish, epoch_size; 997 int epoch_size;
1031 struct drbd_epoch *next_epoch; 998 struct drbd_epoch *next_epoch;
1032 int schedule_flush = 0;
1033 enum finish_epoch rv = FE_STILL_LIVE; 999 enum finish_epoch rv = FE_STILL_LIVE;
1034 1000
1035 spin_lock(&mdev->epoch_lock); 1001 spin_lock(&mdev->epoch_lock);
1036 do { 1002 do {
1037 next_epoch = NULL; 1003 next_epoch = NULL;
1038 finish = 0;
1039 1004
1040 epoch_size = atomic_read(&epoch->epoch_size); 1005 epoch_size = atomic_read(&epoch->epoch_size);
1041 1006
@@ -1045,16 +1010,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1045 break; 1010 break;
1046 case EV_GOT_BARRIER_NR: 1011 case EV_GOT_BARRIER_NR:
1047 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags); 1012 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1048
1049 /* Special case: If we just switched from WO_bio_barrier to
1050 WO_bdev_flush we should not finish the current epoch */
1051 if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
1052 mdev->write_ordering != WO_bio_barrier &&
1053 epoch == mdev->current_epoch)
1054 clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
1055 break;
1056 case EV_BARRIER_DONE:
1057 set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
1058 break; 1013 break;
1059 case EV_BECAME_LAST: 1014 case EV_BECAME_LAST:
1060 /* nothing to do*/ 1015 /* nothing to do*/
@@ -1063,23 +1018,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1063 1018
1064 if (epoch_size != 0 && 1019 if (epoch_size != 0 &&
1065 atomic_read(&epoch->active) == 0 && 1020 atomic_read(&epoch->active) == 0 &&
1066 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) && 1021 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
1067 epoch->list.prev == &mdev->current_epoch->list &&
1068 !test_bit(DE_IS_FINISHING, &epoch->flags)) {
1069 /* Nearly all conditions are met to finish that epoch... */
1070 if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
1071 mdev->write_ordering == WO_none ||
1072 (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
1073 ev & EV_CLEANUP) {
1074 finish = 1;
1075 set_bit(DE_IS_FINISHING, &epoch->flags);
1076 } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
1077 mdev->write_ordering == WO_bio_barrier) {
1078 atomic_inc(&epoch->active);
1079 schedule_flush = 1;
1080 }
1081 }
1082 if (finish) {
1083 if (!(ev & EV_CLEANUP)) { 1022 if (!(ev & EV_CLEANUP)) {
1084 spin_unlock(&mdev->epoch_lock); 1023 spin_unlock(&mdev->epoch_lock);
1085 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size); 1024 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
@@ -1102,6 +1041,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1102 /* atomic_set(&epoch->active, 0); is already zero */ 1041 /* atomic_set(&epoch->active, 0); is already zero */
1103 if (rv == FE_STILL_LIVE) 1042 if (rv == FE_STILL_LIVE)
1104 rv = FE_RECYCLED; 1043 rv = FE_RECYCLED;
1044 wake_up(&mdev->ee_wait);
1105 } 1045 }
1106 } 1046 }
1107 1047
@@ -1113,22 +1053,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1113 1053
1114 spin_unlock(&mdev->epoch_lock); 1054 spin_unlock(&mdev->epoch_lock);
1115 1055
1116 if (schedule_flush) {
1117 struct flush_work *fw;
1118 fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1119 if (fw) {
1120 fw->w.cb = w_flush;
1121 fw->epoch = epoch;
1122 drbd_queue_work(&mdev->data.work, &fw->w);
1123 } else {
1124 dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
1125 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1126 /* That is not a recursion, only one level */
1127 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1128 drbd_may_finish_epoch(mdev, epoch, EV_PUT);
1129 }
1130 }
1131
1132 return rv; 1056 return rv;
1133} 1057}
1134 1058
@@ -1144,19 +1068,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
1144 [WO_none] = "none", 1068 [WO_none] = "none",
1145 [WO_drain_io] = "drain", 1069 [WO_drain_io] = "drain",
1146 [WO_bdev_flush] = "flush", 1070 [WO_bdev_flush] = "flush",
1147 [WO_bio_barrier] = "barrier",
1148 }; 1071 };
1149 1072
1150 pwo = mdev->write_ordering; 1073 pwo = mdev->write_ordering;
1151 wo = min(pwo, wo); 1074 wo = min(pwo, wo);
1152 if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
1153 wo = WO_bdev_flush;
1154 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush) 1075 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1155 wo = WO_drain_io; 1076 wo = WO_drain_io;
1156 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain) 1077 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1157 wo = WO_none; 1078 wo = WO_none;
1158 mdev->write_ordering = wo; 1079 mdev->write_ordering = wo;
1159 if (pwo != mdev->write_ordering || wo == WO_bio_barrier) 1080 if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
1160 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]); 1081 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
1161} 1082}
1162 1083
@@ -1192,7 +1113,7 @@ next_bio:
1192 bio->bi_sector = sector; 1113 bio->bi_sector = sector;
1193 bio->bi_bdev = mdev->ldev->backing_bdev; 1114 bio->bi_bdev = mdev->ldev->backing_bdev;
1194 /* we special case some flags in the multi-bio case, see below 1115 /* we special case some flags in the multi-bio case, see below
1195 * (REQ_UNPLUG, REQ_HARDBARRIER) */ 1116 * (REQ_UNPLUG) */
1196 bio->bi_rw = rw; 1117 bio->bi_rw = rw;
1197 bio->bi_private = e; 1118 bio->bi_private = e;
1198 bio->bi_end_io = drbd_endio_sec; 1119 bio->bi_end_io = drbd_endio_sec;
@@ -1226,11 +1147,6 @@ next_bio:
1226 bio->bi_rw &= ~REQ_UNPLUG; 1147 bio->bi_rw &= ~REQ_UNPLUG;
1227 1148
1228 drbd_generic_make_request(mdev, fault_type, bio); 1149 drbd_generic_make_request(mdev, fault_type, bio);
1229
1230 /* strip off REQ_HARDBARRIER,
1231 * unless it is the first or last bio */
1232 if (bios && bios->bi_next)
1233 bios->bi_rw &= ~REQ_HARDBARRIER;
1234 } while (bios); 1150 } while (bios);
1235 maybe_kick_lo(mdev); 1151 maybe_kick_lo(mdev);
1236 return 0; 1152 return 0;
@@ -1244,45 +1160,9 @@ fail:
1244 return -ENOMEM; 1160 return -ENOMEM;
1245} 1161}
1246 1162
1247/**
1248 * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
1249 * @mdev: DRBD device.
1250 * @w: work object.
1251 * @cancel: The connection will be closed anyways (unused in this callback)
1252 */
1253int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
1254{
1255 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1256 /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
1257 (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
1258 so that we can finish that epoch in drbd_may_finish_epoch().
1259 That is necessary if we already have a long chain of Epochs, before
1260 we realize that REQ_HARDBARRIER is actually not supported */
1261
1262 /* As long as the -ENOTSUPP on the barrier is reported immediately
1263 that will never trigger. If it is reported late, we will just
1264 print that warning and continue correctly for all future requests
1265 with WO_bdev_flush */
1266 if (previous_epoch(mdev, e->epoch))
1267 dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
1268
1269 /* we still have a local reference,
1270 * get_ldev was done in receive_Data. */
1271
1272 e->w.cb = e_end_block;
1273 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
1274 /* drbd_submit_ee fails for one reason only:
1275 * if was not able to allocate sufficient bios.
1276 * requeue, try again later. */
1277 e->w.cb = w_e_reissue;
1278 drbd_queue_work(&mdev->data.work, &e->w);
1279 }
1280 return 1;
1281}
1282
1283static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1163static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1284{ 1164{
1285 int rv, issue_flush; 1165 int rv;
1286 struct p_barrier *p = &mdev->data.rbuf.barrier; 1166 struct p_barrier *p = &mdev->data.rbuf.barrier;
1287 struct drbd_epoch *epoch; 1167 struct drbd_epoch *epoch;
1288 1168
@@ -1300,44 +1180,40 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
1300 * Therefore we must send the barrier_ack after the barrier request was 1180 * Therefore we must send the barrier_ack after the barrier request was
1301 * completed. */ 1181 * completed. */
1302 switch (mdev->write_ordering) { 1182 switch (mdev->write_ordering) {
1303 case WO_bio_barrier:
1304 case WO_none: 1183 case WO_none:
1305 if (rv == FE_RECYCLED) 1184 if (rv == FE_RECYCLED)
1306 return TRUE; 1185 return TRUE;
1307 break; 1186
1187 /* receiver context, in the writeout path of the other node.
1188 * avoid potential distributed deadlock */
1189 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1190 if (epoch)
1191 break;
1192 else
1193 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1194 /* Fall through */
1308 1195
1309 case WO_bdev_flush: 1196 case WO_bdev_flush:
1310 case WO_drain_io: 1197 case WO_drain_io:
1311 if (rv == FE_STILL_LIVE) {
1312 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1313 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1314 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1315 }
1316 if (rv == FE_RECYCLED)
1317 return TRUE;
1318
1319 /* The asender will send all the ACKs and barrier ACKs out, since
1320 all EEs moved from the active_ee to the done_ee. We need to
1321 provide a new epoch object for the EEs that come in soon */
1322 break;
1323 }
1324
1325 /* receiver context, in the writeout path of the other node.
1326 * avoid potential distributed deadlock */
1327 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1328 if (!epoch) {
1329 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1330 issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1331 drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 1198 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1332 if (issue_flush) { 1199 drbd_flush(mdev);
1333 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); 1200
1334 if (rv == FE_RECYCLED) 1201 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1335 return TRUE; 1202 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1203 if (epoch)
1204 break;
1336 } 1205 }
1337 1206
1338 drbd_wait_ee_list_empty(mdev, &mdev->done_ee); 1207 epoch = mdev->current_epoch;
1208 wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
1209
1210 D_ASSERT(atomic_read(&epoch->active) == 0);
1211 D_ASSERT(epoch->flags == 0);
1339 1212
1340 return TRUE; 1213 return TRUE;
1214 default:
1215 dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
1216 return FALSE;
1341 } 1217 }
1342 1218
1343 epoch->flags = 0; 1219 epoch->flags = 0;
@@ -1652,15 +1528,8 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1652{ 1528{
1653 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1529 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1654 sector_t sector = e->sector; 1530 sector_t sector = e->sector;
1655 struct drbd_epoch *epoch;
1656 int ok = 1, pcmd; 1531 int ok = 1, pcmd;
1657 1532
1658 if (e->flags & EE_IS_BARRIER) {
1659 epoch = previous_epoch(mdev, e->epoch);
1660 if (epoch)
1661 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1662 }
1663
1664 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) { 1533 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1665 if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1534 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1666 pcmd = (mdev->state.conn >= C_SYNC_SOURCE && 1535 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
@@ -1817,27 +1686,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1817 e->epoch = mdev->current_epoch; 1686 e->epoch = mdev->current_epoch;
1818 atomic_inc(&e->epoch->epoch_size); 1687 atomic_inc(&e->epoch->epoch_size);
1819 atomic_inc(&e->epoch->active); 1688 atomic_inc(&e->epoch->active);
1820
1821 if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1822 struct drbd_epoch *epoch;
1823 /* Issue a barrier if we start a new epoch, and the previous epoch
1824 was not a epoch containing a single request which already was
1825 a Barrier. */
1826 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1827 if (epoch == e->epoch) {
1828 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1829 rw |= REQ_HARDBARRIER;
1830 e->flags |= EE_IS_BARRIER;
1831 } else {
1832 if (atomic_read(&epoch->epoch_size) > 1 ||
1833 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1834 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1835 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1836 rw |= REQ_HARDBARRIER;
1837 e->flags |= EE_IS_BARRIER;
1838 }
1839 }
1840 }
1841 spin_unlock(&mdev->epoch_lock); 1689 spin_unlock(&mdev->epoch_lock);
1842 1690
1843 dp_flags = be32_to_cpu(p->dp_flags); 1691 dp_flags = be32_to_cpu(p->dp_flags);
@@ -1995,10 +1843,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
1995 break; 1843 break;
1996 } 1844 }
1997 1845
1998 if (mdev->state.pdsk == D_DISKLESS) { 1846 if (mdev->state.pdsk < D_INCONSISTENT) {
1999 /* In case we have the only disk of the cluster, */ 1847 /* In case we have the only disk of the cluster, */
2000 drbd_set_out_of_sync(mdev, e->sector, e->size); 1848 drbd_set_out_of_sync(mdev, e->sector, e->size);
2001 e->flags |= EE_CALL_AL_COMPLETE_IO; 1849 e->flags |= EE_CALL_AL_COMPLETE_IO;
1850 e->flags &= ~EE_MAY_SET_IN_SYNC;
2002 drbd_al_begin_io(mdev, e->sector); 1851 drbd_al_begin_io(mdev, e->sector);
2003 } 1852 }
2004 1853
@@ -3362,7 +3211,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
3362 if (ns.conn == C_MASK) { 3211 if (ns.conn == C_MASK) {
3363 ns.conn = C_CONNECTED; 3212 ns.conn = C_CONNECTED;
3364 if (mdev->state.disk == D_NEGOTIATING) { 3213 if (mdev->state.disk == D_NEGOTIATING) {
3365 drbd_force_state(mdev, NS(disk, D_DISKLESS)); 3214 drbd_force_state(mdev, NS(disk, D_FAILED));
3366 } else if (peer_state.disk == D_NEGOTIATING) { 3215 } else if (peer_state.disk == D_NEGOTIATING) {
3367 dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); 3216 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3368 peer_state.disk = D_DISKLESS; 3217 peer_state.disk = D_DISKLESS;
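
The drbd_receiver.c hunks above remove the WO_bio_barrier write-ordering method entirely, which leaves drbd_bump_write_ordering() with a simple degradation chain: flush, then drain, then none, depending on what the backing-disk configuration allows. Below is a minimal standalone sketch of that fallback logic; the enum values and the no_disk_* knobs mirror the idea in the patch, but everything here is local to the example rather than the driver's own code.

    #include <stdio.h>

    enum write_ordering_e { WO_none, WO_drain_io, WO_bdev_flush };

    struct disk_conf {
        int no_disk_flush;
        int no_disk_drain;
    };

    static enum write_ordering_e pick_write_ordering(enum write_ordering_e cur,
                                                     enum write_ordering_e wanted,
                                                     const struct disk_conf *dc)
    {
        /* never upgrade past the method already in use */
        enum write_ordering_e wo = wanted < cur ? wanted : cur;

        if (wo == WO_bdev_flush && dc->no_disk_flush)
            wo = WO_drain_io;
        if (wo == WO_drain_io && dc->no_disk_drain)
            wo = WO_none;
        return wo;
    }

    int main(void)
    {
        struct disk_conf dc = { .no_disk_flush = 1, .no_disk_drain = 0 };

        /* flush is requested but disabled, so the result is "drain" */
        printf("chosen method: %d\n",
               pick_write_ordering(WO_bdev_flush, WO_bdev_flush, &dc));
        return 0;
    }
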
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9e91a2545fc..11a75d32a2e 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
258 if (!hlist_unhashed(&req->colision)) 258 if (!hlist_unhashed(&req->colision))
259 hlist_del(&req->colision); 259 hlist_del(&req->colision);
260 else 260 else
261 D_ASSERT((s & RQ_NET_MASK) == 0); 261 D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
262 262
263 /* for writes we need to do some extra housekeeping */ 263 /* for writes we need to do some extra housekeeping */
264 if (rw == WRITE) 264 if (rw == WRITE)
@@ -813,7 +813,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
813 mdev->state.conn >= C_CONNECTED)); 813 mdev->state.conn >= C_CONNECTED));
814 814
815 if (!(local || remote) && !is_susp(mdev->state)) { 815 if (!(local || remote) && !is_susp(mdev->state)) {
816 dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); 816 if (__ratelimit(&drbd_ratelimit_state))
817 dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
817 goto fail_free_complete; 818 goto fail_free_complete;
818 } 819 }
819 820
@@ -942,12 +943,21 @@ allocate_barrier:
942 if (local) { 943 if (local) {
943 req->private_bio->bi_bdev = mdev->ldev->backing_bdev; 944 req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
944 945
945 if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR 946 /* State may have changed since we grabbed our reference on the
946 : rw == READ ? DRBD_FAULT_DT_RD 947 * mdev->ldev member. Double check, and short-circuit to endio.
947 : DRBD_FAULT_DT_RA)) 948 * In case the last activity log transaction failed to get on
949 * stable storage, and this is a WRITE, we may not even submit
950 * this bio. */
951 if (get_ldev(mdev)) {
952 if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
953 : rw == READ ? DRBD_FAULT_DT_RD
954 : DRBD_FAULT_DT_RA))
955 bio_endio(req->private_bio, -EIO);
956 else
957 generic_make_request(req->private_bio);
958 put_ldev(mdev);
959 } else
948 bio_endio(req->private_bio, -EIO); 960 bio_endio(req->private_bio, -EIO);
949 else
950 generic_make_request(req->private_bio);
951 } 961 }
952 962
953 /* we need to plug ALWAYS since we possibly need to kick lo_dev. 963 /* we need to plug ALWAYS since we possibly need to kick lo_dev.
@@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
1022 return 0; 1032 return 0;
1023 } 1033 }
1024 1034
1025 /* Reject barrier requests if we know the underlying device does
1026 * not support them.
1027 * XXX: Need to get this info from peer as well some how so we
1028 * XXX: reject if EITHER side/data/metadata area does not support them.
1029 *
1030 * because of those XXX, this is not yet enabled,
1031 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
1032 */
1033 if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
1034 /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
1035 bio_endio(bio, -EOPNOTSUPP);
1036 return 0;
1037 }
1038
1039 /* 1035 /*
1040 * what we "blindly" assume: 1036 * what we "blindly" assume:
1041 */ 1037 */
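
The drbd_make_request_common() hunk wraps the local submission in a get_ldev()/put_ldev() pair and re-checks the device after taking the reference, so the backing device cannot disappear between the earlier capability check and the actual submit. The following is a rough, single-threaded sketch of that try-get/re-check/put shape; the struct and names are invented for the example, and the real code relies on the kernel's reference counting and locking rather than this simplification.

    #include <stdatomic.h>
    #include <stdio.h>

    struct local_dev {
        atomic_int users;
        int live;
    };

    /* take a reference only if the device is still attached */
    static int try_get_dev(struct local_dev *d)
    {
        if (!d->live)
            return 0;
        atomic_fetch_add(&d->users, 1);
        /* re-check after the reference is taken, as the patch does */
        if (!d->live) {
            atomic_fetch_sub(&d->users, 1);
            return 0;
        }
        return 1;
    }

    static void put_dev(struct local_dev *d)
    {
        atomic_fetch_sub(&d->users, 1);
    }

    static void submit_or_fail(struct local_dev *d)
    {
        if (try_get_dev(d)) {
            printf("submit to backing device\n");
            put_dev(d);
        } else {
            printf("complete the request with -EIO\n");
        }
    }

    int main(void)
    {
        struct local_dev d = { .live = 1 };

        atomic_init(&d.users, 0);
        submit_or_fail(&d);   /* device present: submit */
        d.live = 0;
        submit_or_fail(&d);   /* device detached: fail fast */
        return 0;
    }
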
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 108d58015cd..b0551ba7ad0 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -102,12 +102,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
102 put_ldev(mdev); 102 put_ldev(mdev);
103} 103}
104 104
105static int is_failed_barrier(int ee_flags)
106{
107 return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
108 == (EE_IS_BARRIER|EE_WAS_ERROR);
109}
110
111/* writes on behalf of the partner, or resync writes, 105/* writes on behalf of the partner, or resync writes,
112 * "submitted" by the receiver, final stage. */ 106 * "submitted" by the receiver, final stage. */
113static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local) 107static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +113,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
119 int is_syncer_req; 113 int is_syncer_req;
120 int do_al_complete_io; 114 int do_al_complete_io;
121 115
122 /* if this is a failed barrier request, disable use of barriers,
123 * and schedule for resubmission */
124 if (is_failed_barrier(e->flags)) {
125 drbd_bump_write_ordering(mdev, WO_bdev_flush);
126 spin_lock_irqsave(&mdev->req_lock, flags);
127 list_del(&e->w.list);
128 e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
129 e->w.cb = w_e_reissue;
130 /* put_ldev actually happens below, once we come here again. */
131 __release(local);
132 spin_unlock_irqrestore(&mdev->req_lock, flags);
133 drbd_queue_work(&mdev->data.work, &e->w);
134 return;
135 }
136
137 D_ASSERT(e->block_id != ID_VACANT); 116 D_ASSERT(e->block_id != ID_VACANT);
138 117
139 /* after we moved e to done_ee, 118 /* after we moved e to done_ee,
@@ -925,7 +904,7 @@ out:
925 drbd_md_sync(mdev); 904 drbd_md_sync(mdev);
926 905
927 if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { 906 if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
928 dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n"); 907 dev_info(DEV, "Writing the whole bitmap\n");
929 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); 908 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
930 } 909 }
931 910
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 767107cce98..3951020e494 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4363,9 +4363,9 @@ out_unreg_blkdev:
4363out_put_disk: 4363out_put_disk:
4364 while (dr--) { 4364 while (dr--) {
4365 del_timer(&motor_off_timer[dr]); 4365 del_timer(&motor_off_timer[dr]);
4366 put_disk(disks[dr]);
4367 if (disks[dr]->queue) 4366 if (disks[dr]->queue)
4368 blk_cleanup_queue(disks[dr]->queue); 4367 blk_cleanup_queue(disks[dr]->queue);
4368 put_disk(disks[dr]);
4369 } 4369 }
4370 return err; 4370 return err;
4371} 4371}
@@ -4573,8 +4573,8 @@ static void __exit floppy_module_exit(void)
4573 device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); 4573 device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
4574 platform_device_unregister(&floppy_device[drive]); 4574 platform_device_unregister(&floppy_device[drive]);
4575 } 4575 }
4576 put_disk(disks[drive]);
4577 blk_cleanup_queue(disks[drive]->queue); 4576 blk_cleanup_queue(disks[drive]->queue);
4577 put_disk(disks[drive]);
4578 } 4578 }
4579 4579
4580 del_timer_sync(&fd_timeout); 4580 del_timer_sync(&fd_timeout);
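
Both floppy hunks swap the teardown order so blk_cleanup_queue() runs before put_disk(): once the last disk reference is dropped the gendisk may be freed, and reading disks[dr]->queue afterwards would be a use-after-free. A trimmed plain-C illustration of the ordering rule, using stand-in types rather than the block-layer API:

    #include <stdio.h>
    #include <stdlib.h>

    struct queue { int dummy; };
    struct disk  { struct queue *queue; };

    static void cleanup_queue(struct queue *q) { free(q); }

    /* drops the last reference; the disk must not be touched afterwards */
    static void put_disk_last_ref(struct disk *d) { free(d); }

    int main(void)
    {
        struct disk *d = malloc(sizeof(*d));

        if (!d)
            return 1;
        d->queue = malloc(sizeof(*d->queue));

        /* correct order: use the disk's fields first, then drop the disk */
        if (d->queue)
            cleanup_queue(d->queue);
        put_disk_last_ref(d);

        puts("teardown done");
        return 0;
    }
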
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e5284ef65f..7ea0bea2f7e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
481 if (bio_rw(bio) == WRITE) { 481 if (bio_rw(bio) == WRITE) {
482 struct file *file = lo->lo_backing_file; 482 struct file *file = lo->lo_backing_file;
483 483
484 /* REQ_HARDBARRIER is deprecated */
485 if (bio->bi_rw & REQ_HARDBARRIER) {
486 ret = -EOPNOTSUPP;
487 goto out;
488 }
489
490 if (bio->bi_rw & REQ_FLUSH) { 484 if (bio->bi_rw & REQ_FLUSH) {
491 ret = vfs_fsync(file, 0); 485 ret = vfs_fsync(file, 0);
492 if (unlikely(ret && ret != -EINVAL)) { 486 if (unlikely(ret && ret != -EINVAL)) {
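
With REQ_HARDBARRIER gone from the block layer, the loop driver no longer has to reject barrier bios and only honors REQ_FLUSH, which it already maps to vfs_fsync() on the backing file. A user-space analogue of that mapping, assuming an ordinary file named backing.img (the name is made up for the example):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* acknowledge a "flush" request only once the backing file is durable */
    static int handle_flush(int backing_fd)
    {
        if (fsync(backing_fd) != 0) {
            perror("fsync");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        int fd = open("backing.img", O_RDWR | O_CREAT, 0600);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, "data", 4) != 4)
            perror("write");
        printf("flush %s\n", handle_flush(fd) == 0 ? "ok" : "failed");
        close(fd);
        return 0;
    }
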
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 06e2812ba12..255035cfc88 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -289,8 +289,6 @@ static int blkif_queue_request(struct request *req)
289 289
290 ring_req->operation = rq_data_dir(req) ? 290 ring_req->operation = rq_data_dir(req) ?
291 BLKIF_OP_WRITE : BLKIF_OP_READ; 291 BLKIF_OP_WRITE : BLKIF_OP_READ;
292 if (req->cmd_flags & REQ_HARDBARRIER)
293 ring_req->operation = BLKIF_OP_WRITE_BARRIER;
294 292
295 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); 293 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
296 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); 294 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d120a5c1c09..ab3894f742c 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -68,6 +68,9 @@ static struct usb_device_id btusb_table[] = {
68 /* Apple MacBookPro6,2 */ 68 /* Apple MacBookPro6,2 */
69 { USB_DEVICE(0x05ac, 0x8218) }, 69 { USB_DEVICE(0x05ac, 0x8218) },
70 70
71 /* Apple MacBookAir3,1, MacBookAir3,2 */
72 { USB_DEVICE(0x05ac, 0x821b) },
73
71 /* AVM BlueFRITZ! USB v2.0 */ 74 /* AVM BlueFRITZ! USB v2.0 */
72 { USB_DEVICE(0x057c, 0x3800) }, 75 { USB_DEVICE(0x057c, 0x3800) },
73 76
@@ -1029,6 +1032,8 @@ static int btusb_probe(struct usb_interface *intf,
1029 1032
1030 usb_set_intfdata(intf, data); 1033 usb_set_intfdata(intf, data);
1031 1034
1035 usb_enable_autosuspend(interface_to_usbdev(intf));
1036
1032 return 0; 1037 return 0;
1033} 1038}
1034 1039
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 3a9c0141683..ba53ec956c9 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -2,24 +2,10 @@
2# Makefile for the kernel character device drivers. 2# Makefile for the kernel character device drivers.
3# 3#
4 4
5# 5obj-y += mem.o random.o
6# This file contains the font map for the default (hardware) font
7#
8FONTMAPFILE = cp437.uni
9
10obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o tty_buffer.o tty_port.o
11
12obj-y += tty_mutex.o
13obj-$(CONFIG_LEGACY_PTYS) += pty.o
14obj-$(CONFIG_UNIX98_PTYS) += pty.o
15obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o 6obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
16obj-y += misc.o 7obj-y += misc.o
17obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o
18obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o 8obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o
19obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
20obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
21obj-$(CONFIG_AUDIT) += tty_audit.o
22obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
23obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o 9obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o
24obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o 10obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o
25obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o 11obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o
@@ -41,8 +27,6 @@ obj-$(CONFIG_ISI) += isicom.o
41obj-$(CONFIG_SYNCLINK) += synclink.o 27obj-$(CONFIG_SYNCLINK) += synclink.o
42obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o 28obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
43obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o 29obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
44obj-$(CONFIG_N_HDLC) += n_hdlc.o
45obj-$(CONFIG_N_GSM) += n_gsm.o
46obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o 30obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
47obj-$(CONFIG_SX) += sx.o generic_serial.o 31obj-$(CONFIG_SX) += sx.o generic_serial.o
48obj-$(CONFIG_RIO) += rio/ generic_serial.o 32obj-$(CONFIG_RIO) += rio/ generic_serial.o
@@ -74,7 +58,6 @@ obj-$(CONFIG_PRINTER) += lp.o
74obj-$(CONFIG_APM_EMULATION) += apm-emulation.o 58obj-$(CONFIG_APM_EMULATION) += apm-emulation.o
75 59
76obj-$(CONFIG_DTLK) += dtlk.o 60obj-$(CONFIG_DTLK) += dtlk.o
77obj-$(CONFIG_R3964) += n_r3964.o
78obj-$(CONFIG_APPLICOM) += applicom.o 61obj-$(CONFIG_APPLICOM) += applicom.o
79obj-$(CONFIG_SONYPI) += sonypi.o 62obj-$(CONFIG_SONYPI) += sonypi.o
80obj-$(CONFIG_RTC) += rtc.o 63obj-$(CONFIG_RTC) += rtc.o
@@ -115,28 +98,3 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o
115 98
116obj-$(CONFIG_JS_RTC) += js-rtc.o 99obj-$(CONFIG_JS_RTC) += js-rtc.o
117js-rtc-y = rtc.o 100js-rtc-y = rtc.o
118
119# Files generated that shall be removed upon make clean
120clean-files := consolemap_deftbl.c defkeymap.c
121
122quiet_cmd_conmk = CONMK $@
123 cmd_conmk = scripts/conmakehash $< > $@
124
125$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
126 $(call cmd,conmk)
127
128$(obj)/defkeymap.o: $(obj)/defkeymap.c
129
130# Uncomment if you're changing the keymap and have an appropriate
131# loadkeys version for the map. By default, we'll use the shipped
132# versions.
133# GENERATE_KEYMAP := 1
134
135ifdef GENERATE_KEYMAP
136
137$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
138 loadkeys --mktable $< > $@.tmp
139 sed -e 's/^static *//' $@.tmp > $@
140 rm $@.tmp
141
142endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 6b6760ea243..9272c38dd3c 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1210,14 +1210,14 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1210 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; 1210 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1211 u32 pte_flags; 1211 u32 pte_flags;
1212 1212
1213 if (type_mask == AGP_USER_UNCACHED_MEMORY) 1213 if (type_mask == AGP_USER_MEMORY)
1214 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; 1214 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1215 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { 1215 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1216 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; 1216 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1217 if (gfdt) 1217 if (gfdt)
1218 pte_flags |= GEN6_PTE_GFDT; 1218 pte_flags |= GEN6_PTE_GFDT;
1219 } else { /* set 'normal'/'cached' to LLC by default */ 1219 } else { /* set 'normal'/'cached' to LLC by default */
1220 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; 1220 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1221 if (gfdt) 1221 if (gfdt)
1222 pte_flags |= GEN6_PTE_GFDT; 1222 pte_flags |= GEN6_PTE_GFDT;
1223 } 1223 }
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index b0a70461a12..c0bd6f472c5 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1299,7 +1299,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
1299{ 1299{
1300 struct async_struct * info = tty->driver_data; 1300 struct async_struct * info = tty->driver_data;
1301 struct async_icount cprev, cnow; /* kernel counter temps */ 1301 struct async_icount cprev, cnow; /* kernel counter temps */
1302 struct serial_icounter_struct icount;
1303 void __user *argp = (void __user *)arg; 1302 void __user *argp = (void __user *)arg;
1304 unsigned long flags; 1303 unsigned long flags;
1305 1304
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 3bc0eef8871..d72433f2d31 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -120,7 +120,7 @@ static int i8k_smm(struct smm_regs *regs)
120 int eax = regs->eax; 120 int eax = regs->eax;
121 121
122#if defined(CONFIG_X86_64) 122#if defined(CONFIG_X86_64)
123 asm("pushq %%rax\n\t" 123 asm volatile("pushq %%rax\n\t"
124 "movl 0(%%rax),%%edx\n\t" 124 "movl 0(%%rax),%%edx\n\t"
125 "pushq %%rdx\n\t" 125 "pushq %%rdx\n\t"
126 "movl 4(%%rax),%%ebx\n\t" 126 "movl 4(%%rax),%%ebx\n\t"
@@ -146,7 +146,7 @@ static int i8k_smm(struct smm_regs *regs)
146 : "a"(regs) 146 : "a"(regs)
147 : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); 147 : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
148#else 148#else
149 asm("pushl %%eax\n\t" 149 asm volatile("pushl %%eax\n\t"
150 "movl 0(%%eax),%%edx\n\t" 150 "movl 0(%%eax),%%edx\n\t"
151 "push %%edx\n\t" 151 "push %%edx\n\t"
152 "movl 4(%%eax),%%ebx\n\t" 152 "movl 4(%%eax),%%ebx\n\t"
@@ -167,7 +167,8 @@ static int i8k_smm(struct smm_regs *regs)
167 "movl %%edx,0(%%eax)\n\t" 167 "movl %%edx,0(%%eax)\n\t"
168 "lahf\n\t" 168 "lahf\n\t"
169 "shrl $8,%%eax\n\t" 169 "shrl $8,%%eax\n\t"
170 "andl $1,%%eax\n":"=a"(rc) 170 "andl $1,%%eax\n"
171 :"=a"(rc)
171 : "a"(regs) 172 : "a"(regs)
172 : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); 173 : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
173#endif 174#endif
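
The i8k hunks mark the SMM calls as asm volatile: the inline assembly has side effects the compiler cannot see from its operand list, so it must not be deleted when its result looks unused, merged with an identical statement, or moved freely. A minimal user-space illustration of the same qualifier, assuming x86 and GCC/Clang inline-asm syntax:

    #include <stdio.h>

    static unsigned long long read_tsc(void)
    {
        unsigned int lo, hi;

        /* "volatile" marks the asm as having side effects: the compiler
         * may not delete it when the result is unused, merge it with an
         * identical statement, or hoist it across this point. */
        asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
        return ((unsigned long long)hi << 32) | lo;
    }

    int main(void)
    {
        unsigned long long a = read_tsc();
        unsigned long long b = read_tsc();

        printf("delta: %llu cycles\n", b - a);
        return 0;
    }
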
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index dd3f9b1f11b..294d03e8c61 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1828,7 +1828,6 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file,
1828 unsigned int cmd, unsigned long arg) 1828 unsigned int cmd, unsigned long arg)
1829{ 1829{
1830 struct port *port = tty->driver_data; 1830 struct port *port = tty->driver_data;
1831 void __user *argp = (void __user *)arg;
1832 int rval = -ENOIOCTLCMD; 1831 int rval = -ENOIOCTLCMD;
1833 1832
1834 DBG1("******** IOCTL, cmd: %d", cmd); 1833 DBG1("******** IOCTL, cmd: %d", cmd);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index d962f25dcc2..777181a2e60 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -979,8 +979,9 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
979 if (dev->flags0 & 1) { 979 if (dev->flags0 & 1) {
980 set_bit(IS_CMM_ABSENT, &dev->flags); 980 set_bit(IS_CMM_ABSENT, &dev->flags);
981 rc = -ENODEV; 981 rc = -ENODEV;
982 } else {
983 rc = -EIO;
982 } 984 }
983 rc = -EIO;
984 goto release_io; 985 goto release_io;
985 } 986 }
986 987
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index bfc10f89d95..eaa41992fbe 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2796,6 +2796,7 @@ static const struct tty_operations mgslpc_ops = {
2796 .hangup = mgslpc_hangup, 2796 .hangup = mgslpc_hangup,
2797 .tiocmget = tiocmget, 2797 .tiocmget = tiocmget,
2798 .tiocmset = tiocmset, 2798 .tiocmset = tiocmset,
2799 .get_icount = mgslpc_get_icount,
2799 .proc_fops = &mgslpc_proc_fops, 2800 .proc_fops = &mgslpc_proc_fops,
2800}; 2801};
2801 2802
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index a4461165228..d68d3aa1814 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -616,13 +616,9 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
616 /* get hold of clock */ 616 /* get hold of clock */
617 p->clk = clk_get(&p->pdev->dev, "cmt_fck"); 617 p->clk = clk_get(&p->pdev->dev, "cmt_fck");
618 if (IS_ERR(p->clk)) { 618 if (IS_ERR(p->clk)) {
619 dev_warn(&p->pdev->dev, "using deprecated clock lookup\n"); 619 dev_err(&p->pdev->dev, "cannot get clock\n");
620 p->clk = clk_get(&p->pdev->dev, cfg->clk); 620 ret = PTR_ERR(p->clk);
621 if (IS_ERR(p->clk)) { 621 goto err1;
622 dev_err(&p->pdev->dev, "cannot get clock\n");
623 ret = PTR_ERR(p->clk);
624 goto err1;
625 }
626 } 622 }
627 623
628 if (resource_size(res) == 6) { 624 if (resource_size(res) == 6) {
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index ef7a5be8a09..40630cb9823 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -287,13 +287,9 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
287 /* get hold of clock */ 287 /* get hold of clock */
288 p->clk = clk_get(&p->pdev->dev, "mtu2_fck"); 288 p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
289 if (IS_ERR(p->clk)) { 289 if (IS_ERR(p->clk)) {
290 dev_warn(&p->pdev->dev, "using deprecated clock lookup\n"); 290 dev_err(&p->pdev->dev, "cannot get clock\n");
291 p->clk = clk_get(&p->pdev->dev, cfg->clk); 291 ret = PTR_ERR(p->clk);
292 if (IS_ERR(p->clk)) { 292 goto err1;
293 dev_err(&p->pdev->dev, "cannot get clock\n");
294 ret = PTR_ERR(p->clk);
295 goto err1;
296 }
297 } 293 }
298 294
299 return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), 295 return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index de715901b82..36aba992306 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -393,13 +393,9 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
393 /* get hold of clock */ 393 /* get hold of clock */
394 p->clk = clk_get(&p->pdev->dev, "tmu_fck"); 394 p->clk = clk_get(&p->pdev->dev, "tmu_fck");
395 if (IS_ERR(p->clk)) { 395 if (IS_ERR(p->clk)) {
396 dev_warn(&p->pdev->dev, "using deprecated clock lookup\n"); 396 dev_err(&p->pdev->dev, "cannot get clock\n");
397 p->clk = clk_get(&p->pdev->dev, cfg->clk); 397 ret = PTR_ERR(p->clk);
398 if (IS_ERR(p->clk)) { 398 goto err1;
399 dev_err(&p->pdev->dev, "cannot get clock\n");
400 ret = PTR_ERR(p->clk);
401 goto err1;
402 }
403 } 399 }
404 400
405 return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), 401 return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
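
The sh_cmt, sh_mtu2 and sh_tmu hunks all drop the deprecated per-board clock-name fallback and simply propagate the clk_get() failure. Errors from clk_get() are encoded in the returned pointer itself and unpacked with IS_ERR()/PTR_ERR(); the snippet below is a toy imitation of that idiom for illustration only, the real helpers live in <linux/err.h>.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* toy imitations of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers */
    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)      { return (void *)err; }
    static long PTR_ERR(const void *p)  { return (long)(intptr_t)p; }
    static int IS_ERR(const void *p)    { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

    /* stand-in for clk_get(): pretend the named clock does not exist */
    static void *clk_get_stub(const char *name)
    {
        (void)name;
        return ERR_PTR(-ENOENT);
    }

    static int setup(void)
    {
        void *clk = clk_get_stub("tmu_fck");

        if (IS_ERR(clk)) {
            fprintf(stderr, "cannot get clock\n");
            return (int)PTR_ERR(clk);   /* propagate the error code */
        }
        return 0;
    }

    int main(void)
    {
        printf("setup() = %d\n", setup());
        return 0;
    }
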
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 88ee01510ec..76141262ea1 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1832,7 +1832,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
1832 return -ENODEV; 1832 return -ENODEV;
1833 1833
1834 ino = mdesc_get_property(mdesc, node, "ino", &ino_len); 1834 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1835 if (!intr) 1835 if (!ino)
1836 return -ENODEV; 1836 return -ENODEV;
1837 1837
1838 if (intr_len != ino_len) 1838 if (intr_len != ino_len)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 2e992bc8015..8a515baa38f 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -286,7 +286,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
286 if (initial) 286 if (initial)
287 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ 287 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
288 : "+S" (input), "+D" (output), "+a" (iv) 288 : "+S" (input), "+D" (output), "+a" (iv)
289 : "d" (control_word), "b" (key), "c" (count)); 289 : "d" (control_word), "b" (key), "c" (initial));
290 290
291 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ 291 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
292 : "+S" (input), "+D" (output), "+a" (iv) 292 : "+S" (input), "+D" (output), "+a" (iv)
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9dcb17d51ae..84eb607d6c0 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -577,17 +577,11 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr,
577 return ret; 577 return ret;
578} 578}
579 579
580static int ar_context_add_page(struct ar_context *ctx) 580static void ar_context_link_page(struct ar_context *ctx,
581 struct ar_buffer *ab, dma_addr_t ab_bus)
581{ 582{
582 struct device *dev = ctx->ohci->card.device;
583 struct ar_buffer *ab;
584 dma_addr_t uninitialized_var(ab_bus);
585 size_t offset; 583 size_t offset;
586 584
587 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
588 if (ab == NULL)
589 return -ENOMEM;
590
591 ab->next = NULL; 585 ab->next = NULL;
592 memset(&ab->descriptor, 0, sizeof(ab->descriptor)); 586 memset(&ab->descriptor, 0, sizeof(ab->descriptor));
593 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 587 ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
@@ -606,6 +600,19 @@ static int ar_context_add_page(struct ar_context *ctx)
606 600
607 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 601 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
608 flush_writes(ctx->ohci); 602 flush_writes(ctx->ohci);
603}
604
605static int ar_context_add_page(struct ar_context *ctx)
606{
607 struct device *dev = ctx->ohci->card.device;
608 struct ar_buffer *ab;
609 dma_addr_t uninitialized_var(ab_bus);
610
611 ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
612 if (ab == NULL)
613 return -ENOMEM;
614
615 ar_context_link_page(ctx, ab, ab_bus);
609 616
610 return 0; 617 return 0;
611} 618}
@@ -730,16 +737,17 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
730static void ar_context_tasklet(unsigned long data) 737static void ar_context_tasklet(unsigned long data)
731{ 738{
732 struct ar_context *ctx = (struct ar_context *)data; 739 struct ar_context *ctx = (struct ar_context *)data;
733 struct fw_ohci *ohci = ctx->ohci;
734 struct ar_buffer *ab; 740 struct ar_buffer *ab;
735 struct descriptor *d; 741 struct descriptor *d;
736 void *buffer, *end; 742 void *buffer, *end;
743 __le16 res_count;
737 744
738 ab = ctx->current_buffer; 745 ab = ctx->current_buffer;
739 d = &ab->descriptor; 746 d = &ab->descriptor;
740 747
741 if (d->res_count == 0) { 748 res_count = ACCESS_ONCE(d->res_count);
742 size_t size, rest, offset; 749 if (res_count == 0) {
750 size_t size, size2, rest, pktsize, size3, offset;
743 dma_addr_t start_bus; 751 dma_addr_t start_bus;
744 void *start; 752 void *start;
745 753
@@ -750,29 +758,63 @@ static void ar_context_tasklet(unsigned long data)
750 */ 758 */
751 759
752 offset = offsetof(struct ar_buffer, data); 760 offset = offsetof(struct ar_buffer, data);
753 start = buffer = ab; 761 start = ab;
754 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset; 762 start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
763 buffer = ab->data;
755 764
756 ab = ab->next; 765 ab = ab->next;
757 d = &ab->descriptor; 766 d = &ab->descriptor;
758 size = buffer + PAGE_SIZE - ctx->pointer; 767 size = start + PAGE_SIZE - ctx->pointer;
768 /* valid buffer data in the next page */
759 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count); 769 rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
770 /* what actually fits in this page */
771 size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
760 memmove(buffer, ctx->pointer, size); 772 memmove(buffer, ctx->pointer, size);
761 memcpy(buffer + size, ab->data, rest); 773 memcpy(buffer + size, ab->data, size2);
762 ctx->current_buffer = ab; 774
763 ctx->pointer = (void *) ab->data + rest; 775 while (size > 0) {
764 end = buffer + size + rest; 776 void *next = handle_ar_packet(ctx, buffer);
777 pktsize = next - buffer;
778 if (pktsize >= size) {
779 /*
780 * We have handled all the data that was
781 * originally in this page, so we can now
782 * continue in the next page.
783 */
784 buffer = next;
785 break;
786 }
787 /* move the next packet to the start of the buffer */
788 memmove(buffer, next, size + size2 - pktsize);
789 size -= pktsize;
790 /* fill up this page again */
791 size3 = min(rest - size2,
792 (size_t)PAGE_SIZE - offset - size - size2);
793 memcpy(buffer + size + size2,
794 (void *) ab->data + size2, size3);
795 size2 += size3;
796 }
765 797
766 while (buffer < end) 798 if (rest > 0) {
767 buffer = handle_ar_packet(ctx, buffer); 799 /* handle the packets that are fully in the next page */
800 buffer = (void *) ab->data +
801 (buffer - (start + offset + size));
802 end = (void *) ab->data + rest;
803
804 while (buffer < end)
805 buffer = handle_ar_packet(ctx, buffer);
768 806
769 dma_free_coherent(ohci->card.device, PAGE_SIZE, 807 ctx->current_buffer = ab;
770 start, start_bus); 808 ctx->pointer = end;
771 ar_context_add_page(ctx); 809
810 ar_context_link_page(ctx, start, start_bus);
811 } else {
812 ctx->pointer = start + PAGE_SIZE;
813 }
772 } else { 814 } else {
773 buffer = ctx->pointer; 815 buffer = ctx->pointer;
774 ctx->pointer = end = 816 ctx->pointer = end =
775 (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count); 817 (void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
776 818
777 while (buffer < end) 819 while (buffer < end)
778 buffer = handle_ar_packet(ctx, buffer); 820 buffer = handle_ar_packet(ctx, buffer);
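
One detail worth noting in the ar_context_tasklet() rework is that the DMA-updated res_count field is read exactly once, via ACCESS_ONCE(), into a local variable, so every later decision works on the same snapshot instead of re-reading a value the controller may be changing underneath. A compact illustration of that read-once pattern using a volatile cast (ACCESS_ONCE itself is a kernel macro; the descriptor layout here is invented):

    #include <stdio.h>

    /* read the location exactly once; later uses see the same snapshot */
    #define READ_ONCE_U16(x) (*(volatile unsigned short *)&(x))

    struct descriptor {
        unsigned short req_count;
        unsigned short res_count;   /* updated by the DMA engine */
    };

    static void consume(struct descriptor *d)
    {
        unsigned short res = READ_ONCE_U16(d->res_count);

        if (res == 0)
            printf("buffer full: %u bytes valid\n", (unsigned)d->req_count);
        else
            printf("partial buffer: %u bytes valid\n",
                   (unsigned)(d->req_count - res));
    }

    int main(void)
    {
        struct descriptor d = { .req_count = 4096, .res_count = 512 };

        consume(&d);
        d.res_count = 0;    /* the hardware finished the buffer */
        consume(&d);
        return 0;
    }
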
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98f195..f7af91cb273 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
276 struct drm_crtc *tmp; 276 struct drm_crtc *tmp;
277 int crtc_mask = 1; 277 int crtc_mask = 1;
278 278
279 WARN(!crtc, "checking null crtc?"); 279 WARN(!crtc, "checking null crtc?\n");
280 280
281 dev = crtc->dev; 281 dev = crtc->dev;
282 282
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1a26217a53..a245d17165a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
240 .addr = DDC_ADDR, 240 .addr = DDC_ADDR,
241 .flags = I2C_M_RD, 241 .flags = I2C_M_RD,
242 .len = len, 242 .len = len,
243 .buf = buf + start, 243 .buf = buf,
244 } 244 }
245 }; 245 };
246 246
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
253static u8 * 253static u8 *
254drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 254drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
255{ 255{
256 int i, j = 0; 256 int i, j = 0, valid_extensions = 0;
257 u8 *block, *new; 257 u8 *block, *new;
258 258
259 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) 259 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
280 280
281 for (j = 1; j <= block[0x7e]; j++) { 281 for (j = 1; j <= block[0x7e]; j++) {
282 for (i = 0; i < 4; i++) { 282 for (i = 0; i < 4; i++) {
283 if (drm_do_probe_ddc_edid(adapter, block, j, 283 if (drm_do_probe_ddc_edid(adapter,
284 EDID_LENGTH)) 284 block + (valid_extensions + 1) * EDID_LENGTH,
285 j, EDID_LENGTH))
285 goto out; 286 goto out;
286 if (drm_edid_block_valid(block + j * EDID_LENGTH)) 287 if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
288 valid_extensions++;
287 break; 289 break;
290 }
288 } 291 }
289 if (i == 4) 292 if (i == 4)
290 goto carp; 293 dev_warn(connector->dev->dev,
294 "%s: Ignoring invalid EDID block %d.\n",
295 drm_get_connector_name(connector), j);
296 }
297
298 if (valid_extensions != block[0x7e]) {
299 block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
300 block[0x7e] = valid_extensions;
301 new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
302 if (!new)
303 goto out;
304 block = new;
291 } 305 }
292 306
293 return block; 307 return block;
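
drm_do_get_edid() now keeps only the extension blocks that validate and rewrites the extension count at offset 0x7e. Every EDID block must sum to 0 modulo 256, so lowering the count by N has to be compensated by adding N back into the base block's checksum byte, which is what the block[EDID_LENGTH-1] += ... line does. A small standalone check of that arithmetic, on an invented 128-byte block:

    #include <stdio.h>

    #define EDID_LENGTH 128

    /* an EDID block is valid only if its 128 bytes sum to 0 (mod 256) */
    static unsigned char block_sum(const unsigned char *b)
    {
        unsigned int s = 0;
        int i;

        for (i = 0; i < EDID_LENGTH; i++)
            s += b[i];
        return (unsigned char)s;
    }

    int main(void)
    {
        unsigned char block[EDID_LENGTH] = { 0 };
        int valid_extensions = 1;

        block[0x7e] = 3;                                   /* claims 3 extensions */
        block[EDID_LENGTH - 1] = (unsigned char)(0u - block_sum(block));
        printf("before: sum = %u\n", block_sum(block));    /* 0 */

        /* only one extension turned out to be valid: fix count and checksum */
        block[EDID_LENGTH - 1] += block[0x7e] - valid_extensions;
        block[0x7e] = valid_extensions;
        printf("after:  sum = %u\n", block_sum(block));    /* still 0 */
        return 0;
    }
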
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd42076..80745f85902 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
44module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); 44module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
45 45
46unsigned int i915_powersave = 1; 46unsigned int i915_powersave = 1;
47module_param_named(powersave, i915_powersave, int, 0400); 47module_param_named(powersave, i915_powersave, int, 0600);
48 48
49unsigned int i915_lvds_downclock = 0; 49unsigned int i915_lvds_downclock = 0;
50module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 50module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285..90414ae86af 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
1321 1321
1322#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1322#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1323#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1323#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1324#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1324 1325
1325#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1326#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1326 1327
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b..ef188e39140 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2172static int i915_ring_idle(struct drm_device *dev, 2172static int i915_ring_idle(struct drm_device *dev,
2173 struct intel_ring_buffer *ring) 2173 struct intel_ring_buffer *ring)
2174{ 2174{
2175 if (list_empty(&ring->gpu_write_list)) 2175 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2176 return 0; 2176 return 0;
2177 2177
2178 i915_gem_flush_ring(dev, NULL, ring, 2178 i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev)
2190 int ret; 2190 int ret;
2191 2191
2192 lists_empty = (list_empty(&dev_priv->mm.flushing_list) && 2192 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2193 list_empty(&dev_priv->render_ring.active_list) && 2193 list_empty(&dev_priv->mm.active_list));
2194 list_empty(&dev_priv->bsd_ring.active_list) &&
2195 list_empty(&dev_priv->blt_ring.active_list));
2196 if (lists_empty) 2194 if (lists_empty)
2197 return 0; 2195 return 0;
2198 2196
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
3108 * write domain 3106 * write domain
3109 */ 3107 */
3110 if (obj->write_domain && 3108 if (obj->write_domain &&
3111 obj->write_domain != obj->pending_read_domains) { 3109 (obj->write_domain != obj->pending_read_domains ||
3110 obj_priv->ring != ring)) {
3112 flush_domains |= obj->write_domain; 3111 flush_domains |= obj->write_domain;
3113 invalidate_domains |= 3112 invalidate_domains |=
3114 obj->pending_read_domains & ~obj->write_domain; 3113 obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
3497 return 0; 3496 return 0;
3498} 3497}
3499 3498
3499static int
3500i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
3501 struct drm_file *file,
3502 struct intel_ring_buffer *ring,
3503 struct drm_gem_object **objects,
3504 int count)
3505{
3506 struct drm_i915_private *dev_priv = dev->dev_private;
3507 int ret, i;
3508
3509 /* Zero the global flush/invalidate flags. These
3510 * will be modified as new domains are computed
3511 * for each object
3512 */
3513 dev->invalidate_domains = 0;
3514 dev->flush_domains = 0;
3515 dev_priv->mm.flush_rings = 0;
3516 for (i = 0; i < count; i++)
3517 i915_gem_object_set_to_gpu_domain(objects[i], ring);
3518
3519 if (dev->invalidate_domains | dev->flush_domains) {
3520#if WATCH_EXEC
3521 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3522 __func__,
3523 dev->invalidate_domains,
3524 dev->flush_domains);
3525#endif
3526 i915_gem_flush(dev, file,
3527 dev->invalidate_domains,
3528 dev->flush_domains,
3529 dev_priv->mm.flush_rings);
3530 }
3531
3532 for (i = 0; i < count; i++) {
3533 struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
3534 /* XXX replace with semaphores */
3535 if (obj->ring && ring != obj->ring) {
3536 ret = i915_gem_object_wait_rendering(&obj->base, true);
3537 if (ret)
3538 return ret;
3539 }
3540 }
3541
3542 return 0;
3543}
3544
3500/* Throttle our rendering by waiting until the ring has completed our requests 3545/* Throttle our rendering by waiting until the ring has completed our requests
3501 * emitted over 20 msec ago. 3546 * emitted over 20 msec ago.
3502 * 3547 *
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3757 goto err; 3802 goto err;
3758 } 3803 }
3759 3804
3760 /* Zero the global flush/invalidate flags. These 3805 ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
3761 * will be modified as new domains are computed 3806 object_list, args->buffer_count);
3762 * for each object 3807 if (ret)
3763 */ 3808 goto err;
3764 dev->invalidate_domains = 0;
3765 dev->flush_domains = 0;
3766 dev_priv->mm.flush_rings = 0;
3767
3768 for (i = 0; i < args->buffer_count; i++) {
3769 struct drm_gem_object *obj = object_list[i];
3770
3771 /* Compute new gpu domains and update invalidate/flush */
3772 i915_gem_object_set_to_gpu_domain(obj, ring);
3773 }
3774
3775 if (dev->invalidate_domains | dev->flush_domains) {
3776#if WATCH_EXEC
3777 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3778 __func__,
3779 dev->invalidate_domains,
3780 dev->flush_domains);
3781#endif
3782 i915_gem_flush(dev, file,
3783 dev->invalidate_domains,
3784 dev->flush_domains,
3785 dev_priv->mm.flush_rings);
3786 }
3787 3809
3788 for (i = 0; i < args->buffer_count; i++) { 3810 for (i = 0; i < args->buffer_count; i++) {
3789 struct drm_gem_object *obj = object_list[i]; 3811 struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4065,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4043 alignment = i915_gem_get_gtt_alignment(obj); 4065 alignment = i915_gem_get_gtt_alignment(obj);
4044 if (obj_priv->gtt_offset & (alignment - 1)) { 4066 if (obj_priv->gtt_offset & (alignment - 1)) {
4045 WARN(obj_priv->pin_count, 4067 WARN(obj_priv->pin_count,
4046 "bo is already pinned with incorrect alignment:" 4068 "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
4047 " offset=%x, req.alignment=%x\n",
4048 obj_priv->gtt_offset, alignment); 4069 obj_priv->gtt_offset, alignment);
4049 ret = i915_gem_object_unbind(obj); 4070 ret = i915_gem_object_unbind(obj);
4050 if (ret) 4071 if (ret)
@@ -4856,17 +4877,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4856 struct drm_file *file_priv) 4877 struct drm_file *file_priv)
4857{ 4878{
4858 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 4879 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4859 void *obj_addr; 4880 void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
4860 int ret; 4881 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4861 char __user *user_data;
4862 4882
4863 user_data = (char __user *) (uintptr_t) args->data_ptr; 4883 DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
4864 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4865 4884
4866 DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size); 4885 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4867 ret = copy_from_user(obj_addr, user_data, args->size); 4886 unsigned long unwritten;
4868 if (ret) 4887
4869 return -EFAULT; 4888 /* The physical object once assigned is fixed for the lifetime
4889 * of the obj, so we can safely drop the lock and continue
4890 * to access vaddr.
4891 */
4892 mutex_unlock(&dev->struct_mutex);
4893 unwritten = copy_from_user(vaddr, user_data, args->size);
4894 mutex_lock(&dev->struct_mutex);
4895 if (unwritten)
4896 return -EFAULT;
4897 }
4870 4898
4871 drm_agp_chipset_flush(dev); 4899 drm_agp_chipset_flush(dev);
4872 return 0; 4900 return 0;
@@ -4900,9 +4928,7 @@ i915_gpu_is_active(struct drm_device *dev)
4900 int lists_empty; 4928 int lists_empty;
4901 4929
4902 lists_empty = list_empty(&dev_priv->mm.flushing_list) && 4930 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4903 list_empty(&dev_priv->render_ring.active_list) && 4931 list_empty(&dev_priv->mm.active_list);
4904 list_empty(&dev_priv->bsd_ring.active_list) &&
4905 list_empty(&dev_priv->blt_ring.active_list);
4906 4932
4907 return !lists_empty; 4933 return !lists_empty;
4908} 4934}
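
The execbuffer change folds the per-object domain walk and the single flush into the new i915_gem_execbuffer_move_to_gpu() helper, and additionally waits for rendering when an object is still tied to a different ring. Structurally this is an accumulate-then-act loop: fold per-object flags into one mask, then act on the combined mask once. A generic sketch of that shape, with nothing GPU-specific in it:

    #include <stdio.h>

    struct object {
        unsigned int write_domain;
        unsigned int read_domains;
    };

    /* pass 1: fold every object's flush needs into one mask */
    static unsigned int gather_flush_mask(const struct object *objs, int n)
    {
        unsigned int flush = 0;
        int i;

        for (i = 0; i < n; i++)
            if (objs[i].write_domain &&
                objs[i].write_domain != objs[i].read_domains)
                flush |= objs[i].write_domain;
        return flush;
    }

    int main(void)
    {
        struct object objs[] = {
            { .write_domain = 0x1, .read_domains = 0x1 },   /* no flush needed */
            { .write_domain = 0x2, .read_domains = 0x4 },   /* needs flushing  */
        };
        unsigned int mask = gather_flush_mask(objs, 2);

        /* pass 2: act once on the combined mask */
        if (mask)
            printf("flush once with mask 0x%x\n", mask);
        return 0;
    }
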
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53f..d8ae7d1d0cc 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
165 165
166 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 166 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
167 list_empty(&dev_priv->mm.flushing_list) && 167 list_empty(&dev_priv->mm.flushing_list) &&
168 list_empty(&dev_priv->render_ring.active_list) && 168 list_empty(&dev_priv->mm.active_list));
169 list_empty(&dev_priv->bsd_ring.active_list) &&
170 list_empty(&dev_priv->blt_ring.active_list));
171 if (lists_empty) 169 if (lists_empty)
172 return -ENOSPC; 170 return -ENOSPC;
173 171
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
184 182
185 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 183 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
186 list_empty(&dev_priv->mm.flushing_list) && 184 list_empty(&dev_priv->mm.flushing_list) &&
187 list_empty(&dev_priv->render_ring.active_list) && 185 list_empty(&dev_priv->mm.active_list));
188 list_empty(&dev_priv->bsd_ring.active_list) &&
189 list_empty(&dev_priv->blt_ring.active_list));
190 BUG_ON(!lists_empty); 186 BUG_ON(!lists_empty);
191 187
192 return 0; 188 return 0;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d95..454c064f8ef 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
862 /* Clock gating state */ 862 /* Clock gating state */
863 intel_init_clock_gating(dev); 863 intel_init_clock_gating(dev);
864 864
865 if (HAS_PCH_SPLIT(dev)) 865 if (HAS_PCH_SPLIT(dev)) {
866 ironlake_enable_drps(dev); 866 ironlake_enable_drps(dev);
867 intel_init_emon(dev);
868 }
867 869
868 /* Cache mode state */ 870 /* Cache mode state */
869 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 871 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b..48d8fd686ea 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
1681 udelay(500); 1681 udelay(500);
1682} 1682}
1683 1683
1684static void intel_fdi_normal_train(struct drm_crtc *crtc)
1685{
1686 struct drm_device *dev = crtc->dev;
1687 struct drm_i915_private *dev_priv = dev->dev_private;
1688 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1689 int pipe = intel_crtc->pipe;
1690 u32 reg, temp;
1691
1692 /* enable normal train */
1693 reg = FDI_TX_CTL(pipe);
1694 temp = I915_READ(reg);
1695 temp &= ~FDI_LINK_TRAIN_NONE;
1696 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
1697 I915_WRITE(reg, temp);
1698
1699 reg = FDI_RX_CTL(pipe);
1700 temp = I915_READ(reg);
1701 if (HAS_PCH_CPT(dev)) {
1702 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1703 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
1704 } else {
1705 temp &= ~FDI_LINK_TRAIN_NONE;
1706 temp |= FDI_LINK_TRAIN_NONE;
1707 }
1708 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
1709
1710 /* wait one idle pattern time */
1711 POSTING_READ(reg);
1712 udelay(1000);
1713}
1714
1684/* The FDI link training functions for ILK/Ibexpeak. */ 1715/* The FDI link training functions for ILK/Ibexpeak. */
1685static void ironlake_fdi_link_train(struct drm_crtc *crtc) 1716static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1686{ 1717{
@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1767 1798
1768 DRM_DEBUG_KMS("FDI train done\n"); 1799 DRM_DEBUG_KMS("FDI train done\n");
1769 1800
1770 /* enable normal train */
1771 reg = FDI_TX_CTL(pipe);
1772 temp = I915_READ(reg);
1773 temp &= ~FDI_LINK_TRAIN_NONE;
1774 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
1775 I915_WRITE(reg, temp);
1776
1777 reg = FDI_RX_CTL(pipe);
1778 temp = I915_READ(reg);
1779 if (HAS_PCH_CPT(dev)) {
1780 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1781 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
1782 } else {
1783 temp &= ~FDI_LINK_TRAIN_NONE;
1784 temp |= FDI_LINK_TRAIN_NONE;
1785 }
1786 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
1787
1788 /* wait one idle pattern time */
1789 POSTING_READ(reg);
1790 udelay(1000);
1791} 1801}
1792 1802
1793static const int const snb_b_fdi_train_param [] = { 1803static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2090 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); 2100 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2091 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 2101 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
2092 2102
2103 intel_fdi_normal_train(crtc);
2104
2093 /* For PCH DP, enable TRANS_DP_CTL */ 2105 /* For PCH DP, enable TRANS_DP_CTL */
2094 if (HAS_PCH_CPT(dev) && 2106 if (HAS_PCH_CPT(dev) &&
2095 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2107 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
2200 udelay(100); 2212 udelay(100);
2201 2213
2202 /* Ironlake workaround, disable clock pointer after downing FDI */ 2214 /* Ironlake workaround, disable clock pointer after downing FDI */
2203 I915_WRITE(FDI_RX_CHICKEN(pipe), 2215 if (HAS_PCH_IBX(dev))
2204 I915_READ(FDI_RX_CHICKEN(pipe) & 2216 I915_WRITE(FDI_RX_CHICKEN(pipe),
2205 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); 2217 I915_READ(FDI_RX_CHICKEN(pipe) &
2218 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
2206 2219
2207 /* still set train pattern 1 */ 2220 /* still set train pattern 1 */
2208 reg = FDI_TX_CTL(pipe); 2221 reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev)
5581 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 5594 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
5582 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 5595 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
5583 MEMMODE_FSTART_SHIFT; 5596 MEMMODE_FSTART_SHIFT;
5584 fstart = fmax;
5585 5597
5586 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 5598 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
5587 PXVFREQ_PX_SHIFT; 5599 PXVFREQ_PX_SHIFT;
5588 5600
5589 dev_priv->fmax = fstart; /* IPS callback will increase this */ 5601 dev_priv->fmax = fmax; /* IPS callback will increase this */
5590 dev_priv->fstart = fstart; 5602 dev_priv->fstart = fstart;
5591 5603
5592 dev_priv->max_delay = fmax; 5604 dev_priv->max_delay = fstart;
5593 dev_priv->min_delay = fmin; 5605 dev_priv->min_delay = fmin;
5594 dev_priv->cur_delay = fstart; 5606 dev_priv->cur_delay = fstart;
5595 5607
5596 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, 5608 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
5597 fstart); 5609 fmax, fmin, fstart);
5598 5610
5599 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 5611 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
5600 5612
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b..c8e00555331 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
1517 status = connector_status_connected; 1517 status = connector_status_connected;
1518 } 1518 }
1519 1519
1520 return bit; 1520 return status;
1521} 1521}
1522 1522
1523/** 1523/**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a876..21551fe7454 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
296extern void intel_init_clock_gating(struct drm_device *dev); 296extern void intel_init_clock_gating(struct drm_device *dev);
297extern void ironlake_enable_drps(struct drm_device *dev); 297extern void ironlake_enable_drps(struct drm_device *dev);
298extern void ironlake_disable_drps(struct drm_device *dev); 298extern void ironlake_disable_drps(struct drm_device *dev);
299extern void intel_init_emon(struct drm_device *dev);
299 300
300extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 301extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
301 struct drm_gem_object *obj, 302 struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea..4324a326f98 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
481 struct drm_device *dev = connector->dev; 481 struct drm_device *dev = connector->dev;
482 struct drm_display_mode *mode; 482 struct drm_display_mode *mode;
483 483
484 if (intel_lvds->edid) { 484 if (intel_lvds->edid)
485 drm_mode_connector_update_edid_property(connector,
486 intel_lvds->edid);
487 return drm_add_edid_modes(connector, intel_lvds->edid); 485 return drm_add_edid_modes(connector, intel_lvds->edid);
488 }
489 486
490 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); 487 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
491 if (mode == 0) 488 if (mode == 0)
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev)
939 */ 936 */
940 intel_lvds->edid = drm_get_edid(connector, 937 intel_lvds->edid = drm_get_edid(connector,
941 &dev_priv->gmbus[pin].adapter); 938 &dev_priv->gmbus[pin].adapter);
942 939 if (intel_lvds->edid) {
940 if (drm_add_edid_modes(connector,
941 intel_lvds->edid)) {
942 drm_mode_connector_update_edid_property(connector,
943 intel_lvds->edid);
944 } else {
945 kfree(intel_lvds->edid);
946 intel_lvds->edid = NULL;
947 }
948 }
943 if (!intel_lvds->edid) { 949 if (!intel_lvds->edid) {
944 /* Didn't get an EDID, so 950 /* Didn't get an EDID, so
945 * Set wide sync ranges so we get all modes 951 * Set wide sync ranges so we get all modes
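
The new code above only keeps a cached EDID if drm_add_edid_modes() actually finds modes in it, freeing it otherwise so the fallback path runs. A tiny userspace sketch of that validate-then-keep pattern, with a stand-in validator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in validator: here an "EDID" is usable if it is non-empty. */
static int add_modes_from_edid(const char *edid)
{
	return edid && edid[0] ? 1 : 0;
}

int main(void)
{
	char *edid = strdup("");	/* pretend we read back a bogus EDID */

	if (edid && !add_modes_from_edid(edid)) {
		/* Discard an EDID that yields no modes, as in the hunk
		 * above, so later code falls back cleanly. */
		free(edid);
		edid = NULL;
	}
	if (!edid)
		puts("no usable EDID, falling back to the fixed mode");
	else
		free(edid);
	return 0;
}
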
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6..9b0d9a867ae 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
512 return 0; 512 return 0;
513 513
514err_out: 514err_out:
515 iounmap(opregion->header); 515 iounmap(base);
516 return err; 516 return err;
517} 517}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219..02ff0a481f4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
946{ 946{
947 int uv_hscale = uv_hsubsampling(rec->flags); 947 int uv_hscale = uv_hsubsampling(rec->flags);
948 int uv_vscale = uv_vsubsampling(rec->flags); 948 int uv_vscale = uv_vsubsampling(rec->flags);
949 u32 stride_mask, depth, tmp; 949 u32 stride_mask;
950 int depth;
951 u32 tmp;
950 952
951 /* check src dimensions */ 953 /* check src dimensions */
952 if (IS_845G(dev) || IS_I830(dev)) { 954 if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae..b83306f9244 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev,
177 177
178 I915_WRITE_CTL(ring, 178 I915_WRITE_CTL(ring,
179 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) 179 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
180 | RING_NO_REPORT | RING_VALID); 180 | RING_REPORT_64K | RING_VALID);
181 181
182 head = I915_READ_HEAD(ring) & HEAD_ADDR; 182 head = I915_READ_HEAD(ring) & HEAD_ADDR;
183 /* If the head is still not zero, the ring is dead */ 183 /* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
654 i915_gem_object_unpin(ring->gem_object); 654 i915_gem_object_unpin(ring->gem_object);
655 drm_gem_object_unreference(ring->gem_object); 655 drm_gem_object_unreference(ring->gem_object);
656 ring->gem_object = NULL; 656 ring->gem_object = NULL;
657
658 if (ring->cleanup)
659 ring->cleanup(ring);
660
657 cleanup_status_page(dev, ring); 661 cleanup_status_page(dev, ring);
658} 662}
659 663
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
688{ 692{
689 unsigned long end; 693 unsigned long end;
690 drm_i915_private_t *dev_priv = dev->dev_private; 694 drm_i915_private_t *dev_priv = dev->dev_private;
695 u32 head;
696
697 head = intel_read_status_page(ring, 4);
698 if (head) {
699 ring->head = head & HEAD_ADDR;
700 ring->space = ring->head - (ring->tail + 8);
701 if (ring->space < 0)
702 ring->space += ring->size;
703 if (ring->space >= n)
704 return 0;
705 }
691 706
692 trace_i915_ring_wait_begin (dev); 707 trace_i915_ring_wait_begin (dev);
693 end = jiffies + 3 * HZ; 708 end = jiffies + 3 * HZ;
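
The added fast path above recomputes the free space from the hardware-reported head before falling into the polling loop. A small userspace sketch of that wraparound computation, with made-up head/tail/size values:

#include <stdio.h>

/* Free space between tail and head of a circular buffer, mirroring the
 * ring->space computation added above; the 8-byte slack keeps head and
 * tail from ever meeting exactly. */
static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);

	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	printf("wrapped: %d\n", ring_space(256, 1024, 4096));	/* 3320 */
	printf("linear:  %d\n", ring_space(2048, 1024, 4096));	/* 1016 */
	return 0;
}
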
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
854 /* do nothing */ 869 /* do nothing */
855} 870}
856 871
872
873/* Workaround for some steppings of SNB:
874 * each time the BLT engine ring tail is moved,
875 * the first command parsed from the ring
876 * should be MI_BATCH_BUFFER_START.
877 */
878#define NEED_BLT_WORKAROUND(dev) \
879 (IS_GEN6(dev) && (dev->pdev->revision < 8))
880
881static inline struct drm_i915_gem_object *
882to_blt_workaround(struct intel_ring_buffer *ring)
883{
884 return ring->private;
885}
886
887static int blt_ring_init(struct drm_device *dev,
888 struct intel_ring_buffer *ring)
889{
890 if (NEED_BLT_WORKAROUND(dev)) {
891 struct drm_i915_gem_object *obj;
892 u32 __iomem *ptr;
893 int ret;
894
895 obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
896 if (obj == NULL)
897 return -ENOMEM;
898
899 ret = i915_gem_object_pin(&obj->base, 4096);
900 if (ret) {
901 drm_gem_object_unreference(&obj->base);
902 return ret;
903 }
904
905 ptr = kmap(obj->pages[0]);
906 iowrite32(MI_BATCH_BUFFER_END, ptr);
907 iowrite32(MI_NOOP, ptr+1);
908 kunmap(obj->pages[0]);
909
910 ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
911 if (ret) {
912 i915_gem_object_unpin(&obj->base);
913 drm_gem_object_unreference(&obj->base);
914 return ret;
915 }
916
917 ring->private = obj;
918 }
919
920 return init_ring_common(dev, ring);
921}
922
923static void blt_ring_begin(struct drm_device *dev,
924 struct intel_ring_buffer *ring,
925 int num_dwords)
926{
927 if (ring->private) {
928 intel_ring_begin(dev, ring, num_dwords+2);
929 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
930 intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
931 } else
932 intel_ring_begin(dev, ring, 4);
933}
934
935static void blt_ring_flush(struct drm_device *dev,
936 struct intel_ring_buffer *ring,
937 u32 invalidate_domains,
938 u32 flush_domains)
939{
940 blt_ring_begin(dev, ring, 4);
941 intel_ring_emit(dev, ring, MI_FLUSH_DW);
942 intel_ring_emit(dev, ring, 0);
943 intel_ring_emit(dev, ring, 0);
944 intel_ring_emit(dev, ring, 0);
945 intel_ring_advance(dev, ring);
946}
947
948static u32
949blt_ring_add_request(struct drm_device *dev,
950 struct intel_ring_buffer *ring,
951 u32 flush_domains)
952{
953 u32 seqno = i915_gem_get_seqno(dev);
954
955 blt_ring_begin(dev, ring, 4);
956 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
957 intel_ring_emit(dev, ring,
958 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
959 intel_ring_emit(dev, ring, seqno);
960 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
961 intel_ring_advance(dev, ring);
962
963 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
964 return seqno;
965}
966
967static void blt_ring_cleanup(struct intel_ring_buffer *ring)
968{
969 if (!ring->private)
970 return;
971
972 i915_gem_object_unpin(ring->private);
973 drm_gem_object_unreference(ring->private);
974 ring->private = NULL;
975}
976
857static const struct intel_ring_buffer gen6_blt_ring = { 977static const struct intel_ring_buffer gen6_blt_ring = {
858 .name = "blt ring", 978 .name = "blt ring",
859 .id = RING_BLT, 979 .id = RING_BLT,
860 .mmio_base = BLT_RING_BASE, 980 .mmio_base = BLT_RING_BASE,
861 .size = 32 * PAGE_SIZE, 981 .size = 32 * PAGE_SIZE,
862 .init = init_ring_common, 982 .init = blt_ring_init,
863 .write_tail = ring_write_tail, 983 .write_tail = ring_write_tail,
864 .flush = gen6_ring_flush, 984 .flush = blt_ring_flush,
865 .add_request = ring_add_request, 985 .add_request = blt_ring_add_request,
866 .get_seqno = ring_status_page_get_seqno, 986 .get_seqno = ring_status_page_get_seqno,
867 .user_irq_get = blt_ring_get_user_irq, 987 .user_irq_get = blt_ring_get_user_irq,
868 .user_irq_put = blt_ring_put_user_irq, 988 .user_irq_put = blt_ring_put_user_irq,
869 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, 989 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
990 .cleanup = blt_ring_cleanup,
870}; 991};
871 992
872int intel_init_render_ring_buffer(struct drm_device *dev) 993int intel_init_render_ring_buffer(struct drm_device *dev)
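
The workaround added above pre-builds a pinned page containing MI_BATCH_BUFFER_END and prefixes every BLT submission with an MI_BATCH_BUFFER_START pointing at it. A toy userspace sketch of that prefixing idea; the opcode values and offset here are placeholders, not the real MI_* encodings:

#include <stdio.h>
#include <stdint.h>

/* Placeholder opcode values, for illustration only; the real MI_*
 * encodings are defined in the driver headers. */
#define MI_BATCH_BUFFER_START 0x31000000u
#define MI_BATCH_BUFFER_END   0x0a000000u
#define MI_NOOP               0x00000000u

static uint32_t ring[64];
static int ring_tail;

static void ring_emit(uint32_t dw)
{
	ring[ring_tail++] = dw;
}

/* Mirrors the blt_ring_begin() idea: when the workaround applies, every
 * batch of commands is prefixed with a BATCH_BUFFER_START pointing at a
 * pre-built page that holds only MI_BATCH_BUFFER_END, so the parser
 * always sees a batch start first. */
static void blt_begin(int workaround, uint32_t dummy_batch_offset)
{
	if (workaround) {
		ring_emit(MI_BATCH_BUFFER_START);
		ring_emit(dummy_batch_offset);
	}
}

int main(void)
{
	blt_begin(1, 0x1000);
	ring_emit(MI_NOOP);
	ring_emit(MI_BATCH_BUFFER_END);

	for (int i = 0; i < ring_tail; i++)
		printf("dw[%d] = 0x%08x\n", i, (unsigned)ring[i]);
	return 0;
}
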
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e576..3126c268198 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
63 struct drm_i915_gem_execbuffer2 *exec, 63 struct drm_i915_gem_execbuffer2 *exec,
64 struct drm_clip_rect *cliprects, 64 struct drm_clip_rect *cliprects,
65 uint64_t exec_offset); 65 uint64_t exec_offset);
66 void (*cleanup)(struct intel_ring_buffer *ring);
66 67
67 /** 68 /**
68 * List of objects currently involved in rendering from the 69 * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct intel_ring_buffer {
98 99
99 wait_queue_head_t irq_queue; 100 wait_queue_head_t irq_queue;
100 drm_local_map_t map; 101 drm_local_map_t map;
102
103 void *private;
101}; 104};
102 105
103static inline u32 106static inline u32
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f12a5b3ec05..488c36c8f5e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2033,7 +2033,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
2033 u32 grbm_int_cntl = 0; 2033 u32 grbm_int_cntl = 0;
2034 2034
2035 if (!rdev->irq.installed) { 2035 if (!rdev->irq.installed) {
2036 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 2036 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2037 return -EINVAL; 2037 return -EINVAL;
2038 } 2038 }
2039 /* don't enable anything if the ih is disabled */ 2039 /* don't enable anything if the ih is disabled */
@@ -2295,6 +2295,7 @@ restart_ih:
2295 case 0: /* D1 vblank */ 2295 case 0: /* D1 vblank */
2296 if (disp_int & LB_D1_VBLANK_INTERRUPT) { 2296 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2297 drm_handle_vblank(rdev->ddev, 0); 2297 drm_handle_vblank(rdev->ddev, 0);
2298 rdev->pm.vblank_sync = true;
2298 wake_up(&rdev->irq.vblank_queue); 2299 wake_up(&rdev->irq.vblank_queue);
2299 disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2300 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2300 DRM_DEBUG("IH: D1 vblank\n"); 2301 DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2317,7 @@ restart_ih:
2316 case 0: /* D2 vblank */ 2317 case 0: /* D2 vblank */
2317 if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 2318 if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
2318 drm_handle_vblank(rdev->ddev, 1); 2319 drm_handle_vblank(rdev->ddev, 1);
2320 rdev->pm.vblank_sync = true;
2319 wake_up(&rdev->irq.vblank_queue); 2321 wake_up(&rdev->irq.vblank_queue);
2320 disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; 2322 disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
2321 DRM_DEBUG("IH: D2 vblank\n"); 2323 DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2339,7 @@ restart_ih:
2337 case 0: /* D3 vblank */ 2339 case 0: /* D3 vblank */
2338 if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 2340 if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
2339 drm_handle_vblank(rdev->ddev, 2); 2341 drm_handle_vblank(rdev->ddev, 2);
2342 rdev->pm.vblank_sync = true;
2340 wake_up(&rdev->irq.vblank_queue); 2343 wake_up(&rdev->irq.vblank_queue);
2341 disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; 2344 disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
2342 DRM_DEBUG("IH: D3 vblank\n"); 2345 DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2361,7 @@ restart_ih:
2358 case 0: /* D4 vblank */ 2361 case 0: /* D4 vblank */
2359 if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 2362 if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
2360 drm_handle_vblank(rdev->ddev, 3); 2363 drm_handle_vblank(rdev->ddev, 3);
2364 rdev->pm.vblank_sync = true;
2361 wake_up(&rdev->irq.vblank_queue); 2365 wake_up(&rdev->irq.vblank_queue);
2362 disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 2366 disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
2363 DRM_DEBUG("IH: D4 vblank\n"); 2367 DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2383,7 @@ restart_ih:
2379 case 0: /* D5 vblank */ 2383 case 0: /* D5 vblank */
2380 if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 2384 if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
2381 drm_handle_vblank(rdev->ddev, 4); 2385 drm_handle_vblank(rdev->ddev, 4);
2386 rdev->pm.vblank_sync = true;
2382 wake_up(&rdev->irq.vblank_queue); 2387 wake_up(&rdev->irq.vblank_queue);
2383 disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 2388 disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
2384 DRM_DEBUG("IH: D5 vblank\n"); 2389 DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2405,7 @@ restart_ih:
2400 case 0: /* D6 vblank */ 2405 case 0: /* D6 vblank */
2401 if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 2406 if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
2402 drm_handle_vblank(rdev->ddev, 5); 2407 drm_handle_vblank(rdev->ddev, 5);
2408 rdev->pm.vblank_sync = true;
2403 wake_up(&rdev->irq.vblank_queue); 2409 wake_up(&rdev->irq.vblank_queue);
2404 disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 2410 disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
2405 DRM_DEBUG("IH: D6 vblank\n"); 2411 DRM_DEBUG("IH: D6 vblank\n");
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0e8f28a6892..8e10aa9f74b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
442 int r; 442 int r;
443 443
444 if (rdev->gart.table.ram.ptr) { 444 if (rdev->gart.table.ram.ptr) {
445 WARN(1, "R100 PCI GART already initialized.\n"); 445 WARN(1, "R100 PCI GART already initialized\n");
446 return 0; 446 return 0;
447 } 447 }
448 /* Initialize common gart structure */ 448 /* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
516 uint32_t tmp = 0; 516 uint32_t tmp = 0;
517 517
518 if (!rdev->irq.installed) { 518 if (!rdev->irq.installed) {
519 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 519 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
520 WREG32(R_000040_GEN_INT_CNTL, 0); 520 WREG32(R_000040_GEN_INT_CNTL, 0);
521 return -EINVAL; 521 return -EINVAL;
522 } 522 }
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 34527e600fe..cde1d3480d9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
91 int r; 91 int r;
92 92
93 if (rdev->gart.table.vram.robj) { 93 if (rdev->gart.table.vram.robj) {
94 WARN(1, "RV370 PCIE GART already initialized.\n"); 94 WARN(1, "RV370 PCIE GART already initialized\n");
95 return 0; 95 return 0;
96 } 96 }
97 /* Initialize common gart structure */ 97 /* Initialize common gart structure */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 33952a12f0a..0f806cc7dc7 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
97{ 97{
98 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> 98 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
99 ASIC_T_SHIFT; 99 ASIC_T_SHIFT;
100 u32 actual_temp = 0;
101 100
102 if ((temp >> 7) & 1) 101 return temp * 1000;
103 actual_temp = 0;
104 else
105 actual_temp = (temp >> 1) & 0xff;
106
107 return actual_temp * 1000;
108} 102}
109 103
110void r600_pm_get_dynpm_state(struct radeon_device *rdev) 104void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
919 int r; 913 int r;
920 914
921 if (rdev->gart.table.vram.robj) { 915 if (rdev->gart.table.vram.robj) {
922 WARN(1, "R600 PCIE GART already initialized.\n"); 916 WARN(1, "R600 PCIE GART already initialized\n");
923 return 0; 917 return 0;
924 } 918 }
925 /* Initialize common gart structure */ 919 /* Initialize common gart structure */
@@ -2995,7 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev)
2995 u32 hdmi1, hdmi2; 2989 u32 hdmi1, hdmi2;
2996 2990
2997 if (!rdev->irq.installed) { 2991 if (!rdev->irq.installed) {
2998 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 2992 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2999 return -EINVAL; 2993 return -EINVAL;
3000 } 2994 }
3001 /* don't enable anything if the ih is disabled */ 2995 /* don't enable anything if the ih is disabled */
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 04cac7ec903..87ead090c7d 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -526,8 +526,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
526 if (crev < 2) 526 if (crev < 2)
527 return false; 527 return false;
528 528
529 router.valid = false;
530
531 obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); 529 obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
532 path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) 530 path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
533 (ctx->bios + data_offset + 531 (ctx->bios + data_offset +
@@ -624,6 +622,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
624 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 622 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
625 continue; 623 continue;
626 624
625 router.ddc_valid = false;
626 router.cd_valid = false;
627 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { 627 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
628 uint8_t grph_obj_id, grph_obj_num, grph_obj_type; 628 uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
629 629
@@ -647,9 +647,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
647 usDeviceTag)); 647 usDeviceTag));
648 648
649 } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { 649 } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
650 router.valid = false;
651 for (k = 0; k < router_obj->ucNumberOfObjects; k++) { 650 for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
652 u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID); 651 u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
653 if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { 652 if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
654 ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) 653 ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
655 (ctx->bios + data_offset + 654 (ctx->bios + data_offset +
@@ -657,6 +656,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
657 ATOM_I2C_RECORD *i2c_record; 656 ATOM_I2C_RECORD *i2c_record;
658 ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; 657 ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
659 ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; 658 ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
659 ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
660 ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = 660 ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
661 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) 661 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
662 (ctx->bios + data_offset + 662 (ctx->bios + data_offset +
@@ -690,10 +690,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
690 case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: 690 case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
691 ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) 691 ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
692 record; 692 record;
693 router.valid = true; 693 router.ddc_valid = true;
694 router.mux_type = ddc_path->ucMuxType; 694 router.ddc_mux_type = ddc_path->ucMuxType;
695 router.mux_control_pin = ddc_path->ucMuxControlPin; 695 router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
696 router.mux_state = ddc_path->ucMuxState[enum_id]; 696 router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
697 break;
698 case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
699 cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
700 record;
701 router.cd_valid = true;
702 router.cd_mux_type = cd_path->ucMuxType;
703 router.cd_mux_control_pin = cd_path->ucMuxControlPin;
704 router.cd_mux_state = cd_path->ucMuxState[enum_id];
697 break; 705 break;
698 } 706 }
699 record = (ATOM_COMMON_RECORD_HEADER *) 707 record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +868,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
860 size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; 868 size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
861 struct radeon_router router; 869 struct radeon_router router;
862 870
863 router.valid = false; 871 router.ddc_valid = false;
872 router.cd_valid = false;
864 873
865 bios_connectors = kzalloc(bc_size, GFP_KERNEL); 874 bios_connectors = kzalloc(bc_size, GFP_KERNEL);
866 if (!bios_connectors) 875 if (!bios_connectors)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4dac4b0a02e..fe6c74780f1 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
183 continue; 183 continue;
184 184
185 if (priority == true) { 185 if (priority == true) {
186 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); 186 DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
187 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); 187 DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
188 conflict->status = connector_status_disconnected; 188 conflict->status = connector_status_disconnected;
189 radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); 189 radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
190 } else { 190 } else {
191 DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); 191 DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
192 DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict)); 192 DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
193 current_status = connector_status_disconnected; 193 current_status = connector_status_disconnected;
194 } 194 }
195 break; 195 break;
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
432 mode->vdisplay == native_mode->vdisplay) { 432 mode->vdisplay == native_mode->vdisplay) {
433 *native_mode = *mode; 433 *native_mode = *mode;
434 drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); 434 drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
435 DRM_INFO("Determined LVDS native mode details from EDID\n"); 435 DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
436 break; 436 break;
437 } 437 }
438 } 438 }
439 } 439 }
440 if (!native_mode->clock) { 440 if (!native_mode->clock) {
441 DRM_INFO("No LVDS native mode details, disabling RMX\n"); 441 DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
442 radeon_encoder->rmx_type = RMX_OFF; 442 radeon_encoder->rmx_type = RMX_OFF;
443 } 443 }
444} 444}
@@ -1116,7 +1116,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1116 radeon_connector->shared_ddc = true; 1116 radeon_connector->shared_ddc = true;
1117 shared_ddc = true; 1117 shared_ddc = true;
1118 } 1118 }
1119 if (radeon_connector->router_bus && router->valid && 1119 if (radeon_connector->router_bus && router->ddc_valid &&
1120 (radeon_connector->router.router_id == router->router_id)) { 1120 (radeon_connector->router.router_id == router->router_id)) {
1121 radeon_connector->shared_ddc = false; 1121 radeon_connector->shared_ddc = false;
1122 shared_ddc = false; 1122 shared_ddc = false;
@@ -1136,7 +1136,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1136 radeon_connector->connector_object_id = connector_object_id; 1136 radeon_connector->connector_object_id = connector_object_id;
1137 radeon_connector->hpd = *hpd; 1137 radeon_connector->hpd = *hpd;
1138 radeon_connector->router = *router; 1138 radeon_connector->router = *router;
1139 if (router->valid) { 1139 if (router->ddc_valid || router->cd_valid) {
1140 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); 1140 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
1141 if (!radeon_connector->router_bus) 1141 if (!radeon_connector->router_bus)
1142 goto failed; 1142 goto failed;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0383631da69..1df4dc6c063 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
315 radeon_connector->ddc_bus->rec.en_data_reg, 315 radeon_connector->ddc_bus->rec.en_data_reg,
316 radeon_connector->ddc_bus->rec.y_clk_reg, 316 radeon_connector->ddc_bus->rec.y_clk_reg,
317 radeon_connector->ddc_bus->rec.y_data_reg); 317 radeon_connector->ddc_bus->rec.y_data_reg);
318 if (radeon_connector->router_bus) 318 if (radeon_connector->router.ddc_valid)
319 DRM_INFO(" DDC Router 0x%x/0x%x\n", 319 DRM_INFO(" DDC Router 0x%x/0x%x\n",
320 radeon_connector->router.mux_control_pin, 320 radeon_connector->router.ddc_mux_control_pin,
321 radeon_connector->router.mux_state); 321 radeon_connector->router.ddc_mux_state);
322 if (radeon_connector->router.cd_valid)
323 DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
324 radeon_connector->router.cd_mux_control_pin,
325 radeon_connector->router.cd_mux_state);
322 } else { 326 } else {
323 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || 327 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
324 connector->connector_type == DRM_MODE_CONNECTOR_DVII || 328 connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
398 int ret = 0; 402 int ret = 0;
399 403
400 /* on hw with routers, select right port */ 404 /* on hw with routers, select right port */
401 if (radeon_connector->router.valid) 405 if (radeon_connector->router.ddc_valid)
402 radeon_router_select_port(radeon_connector); 406 radeon_router_select_ddc_port(radeon_connector);
403 407
404 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 408 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
405 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 409 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
432 int ret = 0; 436 int ret = 0;
433 437
434 /* on hw with routers, select right port */ 438 /* on hw with routers, select right port */
435 if (radeon_connector->router.valid) 439 if (radeon_connector->router.ddc_valid)
436 radeon_router_select_port(radeon_connector); 440 radeon_router_select_ddc_port(radeon_connector);
437 441
438 if (!radeon_connector->ddc_bus) 442 if (!radeon_connector->ddc_bus)
439 return -1; 443 return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ae58b6849a2..f678257c42e 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1520,6 +1520,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
1520static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) 1520static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1521{ 1521{
1522 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1522 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1523 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1523 1524
1524 if (radeon_encoder->active_device & 1525 if (radeon_encoder->active_device &
1525 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { 1526 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1532,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1531 radeon_atom_output_lock(encoder, true); 1532 radeon_atom_output_lock(encoder, true);
1532 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1533 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1533 1534
1535 /* select the clock/data port if it uses a router */
1536 if (connector) {
1537 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1538 if (radeon_connector->router.cd_valid)
1539 radeon_router_select_cd_port(radeon_connector);
1540 }
1541
1534 /* this is needed for the pll/ss setup to work correctly in some cases */ 1542 /* this is needed for the pll/ss setup to work correctly in some cases */
1535 atombios_set_encoder_crtc_source(encoder); 1543 atombios_set_encoder_crtc_source(encoder);
1536} 1544}
@@ -1547,6 +1555,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1547 struct radeon_device *rdev = dev->dev_private; 1555 struct radeon_device *rdev = dev->dev_private;
1548 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1556 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1549 struct radeon_encoder_atom_dig *dig; 1557 struct radeon_encoder_atom_dig *dig;
1558
1559 /* check for pre-DCE3 cards with shared encoders;
1560 * can't really use the links individually, so don't disable
1561 * the encoder if it's in use by another connector
1562 */
1563 if (!ASIC_IS_DCE3(rdev)) {
1564 struct drm_encoder *other_encoder;
1565 struct radeon_encoder *other_radeon_encoder;
1566
1567 list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
1568 other_radeon_encoder = to_radeon_encoder(other_encoder);
1569 if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
1570 drm_helper_encoder_in_use(other_encoder))
1571 goto disable_done;
1572 }
1573 }
1574
1550 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1575 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1551 1576
1552 switch (radeon_encoder->encoder_id) { 1577 switch (radeon_encoder->encoder_id) {
@@ -1586,6 +1611,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1586 break; 1611 break;
1587 } 1612 }
1588 1613
1614disable_done:
1589 if (radeon_encoder_is_digital(encoder)) { 1615 if (radeon_encoder_is_digital(encoder)) {
1590 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) 1616 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
1591 r600_hdmi_disable(encoder); 1617 r600_hdmi_disable(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 216392d0353..daacb281dfa 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -240,7 +240,8 @@ retry:
240 */ 240 */
241 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { 241 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
242 /* good news we believe it's a lockup */ 242 /* good news we believe it's a lockup */
243 WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); 243 WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
244 fence->seq, seq);
244 /* FIXME: what should we do ? marking everyone 245 /* FIXME: what should we do ? marking everyone
245 * as signaled for now 246 * as signaled for now
246 */ 247 */
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6a13ee38a5b..0cfbba02c4d 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
53 }; 53 };
54 54
55 /* on hw with routers, select right port */ 55 /* on hw with routers, select right port */
56 if (radeon_connector->router.valid) 56 if (radeon_connector->router.ddc_valid)
57 radeon_router_select_port(radeon_connector); 57 radeon_router_select_ddc_port(radeon_connector);
58 58
59 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); 59 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
60 if (ret == 2) 60 if (ret == 2)
@@ -1084,26 +1084,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
1084 addr, val); 1084 addr, val);
1085} 1085}
1086 1086
1087/* router switching */ 1087/* ddc router switching */
1088void radeon_router_select_port(struct radeon_connector *radeon_connector) 1088void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
1089{ 1089{
1090 u8 val; 1090 u8 val;
1091 1091
1092 if (!radeon_connector->router.valid) 1092 if (!radeon_connector->router.ddc_valid)
1093 return; 1093 return;
1094 1094
1095 radeon_i2c_get_byte(radeon_connector->router_bus, 1095 radeon_i2c_get_byte(radeon_connector->router_bus,
1096 radeon_connector->router.i2c_addr, 1096 radeon_connector->router.i2c_addr,
1097 0x3, &val); 1097 0x3, &val);
1098 val &= radeon_connector->router.mux_control_pin; 1098 val &= ~radeon_connector->router.ddc_mux_control_pin;
1099 radeon_i2c_put_byte(radeon_connector->router_bus, 1099 radeon_i2c_put_byte(radeon_connector->router_bus,
1100 radeon_connector->router.i2c_addr, 1100 radeon_connector->router.i2c_addr,
1101 0x3, val); 1101 0x3, val);
1102 radeon_i2c_get_byte(radeon_connector->router_bus, 1102 radeon_i2c_get_byte(radeon_connector->router_bus,
1103 radeon_connector->router.i2c_addr, 1103 radeon_connector->router.i2c_addr,
1104 0x1, &val); 1104 0x1, &val);
1105 val &= radeon_connector->router.mux_control_pin; 1105 val &= ~radeon_connector->router.ddc_mux_control_pin;
1106 val |= radeon_connector->router.mux_state; 1106 val |= radeon_connector->router.ddc_mux_state;
1107 radeon_i2c_put_byte(radeon_connector->router_bus,
1108 radeon_connector->router.i2c_addr,
1109 0x1, val);
1110}
1111
1112/* clock/data router switching */
1113void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
1114{
1115 u8 val;
1116
1117 if (!radeon_connector->router.cd_valid)
1118 return;
1119
1120 radeon_i2c_get_byte(radeon_connector->router_bus,
1121 radeon_connector->router.i2c_addr,
1122 0x3, &val);
1123 val &= ~radeon_connector->router.cd_mux_control_pin;
1124 radeon_i2c_put_byte(radeon_connector->router_bus,
1125 radeon_connector->router.i2c_addr,
1126 0x3, val);
1127 radeon_i2c_get_byte(radeon_connector->router_bus,
1128 radeon_connector->router.i2c_addr,
1129 0x1, &val);
1130 val &= ~radeon_connector->router.cd_mux_control_pin;
1131 val |= radeon_connector->router.cd_mux_state;
1107 radeon_i2c_put_byte(radeon_connector->router_bus, 1132 radeon_i2c_put_byte(radeon_connector->router_bus,
1108 radeon_connector->router.i2c_addr, 1133 radeon_connector->router.i2c_addr,
1109 0x1, val); 1134 0x1, val);
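
The key fix in the mux switching above is the mask polarity: the mux control bits are now cleared with ~mux_control_pin before the new state is OR'ed in. A small userspace sketch contrasting the old and new read-modify-write, using made-up register and mux values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up values; the real ones come from the ATOM router records. */
	uint8_t reg = 0xa5;              /* current register contents */
	uint8_t mux_control_pin = 0x0c;  /* bits this router owns */
	uint8_t mux_state = 0x08;        /* state to program into them */

	uint8_t old_way = reg & mux_control_pin;                /* keeps only the mux bits */
	uint8_t new_way = (reg & ~mux_control_pin) | mux_state; /* clears mux bits, then sets state */

	printf("old: 0x%02x, new: 0x%02x\n", (unsigned)old_way, (unsigned)new_way);
	return 0;
}
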
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92457163d07..680f57644e8 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -401,13 +401,19 @@ struct radeon_hpd {
401}; 401};
402 402
403struct radeon_router { 403struct radeon_router {
404 bool valid;
405 u32 router_id; 404 u32 router_id;
406 struct radeon_i2c_bus_rec i2c_info; 405 struct radeon_i2c_bus_rec i2c_info;
407 u8 i2c_addr; 406 u8 i2c_addr;
408 u8 mux_type; 407 /* i2c mux */
409 u8 mux_control_pin; 408 bool ddc_valid;
410 u8 mux_state; 409 u8 ddc_mux_type;
410 u8 ddc_mux_control_pin;
411 u8 ddc_mux_state;
412 /* clock/data mux */
413 bool cd_valid;
414 u8 cd_mux_type;
415 u8 cd_mux_control_pin;
416 u8 cd_mux_state;
411}; 417};
412 418
413struct radeon_connector { 419struct radeon_connector {
@@ -488,7 +494,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
488 u8 slave_addr, 494 u8 slave_addr,
489 u8 addr, 495 u8 addr,
490 u8 val); 496 u8 val);
491extern void radeon_router_select_port(struct radeon_connector *radeon_connector); 497extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
498extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
492extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 499extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
493extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 500extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
494 501
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d7ab9141641..8eb18346601 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
102 type = ttm_bo_type_device; 102 type = ttm_bo_type_device;
103 } 103 }
104 *bo_ptr = NULL; 104 *bo_ptr = NULL;
105
106retry:
105 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); 107 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
106 if (bo == NULL) 108 if (bo == NULL)
107 return -ENOMEM; 109 return -ENOMEM;
@@ -109,8 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
109 bo->gobj = gobj; 111 bo->gobj = gobj;
110 bo->surface_reg = -1; 112 bo->surface_reg = -1;
111 INIT_LIST_HEAD(&bo->list); 113 INIT_LIST_HEAD(&bo->list);
112
113retry:
114 radeon_ttm_placement_from_domain(bo, domain); 114 radeon_ttm_placement_from_domain(bo, domain);
115 /* Kernel allocation are uninterruptible */ 115 /* Kernel allocation are uninterruptible */
116 mutex_lock(&rdev->vram_mutex); 116 mutex_lock(&rdev->vram_mutex);
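
Moving the retry label above the kzalloc() means each placement retry starts from a freshly allocated, zeroed object instead of reusing one that may already have been torn down. A userspace sketch of that retry-with-fresh-allocation pattern, with a stand-in placement step that fails once:

#include <stdio.h>
#include <stdlib.h>

struct bo { int domain; int placed; };

/* Stand-in placement step; fails on the first attempt to force a retry. */
static int try_place(struct bo *bo, int domain)
{
	static int attempts;

	bo->domain = domain;
	bo->placed = (++attempts > 1);
	return bo->placed ? 0 : -1;
}

int main(void)
{
	struct bo *bo;
	int domain = 1;

retry:
	/* The allocation sits under the retry label, so every attempt
	 * starts from a freshly zeroed object. */
	bo = calloc(1, sizeof(*bo));
	if (!bo)
		return 1;
	if (try_place(bo, domain)) {
		free(bo);
		domain = 2;	/* fall back to another domain and retry */
		goto retry;
	}
	printf("placed in domain %d\n", bo->domain);
	free(bo);
	return 0;
}
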
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fe95bb35317..01c2c736a1d 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
689 gtt = container_of(backend, struct radeon_ttm_backend, backend); 689 gtt = container_of(backend, struct radeon_ttm_backend, backend);
690 gtt->offset = bo_mem->start << PAGE_SHIFT; 690 gtt->offset = bo_mem->start << PAGE_SHIFT;
691 if (!gtt->num_pages) { 691 if (!gtt->num_pages) {
692 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); 692 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
693 gtt->num_pages, bo_mem, backend);
693 } 694 }
694 r = radeon_gart_bind(gtt->rdev, gtt->offset, 695 r = radeon_gart_bind(gtt->rdev, gtt->offset,
695 gtt->num_pages, gtt->pages); 696 gtt->num_pages, gtt->pages);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index f683e51a2a0..5512e4e5e63 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev)
78 int r; 78 int r;
79 79
80 if (rdev->gart.table.ram.ptr) { 80 if (rdev->gart.table.ram.ptr) {
81 WARN(1, "RS400 GART already initialized.\n"); 81 WARN(1, "RS400 GART already initialized\n");
82 return 0; 82 return 0;
83 } 83 }
84 /* Check gart size */ 84 /* Check gart size */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b091a1f6fa4..f1c6e02c2e6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev)
375 int r; 375 int r;
376 376
377 if (rdev->gart.table.vram.robj) { 377 if (rdev->gart.table.vram.robj) {
378 WARN(1, "RS600 GART already initialized.\n"); 378 WARN(1, "RS600 GART already initialized\n");
379 return 0; 379 return 0;
380 } 380 }
381 /* Initialize common gart structure */ 381 /* Initialize common gart structure */
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev)
505 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); 505 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
506 506
507 if (!rdev->irq.installed) { 507 if (!rdev->irq.installed) {
508 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 508 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
509 WREG32(R_000040_GEN_INT_CNTL, 0); 509 WREG32(R_000040_GEN_INT_CNTL, 0);
510 return -EINVAL; 510 return -EINVAL;
511 } 511 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a1cb783c713..3ca77dc0391 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@
27/* 27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30/* Notes:
31 *
32 * We store bo pointer in drm_mm_node struct so we know which bo own a
33 * specific node. There is no protection on the pointer, thus to make
34 * sure things don't go berserk you have to access this pointer while
35 * holding the global lru lock and make sure anytime you free a node you
36 * reset the pointer to NULL.
37 */
38 30
39#include "ttm/ttm_module.h" 31#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h" 32#include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
45#include <linux/mm.h> 37#include <linux/mm.h>
46#include <linux/file.h> 38#include <linux/file.h>
47#include <linux/module.h> 39#include <linux/module.h>
40#include <asm/atomic.h>
48 41
49#define TTM_ASSERT_LOCKED(param) 42#define TTM_ASSERT_LOCKED(param)
50#define TTM_DEBUG(fmt, arg...) 43#define TTM_DEBUG(fmt, arg...)
@@ -452,6 +445,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
452 ttm_bo_mem_put(bo, &bo->mem); 445 ttm_bo_mem_put(bo, &bo->mem);
453 446
454 atomic_set(&bo->reserved, 0); 447 atomic_set(&bo->reserved, 0);
448
449 /*
450 * Make processes trying to reserve really pick it up.
451 */
452 smp_mb__after_atomic_dec();
455 wake_up_all(&bo->event_queue); 453 wake_up_all(&bo->event_queue);
456} 454}
457 455
@@ -460,7 +458,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
460 struct ttm_bo_device *bdev = bo->bdev; 458 struct ttm_bo_device *bdev = bo->bdev;
461 struct ttm_bo_global *glob = bo->glob; 459 struct ttm_bo_global *glob = bo->glob;
462 struct ttm_bo_driver *driver; 460 struct ttm_bo_driver *driver;
463 void *sync_obj; 461 void *sync_obj = NULL;
464 void *sync_obj_arg; 462 void *sync_obj_arg;
465 int put_count; 463 int put_count;
466 int ret; 464 int ret;
@@ -495,17 +493,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
495 spin_lock(&glob->lru_lock); 493 spin_lock(&glob->lru_lock);
496 } 494 }
497queue: 495queue:
498 sync_obj = bo->sync_obj;
499 sync_obj_arg = bo->sync_obj_arg;
500 driver = bdev->driver; 496 driver = bdev->driver;
497 if (bo->sync_obj)
498 sync_obj = driver->sync_obj_ref(bo->sync_obj);
499 sync_obj_arg = bo->sync_obj_arg;
501 500
502 kref_get(&bo->list_kref); 501 kref_get(&bo->list_kref);
503 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 502 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
504 spin_unlock(&glob->lru_lock); 503 spin_unlock(&glob->lru_lock);
505 spin_unlock(&bo->lock); 504 spin_unlock(&bo->lock);
506 505
507 if (sync_obj) 506 if (sync_obj) {
508 driver->sync_obj_flush(sync_obj, sync_obj_arg); 507 driver->sync_obj_flush(sync_obj, sync_obj_arg);
508 driver->sync_obj_unref(&sync_obj);
509 }
509 schedule_delayed_work(&bdev->wq, 510 schedule_delayed_work(&bdev->wq,
510 ((HZ / 100) < 1) ? 1 : HZ / 100); 511 ((HZ / 100) < 1) ? 1 : HZ / 100);
511} 512}
@@ -822,7 +823,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
822 bool no_wait_gpu) 823 bool no_wait_gpu)
823{ 824{
824 struct ttm_bo_device *bdev = bo->bdev; 825 struct ttm_bo_device *bdev = bo->bdev;
825 struct ttm_bo_global *glob = bdev->glob;
826 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 826 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
827 int ret; 827 int ret;
828 828
@@ -832,12 +832,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
832 return ret; 832 return ret;
833 if (mem->mm_node) 833 if (mem->mm_node)
834 break; 834 break;
835 spin_lock(&glob->lru_lock);
836 if (list_empty(&man->lru)) {
837 spin_unlock(&glob->lru_lock);
838 break;
839 }
840 spin_unlock(&glob->lru_lock);
841 ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 835 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
842 no_wait_reserve, no_wait_gpu); 836 no_wait_reserve, no_wait_gpu);
843 if (unlikely(ret != 0)) 837 if (unlikely(ret != 0))
@@ -1125,35 +1119,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
1125int ttm_bo_check_placement(struct ttm_buffer_object *bo, 1119int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1126 struct ttm_placement *placement) 1120 struct ttm_placement *placement)
1127{ 1121{
1128 int i; 1122 BUG_ON((placement->fpfn || placement->lpfn) &&
1123 (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1129 1124
1130 if (placement->fpfn || placement->lpfn) {
1131 if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
1132 printk(KERN_ERR TTM_PFX "Page number range to small "
1133 "Need %lu pages, range is [%u, %u]\n",
1134 bo->mem.num_pages, placement->fpfn,
1135 placement->lpfn);
1136 return -EINVAL;
1137 }
1138 }
1139 for (i = 0; i < placement->num_placement; i++) {
1140 if (!capable(CAP_SYS_ADMIN)) {
1141 if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
1142 printk(KERN_ERR TTM_PFX "Need to be root to "
1143 "modify NO_EVICT status.\n");
1144 return -EINVAL;
1145 }
1146 }
1147 }
1148 for (i = 0; i < placement->num_busy_placement; i++) {
1149 if (!capable(CAP_SYS_ADMIN)) {
1150 if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
1151 printk(KERN_ERR TTM_PFX "Need to be root to "
1152 "modify NO_EVICT status.\n");
1153 return -EINVAL;
1154 }
1155 }
1156 }
1157 return 0; 1125 return 0;
1158} 1126}
1159 1127
@@ -1176,6 +1144,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1176 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1144 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1177 if (num_pages == 0) { 1145 if (num_pages == 0) {
1178 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); 1146 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1147 if (destroy)
1148 (*destroy)(bo);
1149 else
1150 kfree(bo);
1179 return -EINVAL; 1151 return -EINVAL;
1180 } 1152 }
1181 bo->destroy = destroy; 1153 bo->destroy = destroy;
@@ -1369,18 +1341,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1369 int ret = -EINVAL; 1341 int ret = -EINVAL;
1370 struct ttm_mem_type_manager *man; 1342 struct ttm_mem_type_manager *man;
1371 1343
1372 if (type >= TTM_NUM_MEM_TYPES) { 1344 BUG_ON(type >= TTM_NUM_MEM_TYPES);
1373 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1374 return ret;
1375 }
1376
1377 man = &bdev->man[type]; 1345 man = &bdev->man[type];
1378 if (man->has_type) { 1346 BUG_ON(man->has_type);
1379 printk(KERN_ERR TTM_PFX
1380 "Memory manager already initialized for type %d\n",
1381 type);
1382 return ret;
1383 }
1384 1347
1385 ret = bdev->driver->init_mem_type(bdev, type, man); 1348 ret = bdev->driver->init_mem_type(bdev, type, man);
1386 if (ret) 1349 if (ret)
@@ -1389,13 +1352,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1389 1352
1390 ret = 0; 1353 ret = 0;
1391 if (type != TTM_PL_SYSTEM) { 1354 if (type != TTM_PL_SYSTEM) {
1392 if (!p_size) {
1393 printk(KERN_ERR TTM_PFX
1394 "Zero size memory manager type %d\n",
1395 type);
1396 return ret;
1397 }
1398
1399 ret = (*man->func->init)(man, p_size); 1355 ret = (*man->func->init)(man, p_size);
1400 if (ret) 1356 if (ret)
1401 return ret; 1357 return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 7410c190c89..038e947d00f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,20 +31,29 @@
31#include "ttm/ttm_module.h" 31#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h" 32#include "ttm/ttm_bo_driver.h"
33#include "ttm/ttm_placement.h" 33#include "ttm/ttm_placement.h"
34#include <linux/jiffies.h> 34#include "drm_mm.h"
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/sched.h> 36#include <linux/spinlock.h>
37#include <linux/mm.h>
38#include <linux/file.h>
39#include <linux/module.h> 37#include <linux/module.h>
40 38
39/**
40 * Currently we use a spinlock for the lock, but a mutex *may* be
41 * more appropriate to reduce scheduling latency if the range manager
42 * ends up with very fragmented allocation patterns.
43 */
44
45struct ttm_range_manager {
46 struct drm_mm mm;
47 spinlock_t lock;
48};
49
41static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, 50static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
42 struct ttm_buffer_object *bo, 51 struct ttm_buffer_object *bo,
43 struct ttm_placement *placement, 52 struct ttm_placement *placement,
44 struct ttm_mem_reg *mem) 53 struct ttm_mem_reg *mem)
45{ 54{
46 struct ttm_bo_global *glob = man->bdev->glob; 55 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
47 struct drm_mm *mm = man->priv; 56 struct drm_mm *mm = &rman->mm;
48 struct drm_mm_node *node = NULL; 57 struct drm_mm_node *node = NULL;
49 unsigned long lpfn; 58 unsigned long lpfn;
50 int ret; 59 int ret;
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
57 if (unlikely(ret)) 66 if (unlikely(ret))
58 return ret; 67 return ret;
59 68
60 spin_lock(&glob->lru_lock); 69 spin_lock(&rman->lock);
61 node = drm_mm_search_free_in_range(mm, 70 node = drm_mm_search_free_in_range(mm,
62 mem->num_pages, mem->page_alignment, 71 mem->num_pages, mem->page_alignment,
63 placement->fpfn, lpfn, 1); 72 placement->fpfn, lpfn, 1);
64 if (unlikely(node == NULL)) { 73 if (unlikely(node == NULL)) {
65 spin_unlock(&glob->lru_lock); 74 spin_unlock(&rman->lock);
66 return 0; 75 return 0;
67 } 76 }
68 node = drm_mm_get_block_atomic_range(node, mem->num_pages, 77 node = drm_mm_get_block_atomic_range(node, mem->num_pages,
69 mem->page_alignment, 78 mem->page_alignment,
70 placement->fpfn, 79 placement->fpfn,
71 lpfn); 80 lpfn);
72 spin_unlock(&glob->lru_lock); 81 spin_unlock(&rman->lock);
73 } while (node == NULL); 82 } while (node == NULL);
74 83
75 mem->mm_node = node; 84 mem->mm_node = node;
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
80static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, 89static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
81 struct ttm_mem_reg *mem) 90 struct ttm_mem_reg *mem)
82{ 91{
83 struct ttm_bo_global *glob = man->bdev->glob; 92 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
84 93
85 if (mem->mm_node) { 94 if (mem->mm_node) {
86 spin_lock(&glob->lru_lock); 95 spin_lock(&rman->lock);
87 drm_mm_put_block(mem->mm_node); 96 drm_mm_put_block(mem->mm_node);
88 spin_unlock(&glob->lru_lock); 97 spin_unlock(&rman->lock);
89 mem->mm_node = NULL; 98 mem->mm_node = NULL;
90 } 99 }
91} 100}
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
93static int ttm_bo_man_init(struct ttm_mem_type_manager *man, 102static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
94 unsigned long p_size) 103 unsigned long p_size)
95{ 104{
96 struct drm_mm *mm; 105 struct ttm_range_manager *rman;
97 int ret; 106 int ret;
98 107
99 mm = kzalloc(sizeof(*mm), GFP_KERNEL); 108 rman = kzalloc(sizeof(*rman), GFP_KERNEL);
100 if (!mm) 109 if (!rman)
101 return -ENOMEM; 110 return -ENOMEM;
102 111
103 ret = drm_mm_init(mm, 0, p_size); 112 ret = drm_mm_init(&rman->mm, 0, p_size);
104 if (ret) { 113 if (ret) {
105 kfree(mm); 114 kfree(rman);
106 return ret; 115 return ret;
107 } 116 }
108 117
109 man->priv = mm; 118 spin_lock_init(&rman->lock);
119 man->priv = rman;
110 return 0; 120 return 0;
111} 121}
112 122
113static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) 123static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
114{ 124{
115 struct ttm_bo_global *glob = man->bdev->glob; 125 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
116 struct drm_mm *mm = man->priv; 126 struct drm_mm *mm = &rman->mm;
117 int ret = 0;
118 127
119 spin_lock(&glob->lru_lock); 128 spin_lock(&rman->lock);
120 if (drm_mm_clean(mm)) { 129 if (drm_mm_clean(mm)) {
121 drm_mm_takedown(mm); 130 drm_mm_takedown(mm);
122 kfree(mm); 131 spin_unlock(&rman->lock);
132 kfree(rman);
123 man->priv = NULL; 133 man->priv = NULL;
124 } else 134 return 0;
125 ret = -EBUSY; 135 }
126 spin_unlock(&glob->lru_lock); 136 spin_unlock(&rman->lock);
127 return ret; 137 return -EBUSY;
128} 138}
129 139
130static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, 140static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
131 const char *prefix) 141 const char *prefix)
132{ 142{
133 struct ttm_bo_global *glob = man->bdev->glob; 143 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
134 struct drm_mm *mm = man->priv;
135 144
136 spin_lock(&glob->lru_lock); 145 spin_lock(&rman->lock);
137 drm_mm_debug_table(mm, prefix); 146 drm_mm_debug_table(&rman->mm, prefix);
138 spin_unlock(&glob->lru_lock); 147 spin_unlock(&rman->lock);
139} 148}
140 149
141const struct ttm_mem_type_manager_func ttm_bo_manager_func = { 150const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
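
The range manager above now carries its own spinlock next to the drm_mm it protects instead of borrowing the global LRU lock. A toy userspace analogue, with a trivial bump allocator standing in for drm_mm:

#include <pthread.h>
#include <stdio.h>

/* Toy analogue of ttm_range_manager: the allocator state and the lock
 * protecting it live together, instead of relying on a global lock. */
struct range_manager {
	pthread_spinlock_t lock;
	unsigned long next;
	unsigned long size;
};

static int rman_init(struct range_manager *rman, unsigned long size)
{
	if (pthread_spin_init(&rman->lock, PTHREAD_PROCESS_PRIVATE))
		return -1;
	rman->next = 0;
	rman->size = size;
	return 0;
}

static long rman_get(struct range_manager *rman, unsigned long pages)
{
	long start = -1;

	pthread_spin_lock(&rman->lock);
	if (rman->next + pages <= rman->size) {
		start = (long)rman->next;
		rman->next += pages;
	}
	pthread_spin_unlock(&rman->lock);
	return start;
}

int main(void)
{
	struct range_manager rman;
	long a, b;

	if (rman_init(&rman, 1024))
		return 1;
	a = rman_get(&rman, 256);
	b = rman_get(&rman, 256);
	printf("first: %ld, second: %ld\n", a, b);
	return 0;
}
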
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a7bab87a548..af789dc869b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
440 return ret; 440 return ret;
441 441
442 ret = be->func->bind(be, bo_mem); 442 ret = be->func->bind(be, bo_mem);
443 if (ret) { 443 if (unlikely(ret != 0))
444 printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
445 return ret; 444 return ret;
446 }
447 445
448 ttm->state = tt_bound; 446 ttm->state = tt_bound;
449 447
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 9b5b4d9dd62..3e038a394c5 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
235 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - 235 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
236 first_pfn + 1; 236 first_pfn + 1;
237 237
238 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) 238 vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
239 if (NULL == vsg->pages)
239 return -ENOMEM; 240 return -ENOMEM;
240 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
241 down_read(&current->mm->mmap_sem); 241 down_read(&current->mm->mmap_sem);
242 ret = get_user_pages(current, current->mm, 242 ret = get_user_pages(current, current->mm,
243 (unsigned long)xfer->mem_addr, 243 (unsigned long)xfer->mem_addr,
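The via_dmablit change above collapses a vmalloc() plus memset() pair into vzalloc(), which returns already-zeroed memory or NULL on failure. A minimal sketch of the same pattern, with a hypothetical helper name:

#include <linux/vmalloc.h>
#include <linux/mm_types.h>

/* Hypothetical helper, not from the patch: allocate a zeroed page-pointer
 * array the way via_lock_all_dma_pages() now does. */
static struct page **alloc_page_array(unsigned long num_pages)
{
	return vzalloc(num_pages * sizeof(struct page *));	/* NULL on failure */
}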
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 51d9f9f1d7f..76954e3528c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
691 691
692 fence_rep.error = ret; 692 fence_rep.error = ret;
693 fence_rep.fence_seq = (uint64_t) sequence; 693 fence_rep.fence_seq = (uint64_t) sequence;
694 fence_rep.pad64 = 0;
694 695
695 user_fence_rep = (struct drm_vmw_fence_rep __user *) 696 user_fence_rep = (struct drm_vmw_fence_rep __user *)
696 (unsigned long)arg->fence_rep; 697 (unsigned long)arg->fence_rep;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87c6e6156d7..cceeb42789b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
720 &vmw_vram_ne_placement, 720 &vmw_vram_ne_placement,
721 false, &vmw_dmabuf_bo_free); 721 false, &vmw_dmabuf_bo_free);
722 vmw_overlay_resume_all(dev_priv); 722 vmw_overlay_resume_all(dev_priv);
723 if (unlikely(ret != 0))
724 vfbs->buffer = NULL;
723 725
724 return ret; 726 return ret;
725} 727}
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
730 struct vmw_framebuffer_surface *vfbs = 732 struct vmw_framebuffer_surface *vfbs =
731 vmw_framebuffer_to_vfbs(&vfb->base); 733 vmw_framebuffer_to_vfbs(&vfb->base);
732 734
735 if (unlikely(vfbs->buffer == NULL))
736 return 0;
737
733 bo = &vfbs->buffer->base; 738 bo = &vfbs->buffer->base;
734 ttm_bo_unref(&bo); 739 ttm_bo_unref(&bo);
735 vfbs->buffer = NULL; 740 vfbs->buffer = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a01c47ddb5b..29113c9b26a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
557 return -EINVAL; 557 return -EINVAL;
558 } 558 }
559 559
560 dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv)); 560 dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
561 561
562 if (!dev_priv->ldu_priv) 562 if (!dev_priv->ldu_priv)
563 return -ENOMEM; 563 return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index df2036ed18d..f1a52f9e729 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
585 return -ENOSYS; 585 return -ENOSYS;
586 } 586 }
587 587
588 overlay = kmalloc(GFP_KERNEL, sizeof(*overlay)); 588 overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
589 if (!overlay) 589 if (!overlay)
590 return -ENOMEM; 590 return -ENOMEM;
591 591
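Both vmwgfx hunks above fix the same bug: kmalloc() takes the size first and the GFP flags second, so kmalloc(GFP_KERNEL, sizeof(*p)) allocates the wrong number of bytes and passes a bogus flags value. A short sketch of the corrected call, using a made-up structure:

#include <linux/slab.h>

struct example_priv {		/* hypothetical structure */
	int dummy;
};

static struct example_priv *example_alloc(void)
{
	/* size first, GFP flags second */
	return kmalloc(sizeof(struct example_priv), GFP_KERNEL);
}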
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 742c423567c..0e1edd7311f 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -3,6 +3,9 @@ config STUB_POULSBO
3 depends on PCI 3 depends on PCI
4 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled 4 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
5 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 5 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
6 select VIDEO_OUTPUT_CONTROL if ACPI
7 select BACKLIGHT_CLASS_DEVICE if ACPI
8 select INPUT if ACPI
6 select ACPI_VIDEO if ACPI 9 select ACPI_VIDEO if ACPI
7 help 10 help
8 Choose this option if you have a system that has Intel GMA500 11 Choose this option if you have a system that has Intel GMA500
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 1e4c21fc1a8..86d822aa9bb 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -178,11 +178,13 @@ static int ad7414_probe(struct i2c_client *client,
178{ 178{
179 struct ad7414_data *data; 179 struct ad7414_data *data;
180 int conf; 180 int conf;
181 int err = 0; 181 int err;
182 182
183 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | 183 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
184 I2C_FUNC_SMBUS_READ_WORD_DATA)) 184 I2C_FUNC_SMBUS_READ_WORD_DATA)) {
185 err = -EOPNOTSUPP;
185 goto exit; 186 goto exit;
187 }
186 188
187 data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL); 189 data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL);
188 if (!data) { 190 if (!data) {
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 9e775717abb..87d92a56a93 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1286,8 +1286,10 @@ static int adt7470_probe(struct i2c_client *client,
1286 init_completion(&data->auto_update_stop); 1286 init_completion(&data->auto_update_stop);
1287 data->auto_update = kthread_run(adt7470_update_thread, client, 1287 data->auto_update = kthread_run(adt7470_update_thread, client,
1288 dev_name(data->hwmon_dev)); 1288 dev_name(data->hwmon_dev));
1289 if (IS_ERR(data->auto_update)) 1289 if (IS_ERR(data->auto_update)) {
1290 err = PTR_ERR(data->auto_update);
1290 goto exit_unregister; 1291 goto exit_unregister;
1292 }
1291 1293
1292 return 0; 1294 return 0;
1293 1295
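The ad7414 and adt7470 hunks above make failing probe paths return a real error code instead of falling through with err left at zero. A small sketch of the kthread_run() case, with placeholder names:

#include <linux/kthread.h>
#include <linux/err.h>

/* Placeholder thread function and caller; only the error handling matters. */
static int example_update_thread(void *arg)
{
	return 0;
}

static int example_start_thread(void *arg)
{
	struct task_struct *task;

	task = kthread_run(example_update_thread, arg, "example-update");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* propagate the real error */

	return 0;
}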
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index fa9708c2d72..4033974d1bb 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -4,7 +4,7 @@
4 Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si> 4 Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si>
5 5
6 Based on max6650.c: 6 Based on max6650.c:
7 Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de> 7 Copyright (C) 2007 Hans J. Koch <hjk@hansjkoch.de>
8 8
9 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by 10 it under the terms of the GNU General Public License as published by
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index aa701a18370..f141a1de519 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -376,10 +376,6 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
376 } 376 }
377 } 377 }
378 378
379 err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
380 if (err)
381 goto err_free_gpio;
382
383 fan_data->num_ctrl = num_ctrl; 379 fan_data->num_ctrl = num_ctrl;
384 fan_data->ctrl = ctrl; 380 fan_data->ctrl = ctrl;
385 fan_data->num_speed = pdata->num_speed; 381 fan_data->num_speed = pdata->num_speed;
@@ -391,6 +387,10 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
391 goto err_free_gpio; 387 goto err_free_gpio;
392 } 388 }
393 389
390 err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
391 if (err)
392 goto err_free_gpio;
393
394 return 0; 394 return 0;
395 395
396err_free_gpio: 396err_free_gpio:
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 6669255aadc..c9ed14eba5a 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -20,7 +20,7 @@
20 Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org> 20 Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org>
21 Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab 21 Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab
22 22
23 Modified for mainline integration by Hans J. Koch <hjk@linutronix.de> 23 Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de>
24 Copyright (c) 2007 Hans J. Koch, Linutronix GmbH 24 Copyright (c) 2007 Hans J. Koch, Linutronix GmbH
25 25
26 This program is free software; you can redistribute it and/or modify 26 This program is free software; you can redistribute it and/or modify
@@ -2629,7 +2629,7 @@ static void __exit lm93_exit(void)
2629} 2629}
2630 2630
2631MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, " 2631MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, "
2632 "Hans J. Koch <hjk@linutronix.de"); 2632 "Hans J. Koch <hjk@hansjkoch.de>");
2633MODULE_DESCRIPTION("LM93 driver"); 2633MODULE_DESCRIPTION("LM93 driver");
2634MODULE_LICENSE("GPL"); 2634MODULE_LICENSE("GPL");
2635 2635
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 464340f2549..4546d82f024 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -128,9 +128,12 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
128{ 128{
129 struct i2c_client *client = to_i2c_client(dev); 129 struct i2c_client *client = to_i2c_client(dev);
130 struct lm95241_data *data = i2c_get_clientdata(client); 130 struct lm95241_data *data = i2c_get_clientdata(client);
131 unsigned long val;
131 132
132 strict_strtol(buf, 10, &data->interval); 133 if (strict_strtoul(buf, 10, &val) < 0)
133 data->interval = data->interval * HZ / 1000; 134 return -EINVAL;
135
136 data->interval = val * HZ / 1000;
134 137
135 return count; 138 return count;
136} 139}
@@ -188,7 +191,9 @@ static ssize_t set_type##flag(struct device *dev, \
188 struct lm95241_data *data = i2c_get_clientdata(client); \ 191 struct lm95241_data *data = i2c_get_clientdata(client); \
189\ 192\
190 long val; \ 193 long val; \
191 strict_strtol(buf, 10, &val); \ 194\
195 if (strict_strtol(buf, 10, &val) < 0) \
196 return -EINVAL; \
192\ 197\
193 if ((val == 1) || (val == 2)) { \ 198 if ((val == 1) || (val == 2)) { \
194\ 199\
@@ -227,7 +232,9 @@ static ssize_t set_min##flag(struct device *dev, \
227 struct lm95241_data *data = i2c_get_clientdata(client); \ 232 struct lm95241_data *data = i2c_get_clientdata(client); \
228\ 233\
229 long val; \ 234 long val; \
230 strict_strtol(buf, 10, &val); \ 235\
236 if (strict_strtol(buf, 10, &val) < 0) \
237 return -EINVAL;\
231\ 238\
232 mutex_lock(&data->update_lock); \ 239 mutex_lock(&data->update_lock); \
233\ 240\
@@ -256,7 +263,9 @@ static ssize_t set_max##flag(struct device *dev, \
256 struct lm95241_data *data = i2c_get_clientdata(client); \ 263 struct lm95241_data *data = i2c_get_clientdata(client); \
257\ 264\
258 long val; \ 265 long val; \
259 strict_strtol(buf, 10, &val); \ 266\
267 if (strict_strtol(buf, 10, &val) < 0) \
268 return -EINVAL; \
260\ 269\
261 mutex_lock(&data->update_lock); \ 270 mutex_lock(&data->update_lock); \
262\ 271\
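The lm95241 hunks above stop ignoring the return value of strict_strtol()/strict_strtoul(); malformed sysfs input now yields -EINVAL instead of being applied as an undefined value. A minimal store-handler sketch with a hypothetical attribute:

#include <linux/device.h>

static ssize_t set_example(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	unsigned long val;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	/* ... apply 'val' under the driver's update lock ... */

	return count;
}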
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 26762617867..4b50601027d 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -82,7 +82,7 @@ static struct ltc4261_data *ltc4261_update_device(struct device *dev)
82 val = i2c_smbus_read_byte_data(client, i); 82 val = i2c_smbus_read_byte_data(client, i);
83 if (unlikely(val < 0)) { 83 if (unlikely(val < 0)) {
84 dev_dbg(dev, 84 dev_dbg(dev,
85 "Failed to read ADC value: error %d", 85 "Failed to read ADC value: error %d\n",
86 val); 86 val);
87 ret = ERR_PTR(val); 87 ret = ERR_PTR(val);
88 goto abort; 88 goto abort;
@@ -230,8 +230,7 @@ static int ltc4261_probe(struct i2c_client *client,
230 return -ENODEV; 230 return -ENODEV;
231 231
232 if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) { 232 if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) {
233 dev_err(&client->dev, "Failed to read register %d:%02x:%02x\n", 233 dev_err(&client->dev, "Failed to read status register\n");
234 adapter->id, client->addr, LTC4261_STATUS);
235 return -ENODEV; 234 return -ENODEV;
236 } 235 }
237 236
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index a0160ee5cae..9a11532ecae 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -2,7 +2,7 @@
2 * max6650.c - Part of lm_sensors, Linux kernel modules for hardware 2 * max6650.c - Part of lm_sensors, Linux kernel modules for hardware
3 * monitoring. 3 * monitoring.
4 * 4 *
5 * (C) 2007 by Hans J. Koch <hjk@linutronix.de> 5 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
6 * 6 *
7 * based on code written by John Morris <john.morris@spirentcom.com> 7 * based on code written by John Morris <john.morris@spirentcom.com>
8 * Copyright (c) 2003 Spirent Communications 8 * Copyright (c) 2003 Spirent Communications
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 1d840aa8378..cdbc7448491 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -165,10 +165,14 @@ static const u8 IN_LSB_SHIFT_IDX[][2] = {
165 165
166#define W83795_REG_VID_CTRL 0x6A 166#define W83795_REG_VID_CTRL 0x6A
167 167
168#define W83795_REG_ALARM_CTRL 0x40
169#define ALARM_CTRL_RTSACS (1 << 7)
168#define W83795_REG_ALARM(index) (0x41 + (index)) 170#define W83795_REG_ALARM(index) (0x41 + (index))
171#define W83795_REG_CLR_CHASSIS 0x4D
169#define W83795_REG_BEEP(index) (0x50 + (index)) 172#define W83795_REG_BEEP(index) (0x50 + (index))
170 173
171#define W83795_REG_CLR_CHASSIS 0x4D 174#define W83795_REG_OVT_CFG 0x58
175#define OVT_CFG_SEL (1 << 7)
172 176
173 177
174#define W83795_REG_FCMS1 0x201 178#define W83795_REG_FCMS1 0x201
@@ -178,6 +182,14 @@ static const u8 IN_LSB_SHIFT_IDX[][2] = {
178 182
179#define W83795_REG_TSS(index) (0x209 + (index)) 183#define W83795_REG_TSS(index) (0x209 + (index))
180 184
185#define TSS_MAP_RESERVED 0xff
186static const u8 tss_map[4][6] = {
187 { 0, 1, 2, 3, 4, 5},
188 { 6, 7, 8, 9, 0, 1},
189 {10, 11, 12, 13, 2, 3},
190 { 4, 5, 4, 5, TSS_MAP_RESERVED, TSS_MAP_RESERVED},
191};
192
181#define PWM_OUTPUT 0 193#define PWM_OUTPUT 0
182#define PWM_FREQ 1 194#define PWM_FREQ 1
183#define PWM_START 2 195#define PWM_START 2
@@ -369,6 +381,7 @@ struct w83795_data {
369 u8 setup_pwm[3]; /* Register value */ 381 u8 setup_pwm[3]; /* Register value */
370 382
371 u8 alarms[6]; /* Register value */ 383 u8 alarms[6]; /* Register value */
384 u8 enable_beep;
372 u8 beeps[6]; /* Register value */ 385 u8 beeps[6]; /* Register value */
373 386
374 char valid; 387 char valid;
@@ -499,8 +512,11 @@ static void w83795_update_limits(struct i2c_client *client)
499 } 512 }
500 513
501 /* Read beep settings */ 514 /* Read beep settings */
502 for (i = 0; i < ARRAY_SIZE(data->beeps); i++) 515 if (data->enable_beep) {
503 data->beeps[i] = w83795_read(client, W83795_REG_BEEP(i)); 516 for (i = 0; i < ARRAY_SIZE(data->beeps); i++)
517 data->beeps[i] =
518 w83795_read(client, W83795_REG_BEEP(i));
519 }
504 520
505 data->valid_limits = 1; 521 data->valid_limits = 1;
506} 522}
@@ -577,6 +593,7 @@ static struct w83795_data *w83795_update_device(struct device *dev)
577 struct i2c_client *client = to_i2c_client(dev); 593 struct i2c_client *client = to_i2c_client(dev);
578 struct w83795_data *data = i2c_get_clientdata(client); 594 struct w83795_data *data = i2c_get_clientdata(client);
579 u16 tmp; 595 u16 tmp;
596 u8 intrusion;
580 int i; 597 int i;
581 598
582 mutex_lock(&data->update_lock); 599 mutex_lock(&data->update_lock);
@@ -648,9 +665,24 @@ static struct w83795_data *w83795_update_device(struct device *dev)
648 w83795_read(client, W83795_REG_PWM(i, PWM_OUTPUT)); 665 w83795_read(client, W83795_REG_PWM(i, PWM_OUTPUT));
649 } 666 }
650 667
651 /* update alarm */ 668 /* Update intrusion and alarms
669 * It is important to read intrusion first, because reading from
670 * register SMI STS6 clears the interrupt status temporarily. */
671 tmp = w83795_read(client, W83795_REG_ALARM_CTRL);
672 /* Switch to interrupt status for intrusion if needed */
673 if (tmp & ALARM_CTRL_RTSACS)
674 w83795_write(client, W83795_REG_ALARM_CTRL,
675 tmp & ~ALARM_CTRL_RTSACS);
676 intrusion = w83795_read(client, W83795_REG_ALARM(5)) & (1 << 6);
677 /* Switch to real-time alarms */
678 w83795_write(client, W83795_REG_ALARM_CTRL, tmp | ALARM_CTRL_RTSACS);
652 for (i = 0; i < ARRAY_SIZE(data->alarms); i++) 679 for (i = 0; i < ARRAY_SIZE(data->alarms); i++)
653 data->alarms[i] = w83795_read(client, W83795_REG_ALARM(i)); 680 data->alarms[i] = w83795_read(client, W83795_REG_ALARM(i));
681 data->alarms[5] |= intrusion;
682 /* Restore original configuration if needed */
683 if (!(tmp & ALARM_CTRL_RTSACS))
684 w83795_write(client, W83795_REG_ALARM_CTRL,
685 tmp & ~ALARM_CTRL_RTSACS);
654 686
655 data->last_updated = jiffies; 687 data->last_updated = jiffies;
656 data->valid = 1; 688 data->valid = 1;
@@ -730,6 +762,10 @@ store_chassis_clear(struct device *dev,
730 val = w83795_read(client, W83795_REG_CLR_CHASSIS); 762 val = w83795_read(client, W83795_REG_CLR_CHASSIS);
731 val |= 0x80; 763 val |= 0x80;
732 w83795_write(client, W83795_REG_CLR_CHASSIS, val); 764 w83795_write(client, W83795_REG_CLR_CHASSIS, val);
765
766 /* Clear status and force cache refresh */
767 w83795_read(client, W83795_REG_ALARM(5));
768 data->valid = 0;
733 mutex_unlock(&data->update_lock); 769 mutex_unlock(&data->update_lock);
734 return count; 770 return count;
735} 771}
@@ -857,20 +893,20 @@ show_pwm_enable(struct device *dev, struct device_attribute *attr, char *buf)
857 int index = sensor_attr->index; 893 int index = sensor_attr->index;
858 u8 tmp; 894 u8 tmp;
859 895
860 if (1 == (data->pwm_fcms[0] & (1 << index))) { 896 /* Speed cruise mode */
897 if (data->pwm_fcms[0] & (1 << index)) {
861 tmp = 2; 898 tmp = 2;
862 goto out; 899 goto out;
863 } 900 }
901 /* Thermal cruise or SmartFan IV mode */
864 for (tmp = 0; tmp < 6; tmp++) { 902 for (tmp = 0; tmp < 6; tmp++) {
865 if (data->pwm_tfmr[tmp] & (1 << index)) { 903 if (data->pwm_tfmr[tmp] & (1 << index)) {
866 tmp = 3; 904 tmp = 3;
867 goto out; 905 goto out;
868 } 906 }
869 } 907 }
870 if (data->pwm_fomc & (1 << index)) 908 /* Manual mode */
871 tmp = 0; 909 tmp = 1;
872 else
873 tmp = 1;
874 910
875out: 911out:
876 return sprintf(buf, "%u\n", tmp); 912 return sprintf(buf, "%u\n", tmp);
@@ -890,23 +926,21 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
890 926
891 if (strict_strtoul(buf, 10, &val) < 0) 927 if (strict_strtoul(buf, 10, &val) < 0)
892 return -EINVAL; 928 return -EINVAL;
893 if (val > 2) 929 if (val < 1 || val > 2)
894 return -EINVAL; 930 return -EINVAL;
895 931
896 mutex_lock(&data->update_lock); 932 mutex_lock(&data->update_lock);
897 switch (val) { 933 switch (val) {
898 case 0:
899 case 1: 934 case 1:
935 /* Clear speed cruise mode bits */
900 data->pwm_fcms[0] &= ~(1 << index); 936 data->pwm_fcms[0] &= ~(1 << index);
901 w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]); 937 w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]);
938 /* Clear thermal cruise mode bits */
902 for (i = 0; i < 6; i++) { 939 for (i = 0; i < 6; i++) {
903 data->pwm_tfmr[i] &= ~(1 << index); 940 data->pwm_tfmr[i] &= ~(1 << index);
904 w83795_write(client, W83795_REG_TFMR(i), 941 w83795_write(client, W83795_REG_TFMR(i),
905 data->pwm_tfmr[i]); 942 data->pwm_tfmr[i]);
906 } 943 }
907 data->pwm_fomc |= 1 << index;
908 data->pwm_fomc ^= val << index;
909 w83795_write(client, W83795_REG_FOMC, data->pwm_fomc);
910 break; 944 break;
911 case 2: 945 case 2:
912 data->pwm_fcms[0] |= (1 << index); 946 data->pwm_fcms[0] |= (1 << index);
@@ -918,23 +952,60 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
918} 952}
919 953
920static ssize_t 954static ssize_t
955show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
956{
957 struct w83795_data *data = w83795_update_pwm_config(dev);
958 int index = to_sensor_dev_attr_2(attr)->index;
959 unsigned int mode;
960
961 if (data->pwm_fomc & (1 << index))
962 mode = 0; /* DC */
963 else
964 mode = 1; /* PWM */
965
966 return sprintf(buf, "%u\n", mode);
967}
968
969/*
970 * Check whether a given temperature source can ever be useful.
971 * Returns the number of selectable temperature channels which are
972 * enabled.
973 */
974static int w83795_tss_useful(const struct w83795_data *data, int tsrc)
975{
976 int useful = 0, i;
977
978 for (i = 0; i < 4; i++) {
979 if (tss_map[i][tsrc] == TSS_MAP_RESERVED)
980 continue;
981 if (tss_map[i][tsrc] < 6) /* Analog */
982 useful += (data->has_temp >> tss_map[i][tsrc]) & 1;
983 else /* Digital */
984 useful += (data->has_dts >> (tss_map[i][tsrc] - 6)) & 1;
985 }
986
987 return useful;
988}
989
990static ssize_t
921show_temp_src(struct device *dev, struct device_attribute *attr, char *buf) 991show_temp_src(struct device *dev, struct device_attribute *attr, char *buf)
922{ 992{
923 struct sensor_device_attribute_2 *sensor_attr = 993 struct sensor_device_attribute_2 *sensor_attr =
924 to_sensor_dev_attr_2(attr); 994 to_sensor_dev_attr_2(attr);
925 struct w83795_data *data = w83795_update_pwm_config(dev); 995 struct w83795_data *data = w83795_update_pwm_config(dev);
926 int index = sensor_attr->index; 996 int index = sensor_attr->index;
927 u8 val = index / 2; 997 u8 tmp = data->temp_src[index / 2];
928 u8 tmp = data->temp_src[val];
929 998
930 if (index & 1) 999 if (index & 1)
931 val = 4; 1000 tmp >>= 4; /* Pick high nibble */
932 else 1001 else
933 val = 0; 1002 tmp &= 0x0f; /* Pick low nibble */
934 tmp >>= val;
935 tmp &= 0x0f;
936 1003
937 return sprintf(buf, "%u\n", tmp); 1004 /* Look-up the actual temperature channel number */
1005 if (tmp >= 4 || tss_map[tmp][index] == TSS_MAP_RESERVED)
1006 return -EINVAL; /* Shouldn't happen */
1007
1008 return sprintf(buf, "%u\n", (unsigned int)tss_map[tmp][index] + 1);
938} 1009}
939 1010
940static ssize_t 1011static ssize_t
@@ -946,12 +1017,21 @@ store_temp_src(struct device *dev, struct device_attribute *attr,
946 struct sensor_device_attribute_2 *sensor_attr = 1017 struct sensor_device_attribute_2 *sensor_attr =
947 to_sensor_dev_attr_2(attr); 1018 to_sensor_dev_attr_2(attr);
948 int index = sensor_attr->index; 1019 int index = sensor_attr->index;
949 unsigned long tmp; 1020 int tmp;
1021 unsigned long channel;
950 u8 val = index / 2; 1022 u8 val = index / 2;
951 1023
952 if (strict_strtoul(buf, 10, &tmp) < 0) 1024 if (strict_strtoul(buf, 10, &channel) < 0 ||
1025 channel < 1 || channel > 14)
1026 return -EINVAL;
1027
1028 /* Check if request can be fulfilled */
1029 for (tmp = 0; tmp < 4; tmp++) {
1030 if (tss_map[tmp][index] == channel - 1)
1031 break;
1032 }
1033 if (tmp == 4) /* No match */
953 return -EINVAL; 1034 return -EINVAL;
954 tmp = SENSORS_LIMIT(tmp, 0, 15);
955 1035
956 mutex_lock(&data->update_lock); 1036 mutex_lock(&data->update_lock);
957 if (index & 1) { 1037 if (index & 1) {
@@ -1515,7 +1595,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
1515 1595
1516#define NOT_USED -1 1596#define NOT_USED -1
1517 1597
1518/* Don't change the attribute order, _max and _min are accessed by index 1598/* Don't change the attribute order, _max, _min and _beep are accessed by index
1519 * somewhere else in the code */ 1599 * somewhere else in the code */
1520#define SENSOR_ATTR_IN(index) { \ 1600#define SENSOR_ATTR_IN(index) { \
1521 SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \ 1601 SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \
@@ -1530,6 +1610,8 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
1530 show_alarm_beep, store_beep, BEEP_ENABLE, \ 1610 show_alarm_beep, store_beep, BEEP_ENABLE, \
1531 index + ((index > 14) ? 1 : 0)) } 1611 index + ((index > 14) ? 1 : 0)) }
1532 1612
1613/* Don't change the attribute order, _beep is accessed by index
1614 * somewhere else in the code */
1533#define SENSOR_ATTR_FAN(index) { \ 1615#define SENSOR_ATTR_FAN(index) { \
1534 SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \ 1616 SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \
1535 NULL, FAN_INPUT, index - 1), \ 1617 NULL, FAN_INPUT, index - 1), \
@@ -1553,9 +1635,13 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
1553 show_pwm, store_pwm, PWM_FREQ, index - 1), \ 1635 show_pwm, store_pwm, PWM_FREQ, index - 1), \
1554 SENSOR_ATTR_2(pwm##index##_enable, S_IWUSR | S_IRUGO, \ 1636 SENSOR_ATTR_2(pwm##index##_enable, S_IWUSR | S_IRUGO, \
1555 show_pwm_enable, store_pwm_enable, NOT_USED, index - 1), \ 1637 show_pwm_enable, store_pwm_enable, NOT_USED, index - 1), \
1638 SENSOR_ATTR_2(pwm##index##_mode, S_IRUGO, \
1639 show_pwm_mode, NULL, NOT_USED, index - 1), \
1556 SENSOR_ATTR_2(fan##index##_target, S_IWUSR | S_IRUGO, \ 1640 SENSOR_ATTR_2(fan##index##_target, S_IWUSR | S_IRUGO, \
1557 show_fanin, store_fanin, FANIN_TARGET, index - 1) } 1641 show_fanin, store_fanin, FANIN_TARGET, index - 1) }
1558 1642
1643/* Don't change the attribute order, _beep is accessed by index
1644 * somewhere else in the code */
1559#define SENSOR_ATTR_DTS(index) { \ 1645#define SENSOR_ATTR_DTS(index) { \
1560 SENSOR_ATTR_2(temp##index##_type, S_IRUGO , \ 1646 SENSOR_ATTR_2(temp##index##_type, S_IRUGO , \
1561 show_dts_mode, NULL, NOT_USED, index - 7), \ 1647 show_dts_mode, NULL, NOT_USED, index - 7), \
@@ -1574,6 +1660,8 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
1574 SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ 1660 SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \
1575 show_alarm_beep, store_beep, BEEP_ENABLE, index + 17) } 1661 show_alarm_beep, store_beep, BEEP_ENABLE, index + 17) }
1576 1662
1663/* Don't change the attribute order, _beep is accessed by index
1664 * somewhere else in the code */
1577#define SENSOR_ATTR_TEMP(index) { \ 1665#define SENSOR_ATTR_TEMP(index) { \
1578 SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \ 1666 SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \
1579 show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ 1667 show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
@@ -1593,8 +1681,6 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
1593 SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ 1681 SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \
1594 show_alarm_beep, store_beep, BEEP_ENABLE, \ 1682 show_alarm_beep, store_beep, BEEP_ENABLE, \
1595 index + (index > 4 ? 11 : 17)), \ 1683 index + (index > 4 ? 11 : 17)), \
1596 SENSOR_ATTR_2(temp##index##_source_sel, S_IWUSR | S_IRUGO, \
1597 show_temp_src, store_temp_src, NOT_USED, index - 1), \
1598 SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \ 1684 SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \
1599 show_temp_pwm_enable, store_temp_pwm_enable, \ 1685 show_temp_pwm_enable, store_temp_pwm_enable, \
1600 TEMP_PWM_ENABLE, index - 1), \ 1686 TEMP_PWM_ENABLE, index - 1), \
@@ -1680,7 +1766,7 @@ static const struct sensor_device_attribute_2 w83795_fan[][4] = {
1680 SENSOR_ATTR_FAN(14), 1766 SENSOR_ATTR_FAN(14),
1681}; 1767};
1682 1768
1683static const struct sensor_device_attribute_2 w83795_temp[][29] = { 1769static const struct sensor_device_attribute_2 w83795_temp[][28] = {
1684 SENSOR_ATTR_TEMP(1), 1770 SENSOR_ATTR_TEMP(1),
1685 SENSOR_ATTR_TEMP(2), 1771 SENSOR_ATTR_TEMP(2),
1686 SENSOR_ATTR_TEMP(3), 1772 SENSOR_ATTR_TEMP(3),
@@ -1700,7 +1786,7 @@ static const struct sensor_device_attribute_2 w83795_dts[][8] = {
1700 SENSOR_ATTR_DTS(14), 1786 SENSOR_ATTR_DTS(14),
1701}; 1787};
1702 1788
1703static const struct sensor_device_attribute_2 w83795_pwm[][7] = { 1789static const struct sensor_device_attribute_2 w83795_pwm[][8] = {
1704 SENSOR_ATTR_PWM(1), 1790 SENSOR_ATTR_PWM(1),
1705 SENSOR_ATTR_PWM(2), 1791 SENSOR_ATTR_PWM(2),
1706 SENSOR_ATTR_PWM(3), 1792 SENSOR_ATTR_PWM(3),
@@ -1711,13 +1797,24 @@ static const struct sensor_device_attribute_2 w83795_pwm[][7] = {
1711 SENSOR_ATTR_PWM(8), 1797 SENSOR_ATTR_PWM(8),
1712}; 1798};
1713 1799
1800static const struct sensor_device_attribute_2 w83795_tss[6] = {
1801 SENSOR_ATTR_2(temp1_source_sel, S_IWUSR | S_IRUGO,
1802 show_temp_src, store_temp_src, NOT_USED, 0),
1803 SENSOR_ATTR_2(temp2_source_sel, S_IWUSR | S_IRUGO,
1804 show_temp_src, store_temp_src, NOT_USED, 1),
1805 SENSOR_ATTR_2(temp3_source_sel, S_IWUSR | S_IRUGO,
1806 show_temp_src, store_temp_src, NOT_USED, 2),
1807 SENSOR_ATTR_2(temp4_source_sel, S_IWUSR | S_IRUGO,
1808 show_temp_src, store_temp_src, NOT_USED, 3),
1809 SENSOR_ATTR_2(temp5_source_sel, S_IWUSR | S_IRUGO,
1810 show_temp_src, store_temp_src, NOT_USED, 4),
1811 SENSOR_ATTR_2(temp6_source_sel, S_IWUSR | S_IRUGO,
1812 show_temp_src, store_temp_src, NOT_USED, 5),
1813};
1814
1714static const struct sensor_device_attribute_2 sda_single_files[] = { 1815static const struct sensor_device_attribute_2 sda_single_files[] = {
1715 SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep, 1816 SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
1716 store_chassis_clear, ALARM_STATUS, 46), 1817 store_chassis_clear, ALARM_STATUS, 46),
1717 SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep,
1718 store_beep, BEEP_ENABLE, 46),
1719 SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep,
1720 store_beep, BEEP_ENABLE, 47),
1721#ifdef CONFIG_SENSORS_W83795_FANCTRL 1818#ifdef CONFIG_SENSORS_W83795_FANCTRL
1722 SENSOR_ATTR_2(speed_cruise_tolerance, S_IWUSR | S_IRUGO, show_fanin, 1819 SENSOR_ATTR_2(speed_cruise_tolerance, S_IWUSR | S_IRUGO, show_fanin,
1723 store_fanin, FANIN_TOL, NOT_USED), 1820 store_fanin, FANIN_TOL, NOT_USED),
@@ -1730,6 +1827,13 @@ static const struct sensor_device_attribute_2 sda_single_files[] = {
1730#endif 1827#endif
1731}; 1828};
1732 1829
1830static const struct sensor_device_attribute_2 sda_beep_files[] = {
1831 SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep,
1832 store_beep, BEEP_ENABLE, 46),
1833 SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep,
1834 store_beep, BEEP_ENABLE, 47),
1835};
1836
1733/* 1837/*
1734 * Driver interface 1838 * Driver interface
1735 */ 1839 */
@@ -1859,6 +1963,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *,
1859 if (!(data->has_in & (1 << i))) 1963 if (!(data->has_in & (1 << i)))
1860 continue; 1964 continue;
1861 for (j = 0; j < ARRAY_SIZE(w83795_in[0]); j++) { 1965 for (j = 0; j < ARRAY_SIZE(w83795_in[0]); j++) {
1966 if (j == 4 && !data->enable_beep)
1967 continue;
1862 err = fn(dev, &w83795_in[i][j].dev_attr); 1968 err = fn(dev, &w83795_in[i][j].dev_attr);
1863 if (err) 1969 if (err)
1864 return err; 1970 return err;
@@ -1869,18 +1975,37 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *,
1869 if (!(data->has_fan & (1 << i))) 1975 if (!(data->has_fan & (1 << i)))
1870 continue; 1976 continue;
1871 for (j = 0; j < ARRAY_SIZE(w83795_fan[0]); j++) { 1977 for (j = 0; j < ARRAY_SIZE(w83795_fan[0]); j++) {
1978 if (j == 3 && !data->enable_beep)
1979 continue;
1872 err = fn(dev, &w83795_fan[i][j].dev_attr); 1980 err = fn(dev, &w83795_fan[i][j].dev_attr);
1873 if (err) 1981 if (err)
1874 return err; 1982 return err;
1875 } 1983 }
1876 } 1984 }
1877 1985
1986 for (i = 0; i < ARRAY_SIZE(w83795_tss); i++) {
1987 j = w83795_tss_useful(data, i);
1988 if (!j)
1989 continue;
1990 err = fn(dev, &w83795_tss[i].dev_attr);
1991 if (err)
1992 return err;
1993 }
1994
1878 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) { 1995 for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) {
1879 err = fn(dev, &sda_single_files[i].dev_attr); 1996 err = fn(dev, &sda_single_files[i].dev_attr);
1880 if (err) 1997 if (err)
1881 return err; 1998 return err;
1882 } 1999 }
1883 2000
2001 if (data->enable_beep) {
2002 for (i = 0; i < ARRAY_SIZE(sda_beep_files); i++) {
2003 err = fn(dev, &sda_beep_files[i].dev_attr);
2004 if (err)
2005 return err;
2006 }
2007 }
2008
1884#ifdef CONFIG_SENSORS_W83795_FANCTRL 2009#ifdef CONFIG_SENSORS_W83795_FANCTRL
1885 for (i = 0; i < data->has_pwm; i++) { 2010 for (i = 0; i < data->has_pwm; i++) {
1886 for (j = 0; j < ARRAY_SIZE(w83795_pwm[0]); j++) { 2011 for (j = 0; j < ARRAY_SIZE(w83795_pwm[0]); j++) {
@@ -1899,6 +2024,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *,
1899#else 2024#else
1900 for (j = 0; j < 8; j++) { 2025 for (j = 0; j < 8; j++) {
1901#endif 2026#endif
2027 if (j == 7 && !data->enable_beep)
2028 continue;
1902 err = fn(dev, &w83795_temp[i][j].dev_attr); 2029 err = fn(dev, &w83795_temp[i][j].dev_attr);
1903 if (err) 2030 if (err)
1904 return err; 2031 return err;
@@ -1910,6 +2037,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *,
1910 if (!(data->has_dts & (1 << i))) 2037 if (!(data->has_dts & (1 << i)))
1911 continue; 2038 continue;
1912 for (j = 0; j < ARRAY_SIZE(w83795_dts[0]); j++) { 2039 for (j = 0; j < ARRAY_SIZE(w83795_dts[0]); j++) {
2040 if (j == 7 && !data->enable_beep)
2041 continue;
1913 err = fn(dev, &w83795_dts[i][j].dev_attr); 2042 err = fn(dev, &w83795_dts[i][j].dev_attr);
1914 if (err) 2043 if (err)
1915 return err; 2044 return err;
@@ -2049,6 +2178,18 @@ static int w83795_probe(struct i2c_client *client,
2049 else 2178 else
2050 data->has_pwm = 2; 2179 data->has_pwm = 2;
2051 2180
2181 /* Check if BEEP pin is available */
2182 if (data->chip_type == w83795g) {
2183 /* The W83795G has a dedicated BEEP pin */
2184 data->enable_beep = 1;
2185 } else {
2186 /* The W83795ADG has a shared pin for OVT# and BEEP, so you
2187 * can't have both */
2188 tmp = w83795_read(client, W83795_REG_OVT_CFG);
2189 if ((tmp & OVT_CFG_SEL) == 0)
2190 data->enable_beep = 1;
2191 }
2192
2052 err = w83795_handle_files(dev, device_create_file); 2193 err = w83795_handle_files(dev, device_create_file);
2053 if (err) 2194 if (err)
2054 goto exit_remove; 2195 goto exit_remove;
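Among the w83795 changes above, the alarm update now saves W83795_REG_ALARM_CTRL, temporarily clears ALARM_CTRL_RTSACS to read the intrusion bit, and restores the original setting afterwards. A generic, hedged sketch of that save/flip/restore sequence using plain SMBus helpers (the helper name and register arguments are made up; the real driver uses its own banked read/write routines):

#include <linux/i2c.h>

static int read_status_with_bit_cleared(struct i2c_client *client,
					u8 ctrl_reg, u8 bit, u8 status_reg)
{
	int ctrl, status;

	ctrl = i2c_smbus_read_byte_data(client, ctrl_reg);
	if (ctrl < 0)
		return ctrl;
	if (ctrl & bit)			/* switch modes only if needed */
		i2c_smbus_write_byte_data(client, ctrl_reg, ctrl & ~bit);
	status = i2c_smbus_read_byte_data(client, status_reg);
	if (ctrl & bit)			/* restore the original configuration */
		i2c_smbus_write_byte_data(client, ctrl_reg, ctrl);
	return status;
}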
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index d231f683f57..6b4cc567645 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -848,6 +848,18 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
848 goto out_list; 848 goto out_list;
849 } 849 }
850 850
851 /* Sanity checks */
852 if (unlikely(adap->name[0] == '\0')) {
853 pr_err("i2c-core: Attempt to register an adapter with "
854 "no name!\n");
855 return -EINVAL;
856 }
857 if (unlikely(!adap->algo)) {
858 pr_err("i2c-core: Attempt to register adapter '%s' with "
859 "no algo!\n", adap->name);
860 return -EINVAL;
861 }
862
851 rt_mutex_init(&adap->bus_lock); 863 rt_mutex_init(&adap->bus_lock);
852 mutex_init(&adap->userspace_clients_lock); 864 mutex_init(&adap->userspace_clients_lock);
853 INIT_LIST_HEAD(&adap->userspace_clients); 865 INIT_LIST_HEAD(&adap->userspace_clients);
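The i2c-core hunk above rejects adapters registered without a name or an algo. A minimal registration sketch showing the two checked fields being set (all names here are hypothetical):

#include <linux/module.h>
#include <linux/i2c.h>

static const struct i2c_algorithm example_algo = {
	/* .master_xfer / .smbus_xfer and .functionality filled in elsewhere */
};

static struct i2c_adapter example_adapter;

static int example_add_adapter(struct device *parent)
{
	snprintf(example_adapter.name, sizeof(example_adapter.name),
		 "example adapter");			/* must not be empty */
	example_adapter.owner = THIS_MODULE;
	example_adapter.algo = &example_algo;		/* must not be NULL */
	example_adapter.dev.parent = parent;
	return i2c_add_adapter(&example_adapter);
}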
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index d32a4843fc3..d7a4833be41 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -120,7 +120,6 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
120 snprintf(priv->adap.name, sizeof(priv->adap.name), 120 snprintf(priv->adap.name, sizeof(priv->adap.name),
121 "i2c-%d-mux (chan_id %d)", i2c_adapter_id(parent), chan_id); 121 "i2c-%d-mux (chan_id %d)", i2c_adapter_id(parent), chan_id);
122 priv->adap.owner = THIS_MODULE; 122 priv->adap.owner = THIS_MODULE;
123 priv->adap.id = parent->id;
124 priv->adap.algo = &priv->algo; 123 priv->adap.algo = &priv->algo;
125 priv->adap.algo_data = priv; 124 priv->adap.algo_data = priv;
126 priv->adap.dev.parent = &parent->dev; 125 priv->adap.dev.parent = &parent->dev;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d092ef9291d..7f26ca6ecf7 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -74,6 +74,7 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
74 * dev->event_lock held and interrupts disabled. 74 * dev->event_lock held and interrupts disabled.
75 */ 75 */
76static void input_pass_event(struct input_dev *dev, 76static void input_pass_event(struct input_dev *dev,
77 struct input_handler *src_handler,
77 unsigned int type, unsigned int code, int value) 78 unsigned int type, unsigned int code, int value)
78{ 79{
79 struct input_handler *handler; 80 struct input_handler *handler;
@@ -92,6 +93,15 @@ static void input_pass_event(struct input_dev *dev,
92 continue; 93 continue;
93 94
94 handler = handle->handler; 95 handler = handle->handler;
96
97 /*
98 * If this is the handler that injected this
99 * particular event we want to skip it to avoid
100 * filters firing again and again.
101 */
102 if (handler == src_handler)
103 continue;
104
95 if (!handler->filter) { 105 if (!handler->filter) {
96 if (filtered) 106 if (filtered)
97 break; 107 break;
@@ -121,7 +131,7 @@ static void input_repeat_key(unsigned long data)
121 if (test_bit(dev->repeat_key, dev->key) && 131 if (test_bit(dev->repeat_key, dev->key) &&
122 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { 132 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
123 133
124 input_pass_event(dev, EV_KEY, dev->repeat_key, 2); 134 input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
125 135
126 if (dev->sync) { 136 if (dev->sync) {
127 /* 137 /*
@@ -130,7 +140,7 @@ static void input_repeat_key(unsigned long data)
130 * Otherwise assume that the driver will send 140 * Otherwise assume that the driver will send
131 * SYN_REPORT once it's done. 141 * SYN_REPORT once it's done.
132 */ 142 */
133 input_pass_event(dev, EV_SYN, SYN_REPORT, 1); 143 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
134 } 144 }
135 145
136 if (dev->rep[REP_PERIOD]) 146 if (dev->rep[REP_PERIOD])
@@ -163,6 +173,7 @@ static void input_stop_autorepeat(struct input_dev *dev)
163#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) 173#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
164 174
165static int input_handle_abs_event(struct input_dev *dev, 175static int input_handle_abs_event(struct input_dev *dev,
176 struct input_handler *src_handler,
166 unsigned int code, int *pval) 177 unsigned int code, int *pval)
167{ 178{
168 bool is_mt_event; 179 bool is_mt_event;
@@ -206,13 +217,15 @@ static int input_handle_abs_event(struct input_dev *dev,
206 /* Flush pending "slot" event */ 217 /* Flush pending "slot" event */
207 if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { 218 if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
208 input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); 219 input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
209 input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot); 220 input_pass_event(dev, src_handler,
221 EV_ABS, ABS_MT_SLOT, dev->slot);
210 } 222 }
211 223
212 return INPUT_PASS_TO_HANDLERS; 224 return INPUT_PASS_TO_HANDLERS;
213} 225}
214 226
215static void input_handle_event(struct input_dev *dev, 227static void input_handle_event(struct input_dev *dev,
228 struct input_handler *src_handler,
216 unsigned int type, unsigned int code, int value) 229 unsigned int type, unsigned int code, int value)
217{ 230{
218 int disposition = INPUT_IGNORE_EVENT; 231 int disposition = INPUT_IGNORE_EVENT;
@@ -265,7 +278,8 @@ static void input_handle_event(struct input_dev *dev,
265 278
266 case EV_ABS: 279 case EV_ABS:
267 if (is_event_supported(code, dev->absbit, ABS_MAX)) 280 if (is_event_supported(code, dev->absbit, ABS_MAX))
268 disposition = input_handle_abs_event(dev, code, &value); 281 disposition = input_handle_abs_event(dev, src_handler,
282 code, &value);
269 283
270 break; 284 break;
271 285
@@ -323,7 +337,7 @@ static void input_handle_event(struct input_dev *dev,
323 dev->event(dev, type, code, value); 337 dev->event(dev, type, code, value);
324 338
325 if (disposition & INPUT_PASS_TO_HANDLERS) 339 if (disposition & INPUT_PASS_TO_HANDLERS)
326 input_pass_event(dev, type, code, value); 340 input_pass_event(dev, src_handler, type, code, value);
327} 341}
328 342
329/** 343/**
@@ -352,7 +366,7 @@ void input_event(struct input_dev *dev,
352 366
353 spin_lock_irqsave(&dev->event_lock, flags); 367 spin_lock_irqsave(&dev->event_lock, flags);
354 add_input_randomness(type, code, value); 368 add_input_randomness(type, code, value);
355 input_handle_event(dev, type, code, value); 369 input_handle_event(dev, NULL, type, code, value);
356 spin_unlock_irqrestore(&dev->event_lock, flags); 370 spin_unlock_irqrestore(&dev->event_lock, flags);
357 } 371 }
358} 372}
@@ -382,7 +396,8 @@ void input_inject_event(struct input_handle *handle,
382 rcu_read_lock(); 396 rcu_read_lock();
383 grab = rcu_dereference(dev->grab); 397 grab = rcu_dereference(dev->grab);
384 if (!grab || grab == handle) 398 if (!grab || grab == handle)
385 input_handle_event(dev, type, code, value); 399 input_handle_event(dev, handle->handler,
400 type, code, value);
386 rcu_read_unlock(); 401 rcu_read_unlock();
387 402
388 spin_unlock_irqrestore(&dev->event_lock, flags); 403 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -595,10 +610,10 @@ static void input_dev_release_keys(struct input_dev *dev)
595 for (code = 0; code <= KEY_MAX; code++) { 610 for (code = 0; code <= KEY_MAX; code++) {
596 if (is_event_supported(code, dev->keybit, KEY_MAX) && 611 if (is_event_supported(code, dev->keybit, KEY_MAX) &&
597 __test_and_clear_bit(code, dev->key)) { 612 __test_and_clear_bit(code, dev->key)) {
598 input_pass_event(dev, EV_KEY, code, 0); 613 input_pass_event(dev, NULL, EV_KEY, code, 0);
599 } 614 }
600 } 615 }
601 input_pass_event(dev, EV_SYN, SYN_REPORT, 1); 616 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
602 } 617 }
603} 618}
604 619
@@ -873,9 +888,9 @@ int input_set_keycode(struct input_dev *dev,
873 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && 888 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
874 __test_and_clear_bit(old_keycode, dev->key)) { 889 __test_and_clear_bit(old_keycode, dev->key)) {
875 890
876 input_pass_event(dev, EV_KEY, old_keycode, 0); 891 input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
877 if (dev->sync) 892 if (dev->sync)
878 input_pass_event(dev, EV_SYN, SYN_REPORT, 1); 893 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
879 } 894 }
880 895
881 out: 896 out:
@@ -1565,8 +1580,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1565 } \ 1580 } \
1566 } while (0) 1581 } while (0)
1567 1582
1568#ifdef CONFIG_PM 1583static void input_dev_toggle(struct input_dev *dev, bool activate)
1569static void input_dev_reset(struct input_dev *dev, bool activate)
1570{ 1584{
1571 if (!dev->event) 1585 if (!dev->event)
1572 return; 1586 return;
@@ -1580,12 +1594,44 @@ static void input_dev_reset(struct input_dev *dev, bool activate)
1580 } 1594 }
1581} 1595}
1582 1596
1597/**
1598 * input_reset_device() - reset/restore the state of input device
1599 * @dev: input device whose state needs to be reset
1600 *
1601 * This function tries to reset the state of an opened input device and
 1602 * bring internal state and the state of the hardware in sync with each other.
1603 * We mark all keys as released, restore LED state, repeat rate, etc.
1604 */
1605void input_reset_device(struct input_dev *dev)
1606{
1607 mutex_lock(&dev->mutex);
1608
1609 if (dev->users) {
1610 input_dev_toggle(dev, true);
1611
1612 /*
1613 * Keys that have been pressed at suspend time are unlikely
1614 * to be still pressed when we resume.
1615 */
1616 spin_lock_irq(&dev->event_lock);
1617 input_dev_release_keys(dev);
1618 spin_unlock_irq(&dev->event_lock);
1619 }
1620
1621 mutex_unlock(&dev->mutex);
1622}
1623EXPORT_SYMBOL(input_reset_device);
1624
1625#ifdef CONFIG_PM
1583static int input_dev_suspend(struct device *dev) 1626static int input_dev_suspend(struct device *dev)
1584{ 1627{
1585 struct input_dev *input_dev = to_input_dev(dev); 1628 struct input_dev *input_dev = to_input_dev(dev);
1586 1629
1587 mutex_lock(&input_dev->mutex); 1630 mutex_lock(&input_dev->mutex);
1588 input_dev_reset(input_dev, false); 1631
1632 if (input_dev->users)
1633 input_dev_toggle(input_dev, false);
1634
1589 mutex_unlock(&input_dev->mutex); 1635 mutex_unlock(&input_dev->mutex);
1590 1636
1591 return 0; 1637 return 0;
@@ -1595,18 +1641,7 @@ static int input_dev_resume(struct device *dev)
1595{ 1641{
1596 struct input_dev *input_dev = to_input_dev(dev); 1642 struct input_dev *input_dev = to_input_dev(dev);
1597 1643
1598 mutex_lock(&input_dev->mutex); 1644 input_reset_device(input_dev);
1599 input_dev_reset(input_dev, true);
1600
1601 /*
1602 * Keys that have been pressed at suspend time are unlikely
1603 * to be still pressed when we resume.
1604 */
1605 spin_lock_irq(&input_dev->event_lock);
1606 input_dev_release_keys(input_dev);
1607 spin_unlock_irq(&input_dev->event_lock);
1608
1609 mutex_unlock(&input_dev->mutex);
1610 1645
1611 return 0; 1646 return 0;
1612} 1647}
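The input core changes above do two things: they tag events with the handler that injected them so a filter never sees its own injected events again, and they split the old suspend/resume helper so input_reset_device() can be called wherever a driver needs to resync key/LED state with the hardware. A hedged usage sketch of the newly exported helper:

#include <linux/input.h>

/* Hypothetical recovery path in a driver: after the hardware has been
 * reinitialised outside normal suspend/resume, resync the input core's
 * view of it. Per the code above, it only acts while the device is open. */
static void example_recover(struct input_dev *dev)
{
	input_reset_device(dev);
}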
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index b92d1cd5cba..af45d275f68 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -4,7 +4,7 @@
4 * I2C QWERTY Keypad and IO Expander 4 * I2C QWERTY Keypad and IO Expander
5 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 5 * Bugs: Enter bugs at http://blackfin.uclinux.org/
6 * 6 *
7 * Copyright (C) 2008-2009 Analog Devices Inc. 7 * Copyright (C) 2008-2010 Analog Devices Inc.
8 * Licensed under the GPL-2 or later. 8 * Licensed under the GPL-2 or later.
9 */ 9 */
10 10
@@ -24,29 +24,6 @@
24 24
25#include <linux/i2c/adp5588.h> 25#include <linux/i2c/adp5588.h>
26 26
27 /* Configuration Register1 */
28#define AUTO_INC (1 << 7)
29#define GPIEM_CFG (1 << 6)
30#define OVR_FLOW_M (1 << 5)
31#define INT_CFG (1 << 4)
32#define OVR_FLOW_IEN (1 << 3)
33#define K_LCK_IM (1 << 2)
34#define GPI_IEN (1 << 1)
35#define KE_IEN (1 << 0)
36
37/* Interrupt Status Register */
38#define CMP2_INT (1 << 5)
39#define CMP1_INT (1 << 4)
40#define OVR_FLOW_INT (1 << 3)
41#define K_LCK_INT (1 << 2)
42#define GPI_INT (1 << 1)
43#define KE_INT (1 << 0)
44
45/* Key Lock and Event Counter Register */
46#define K_LCK_EN (1 << 6)
47#define LCK21 0x30
48#define KEC 0xF
49
50/* Key Event Register xy */ 27/* Key Event Register xy */
51#define KEY_EV_PRESSED (1 << 7) 28#define KEY_EV_PRESSED (1 << 7)
52#define KEY_EV_MASK (0x7F) 29#define KEY_EV_MASK (0x7F)
@@ -55,10 +32,6 @@
55 32
56#define KEYP_MAX_EVENT 10 33#define KEYP_MAX_EVENT 10
57 34
58#define MAXGPIO 18
59#define ADP_BANK(offs) ((offs) >> 3)
60#define ADP_BIT(offs) (1u << ((offs) & 0x7))
61
62/* 35/*
63 * Early pre 4.0 Silicon required to delay readout by at least 25ms, 36 * Early pre 4.0 Silicon required to delay readout by at least 25ms,
64 * since the Event Counter Register updated 25ms after the interrupt 37 * since the Event Counter Register updated 25ms after the interrupt
@@ -75,7 +48,7 @@ struct adp5588_kpad {
75 const struct adp5588_gpi_map *gpimap; 48 const struct adp5588_gpi_map *gpimap;
76 unsigned short gpimapsize; 49 unsigned short gpimapsize;
77#ifdef CONFIG_GPIOLIB 50#ifdef CONFIG_GPIOLIB
78 unsigned char gpiomap[MAXGPIO]; 51 unsigned char gpiomap[ADP5588_MAXGPIO];
79 bool export_gpio; 52 bool export_gpio;
80 struct gpio_chip gc; 53 struct gpio_chip gc;
81 struct mutex gpio_lock; /* Protect cached dir, dat_out */ 54 struct mutex gpio_lock; /* Protect cached dir, dat_out */
@@ -103,8 +76,8 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val)
103static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off) 76static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
104{ 77{
105 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 78 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
106 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 79 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
107 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 80 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
108 81
109 return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit); 82 return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
110} 83}
@@ -113,8 +86,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
113 unsigned off, int val) 86 unsigned off, int val)
114{ 87{
115 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 88 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
116 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 89 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
117 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 90 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
118 91
119 mutex_lock(&kpad->gpio_lock); 92 mutex_lock(&kpad->gpio_lock);
120 93
@@ -132,8 +105,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
132static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off) 105static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
133{ 106{
134 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 107 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
135 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 108 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
136 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 109 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
137 int ret; 110 int ret;
138 111
139 mutex_lock(&kpad->gpio_lock); 112 mutex_lock(&kpad->gpio_lock);
@@ -150,8 +123,8 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
150 unsigned off, int val) 123 unsigned off, int val)
151{ 124{
152 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 125 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
153 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 126 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
154 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 127 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
155 int ret; 128 int ret;
156 129
157 mutex_lock(&kpad->gpio_lock); 130 mutex_lock(&kpad->gpio_lock);
@@ -176,7 +149,7 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
176static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad, 149static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
177 const struct adp5588_kpad_platform_data *pdata) 150 const struct adp5588_kpad_platform_data *pdata)
178{ 151{
179 bool pin_used[MAXGPIO]; 152 bool pin_used[ADP5588_MAXGPIO];
180 int n_unused = 0; 153 int n_unused = 0;
181 int i; 154 int i;
182 155
@@ -191,7 +164,7 @@ static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
191 for (i = 0; i < kpad->gpimapsize; i++) 164 for (i = 0; i < kpad->gpimapsize; i++)
192 pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true; 165 pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true;
193 166
194 for (i = 0; i < MAXGPIO; i++) 167 for (i = 0; i < ADP5588_MAXGPIO; i++)
195 if (!pin_used[i]) 168 if (!pin_used[i])
196 kpad->gpiomap[n_unused++] = i; 169 kpad->gpiomap[n_unused++] = i;
197 170
@@ -234,7 +207,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
234 return error; 207 return error;
235 } 208 }
236 209
237 for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { 210 for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
238 kpad->dat_out[i] = adp5588_read(kpad->client, 211 kpad->dat_out[i] = adp5588_read(kpad->client,
239 GPIO_DAT_OUT1 + i); 212 GPIO_DAT_OUT1 + i);
240 kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i); 213 kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i);
@@ -318,11 +291,11 @@ static void adp5588_work(struct work_struct *work)
318 291
319 status = adp5588_read(client, INT_STAT); 292 status = adp5588_read(client, INT_STAT);
320 293
321 if (status & OVR_FLOW_INT) /* Unlikely and should never happen */ 294 if (status & ADP5588_OVR_FLOW_INT) /* Unlikely and should never happen */
322 dev_err(&client->dev, "Event Overflow Error\n"); 295 dev_err(&client->dev, "Event Overflow Error\n");
323 296
324 if (status & KE_INT) { 297 if (status & ADP5588_KE_INT) {
325 ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC; 298 ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & ADP5588_KEC;
326 if (ev_cnt) { 299 if (ev_cnt) {
327 adp5588_report_events(kpad, ev_cnt); 300 adp5588_report_events(kpad, ev_cnt);
328 input_sync(kpad->input); 301 input_sync(kpad->input);
@@ -360,7 +333,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
360 if (pdata->en_keylock) { 333 if (pdata->en_keylock) {
361 ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1); 334 ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1);
362 ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2); 335 ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2);
363 ret |= adp5588_write(client, KEY_LCK_EC_STAT, K_LCK_EN); 336 ret |= adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN);
364 } 337 }
365 338
366 for (i = 0; i < KEYP_MAX_EVENT; i++) 339 for (i = 0; i < KEYP_MAX_EVENT; i++)
@@ -384,7 +357,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
384 } 357 }
385 358
386 if (gpio_data) { 359 if (gpio_data) {
387 for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { 360 for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
388 int pull_mask = gpio_data->pullup_dis_mask; 361 int pull_mask = gpio_data->pullup_dis_mask;
389 362
390 ret |= adp5588_write(client, GPIO_PULL1 + i, 363 ret |= adp5588_write(client, GPIO_PULL1 + i,
@@ -392,11 +365,14 @@ static int __devinit adp5588_setup(struct i2c_client *client)
392 } 365 }
393 } 366 }
394 367
395 ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT | 368 ret |= adp5588_write(client, INT_STAT,
396 OVR_FLOW_INT | K_LCK_INT | 369 ADP5588_CMP2_INT | ADP5588_CMP1_INT |
397 GPI_INT | KE_INT); /* Status is W1C */ 370 ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT |
371 ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */
398 372
399 ret |= adp5588_write(client, CFG, INT_CFG | OVR_FLOW_IEN | KE_IEN); 373 ret |= adp5588_write(client, CFG, ADP5588_INT_CFG |
374 ADP5588_OVR_FLOW_IEN |
375 ADP5588_KE_IEN);
400 376
401 if (ret < 0) { 377 if (ret < 0) {
402 dev_err(&client->dev, "Write Error\n"); 378 dev_err(&client->dev, "Write Error\n");
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index d358ef8623f..11478eb2c27 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -63,6 +63,10 @@ static bool atkbd_extra;
63module_param_named(extra, atkbd_extra, bool, 0); 63module_param_named(extra, atkbd_extra, bool, 0);
64MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards"); 64MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards");
65 65
66static bool atkbd_terminal;
67module_param_named(terminal, atkbd_terminal, bool, 0);
68MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
69
66/* 70/*
67 * Scancode to keycode tables. These are just the default setting, and 71 * Scancode to keycode tables. These are just the default setting, and
68 * are loadable via a userland utility. 72 * are loadable via a userland utility.
@@ -136,7 +140,8 @@ static const unsigned short atkbd_unxlate_table[128] = {
136#define ATKBD_CMD_ENABLE 0x00f4 140#define ATKBD_CMD_ENABLE 0x00f4
137#define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */ 141#define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */
138#define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */ 142#define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */
139#define ATKBD_CMD_SETALL_MBR 0x00fa 143#define ATKBD_CMD_SETALL_MB 0x00f8 /* Set all keys to give break codes */
144#define ATKBD_CMD_SETALL_MBR 0x00fa /* ... and repeat */
140#define ATKBD_CMD_RESET_BAT 0x02ff 145#define ATKBD_CMD_RESET_BAT 0x02ff
141#define ATKBD_CMD_RESEND 0x00fe 146#define ATKBD_CMD_RESEND 0x00fe
142#define ATKBD_CMD_EX_ENABLE 0x10ea 147#define ATKBD_CMD_EX_ENABLE 0x10ea
@@ -764,6 +769,11 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
764 } 769 }
765 } 770 }
766 771
772 if (atkbd_terminal) {
773 ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MB);
774 return 3;
775 }
776
767 if (target_set != 3) 777 if (target_set != 3)
768 return 2; 778 return 2;
769 779
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 4b42ffc0532..d1583aea172 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -127,14 +127,6 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
127 idev->id.product = 0x0001; 127 idev->id.product = 0x0001;
128 idev->id.version = 0x0100; 128 idev->id.version = 0x0100;
129 129
130 input_set_drvdata(idev, lp);
131
132 ret = input_register_device(idev);
133 if (ret) {
134 dev_err(&client->dev, "input_register_device() failed\n");
135 goto fail_register;
136 }
137
138 lp->laststate = read_state(lp); 130 lp->laststate = read_state(lp);
139 131
140 ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler, 132 ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
@@ -142,16 +134,21 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
142 DRV_NAME, lp); 134 DRV_NAME, lp);
143 if (ret) { 135 if (ret) {
144 dev_err(&client->dev, "IRQ %d is not free\n", client->irq); 136 dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
145 goto fail_irq; 137 goto fail_free_device;
138 }
139
140 ret = input_register_device(idev);
141 if (ret) {
142 dev_err(&client->dev, "input_register_device() failed\n");
143 goto fail_free_irq;
146 } 144 }
147 145
148 i2c_set_clientdata(client, lp); 146 i2c_set_clientdata(client, lp);
149 return 0; 147 return 0;
150 148
151 fail_irq: 149 fail_free_irq:
152 input_unregister_device(idev); 150 free_irq(client->irq, lp);
153 fail_register: 151 fail_free_device:
154 input_set_drvdata(idev, NULL);
155 input_free_device(idev); 152 input_free_device(idev);
156 fail_allocate: 153 fail_allocate:
157 kfree(lp); 154 kfree(lp);
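
The hunk above reorders probe so that the interrupt is requested before the input device is registered, and renames the error labels so each one releases exactly what was acquired before the failing step. A minimal, hypothetical sketch of that reverse-order goto-unwind idiom (the struct, names and IRQ flags below are illustrative only, not taken from this driver):

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct example_kp {			/* hypothetical driver state */
	struct input_dev *idev;
};

static irqreturn_t example_kp_irq(int irq, void *dev_id)
{
	/* a real handler would read the chip and report keys here */
	return IRQ_HANDLED;
}

static int example_kp_probe(struct i2c_client *client)
{
	struct example_kp *kp;
	int err;

	kp = kzalloc(sizeof(*kp), GFP_KERNEL);
	if (!kp)
		return -ENOMEM;

	kp->idev = input_allocate_device();
	if (!kp->idev) {
		err = -ENOMEM;
		goto err_free_kp;
	}

	err = request_threaded_irq(client->irq, NULL, example_kp_irq,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   "example_kp", kp);
	if (err)
		goto err_free_device;

	err = input_register_device(kp->idev);
	if (err)
		goto err_free_irq;

	i2c_set_clientdata(client, kp);
	return 0;

	/* unwind in the reverse order of acquisition */
err_free_irq:
	free_irq(client->irq, kp);
err_free_device:
	input_free_device(kp->idev);
err_free_kp:
	kfree(kp);
	return err;
}
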
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ed7ad7416b2..a5475b57708 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -351,6 +351,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
351 }, 351 },
352 }, 352 },
353 { 353 {
354 /*
355 * Most (all?) VAIOs do not have external PS/2 ports nor
356 * they implement active multiplexing properly, and
357 * MUX discovery usually messes up keyboard/touchpad.
358 */
359 .matches = {
360 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
361 DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
362 },
363 },
364 {
354 /* Amoi M636/A737 */ 365 /* Amoi M636/A737 */
355 .matches = { 366 .matches = {
356 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), 367 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index aea9a9399a3..d94f7e9aa99 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -229,12 +229,13 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
229 229
230 err = input_register_device(acecad->input); 230 err = input_register_device(acecad->input);
231 if (err) 231 if (err)
232 goto fail2; 232 goto fail3;
233 233
234 usb_set_intfdata(intf, acecad); 234 usb_set_intfdata(intf, acecad);
235 235
236 return 0; 236 return 0;
237 237
238 fail3: usb_free_urb(acecad->irq);
238 fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma); 239 fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma);
239 fail1: input_free_device(input_dev); 240 fail1: input_free_device(input_dev);
240 kfree(acecad); 241 kfree(acecad);
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 40b914bded8..2e72227bd07 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -1427,8 +1427,8 @@ modeisar(struct BCState *bcs, int mode, int bc)
1427 &bcs->hw.isar.reg->Flags)) 1427 &bcs->hw.isar.reg->Flags))
1428 bcs->hw.isar.dpath = 1; 1428 bcs->hw.isar.dpath = 1;
1429 else { 1429 else {
1430 printk(KERN_WARNING"isar modeisar analog funktions only with DP1\n"); 1430 printk(KERN_WARNING"isar modeisar analog functions only with DP1\n");
1431 debugl1(cs, "isar modeisar analog funktions only with DP1"); 1431 debugl1(cs, "isar modeisar analog functions only with DP1");
1432 return(1); 1432 return(1);
1433 } 1433 }
1434 break; 1434 break;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index cc2a88d5192..77b8fd20cd9 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -10,7 +10,7 @@ menuconfig NEW_LEDS
10if NEW_LEDS 10if NEW_LEDS
11 11
12config LEDS_CLASS 12config LEDS_CLASS
13 tristate "LED Class Support" 13 bool "LED Class Support"
14 help 14 help
15 This option enables the led sysfs class in /sys/class/leds. You'll 15 This option enables the led sysfs class in /sys/class/leds. You'll
16 need this to do anything useful with LEDs. If unsure, say N. 16 need this to do anything useful with LEDs. If unsure, say N.
@@ -176,6 +176,24 @@ config LEDS_LP3944
176 To compile this driver as a module, choose M here: the 176 To compile this driver as a module, choose M here: the
177 module will be called leds-lp3944. 177 module will be called leds-lp3944.
178 178
179config LEDS_LP5521
180 tristate "LED Support for N.S. LP5521 LED driver chip"
181 depends on LEDS_CLASS && I2C
182 help
183 If you say yes here you get support for the National Semiconductor
 184	  LP5521 LED driver. It is a 3 channel chip with programmable engines.
 185	  The driver provides direct control via the LED class and an interface for
186 programming the engines.
187
188config LEDS_LP5523
189 tristate "LED Support for N.S. LP5523 LED driver chip"
190 depends on LEDS_CLASS && I2C
191 help
192 If you say yes here you get support for the National Semiconductor
 193	  LP5523 LED driver. It is a 9 channel chip with programmable engines.
 194	  The driver provides direct control via the LED class and an interface for
195 programming the engines.
196
179config LEDS_CLEVO_MAIL 197config LEDS_CLEVO_MAIL
180 tristate "Mail LED on Clevo notebook" 198 tristate "Mail LED on Clevo notebook"
181 depends on X86 && SERIO_I8042 && DMI 199 depends on X86 && SERIO_I8042 && DMI
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 9c96db40ef6..aae6989ff6b 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -23,6 +23,8 @@ obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
23obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o 23obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
24obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o 24obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
25obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o 25obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
26obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
27obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
26obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o 28obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
27obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o 29obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
28obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 30obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 26066007650..211e21f34bd 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -81,6 +81,79 @@ static struct device_attribute led_class_attrs[] = {
81 __ATTR_NULL, 81 __ATTR_NULL,
82}; 82};
83 83
84static void led_timer_function(unsigned long data)
85{
86 struct led_classdev *led_cdev = (void *)data;
87 unsigned long brightness;
88 unsigned long delay;
89
90 if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
91 led_set_brightness(led_cdev, LED_OFF);
92 return;
93 }
94
95 brightness = led_get_brightness(led_cdev);
96 if (!brightness) {
97 /* Time to switch the LED on. */
98 brightness = led_cdev->blink_brightness;
99 delay = led_cdev->blink_delay_on;
100 } else {
101 /* Store the current brightness value to be able
102 * to restore it when the delay_off period is over.
103 */
104 led_cdev->blink_brightness = brightness;
105 brightness = LED_OFF;
106 delay = led_cdev->blink_delay_off;
107 }
108
109 led_set_brightness(led_cdev, brightness);
110
111 mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
112}
113
114static void led_stop_software_blink(struct led_classdev *led_cdev)
115{
116 /* deactivate previous settings */
117 del_timer_sync(&led_cdev->blink_timer);
118 led_cdev->blink_delay_on = 0;
119 led_cdev->blink_delay_off = 0;
120}
121
122static void led_set_software_blink(struct led_classdev *led_cdev,
123 unsigned long delay_on,
124 unsigned long delay_off)
125{
126 int current_brightness;
127
128 current_brightness = led_get_brightness(led_cdev);
129 if (current_brightness)
130 led_cdev->blink_brightness = current_brightness;
131 if (!led_cdev->blink_brightness)
132 led_cdev->blink_brightness = led_cdev->max_brightness;
133
134 if (delay_on == led_cdev->blink_delay_on &&
135 delay_off == led_cdev->blink_delay_off)
136 return;
137
138 led_stop_software_blink(led_cdev);
139
140 led_cdev->blink_delay_on = delay_on;
141 led_cdev->blink_delay_off = delay_off;
142
143 /* never on - don't blink */
144 if (!delay_on)
145 return;
146
147 /* never off - just set to brightness */
148 if (!delay_off) {
149 led_set_brightness(led_cdev, led_cdev->blink_brightness);
150 return;
151 }
152
153 mod_timer(&led_cdev->blink_timer, jiffies + 1);
154}
155
156
84/** 157/**
85 * led_classdev_suspend - suspend an led_classdev. 158 * led_classdev_suspend - suspend an led_classdev.
86 * @led_cdev: the led_classdev to suspend. 159 * @led_cdev: the led_classdev to suspend.
@@ -148,6 +221,10 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
148 221
149 led_update_brightness(led_cdev); 222 led_update_brightness(led_cdev);
150 223
224 init_timer(&led_cdev->blink_timer);
225 led_cdev->blink_timer.function = led_timer_function;
226 led_cdev->blink_timer.data = (unsigned long)led_cdev;
227
151#ifdef CONFIG_LEDS_TRIGGERS 228#ifdef CONFIG_LEDS_TRIGGERS
152 led_trigger_set_default(led_cdev); 229 led_trigger_set_default(led_cdev);
153#endif 230#endif
@@ -157,7 +234,6 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
157 234
158 return 0; 235 return 0;
159} 236}
160
161EXPORT_SYMBOL_GPL(led_classdev_register); 237EXPORT_SYMBOL_GPL(led_classdev_register);
162 238
163/** 239/**
@@ -175,6 +251,9 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
175 up_write(&led_cdev->trigger_lock); 251 up_write(&led_cdev->trigger_lock);
176#endif 252#endif
177 253
254 /* Stop blinking */
255 led_brightness_set(led_cdev, LED_OFF);
256
178 device_unregister(led_cdev->dev); 257 device_unregister(led_cdev->dev);
179 258
180 down_write(&leds_list_lock); 259 down_write(&leds_list_lock);
@@ -183,6 +262,30 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
183} 262}
184EXPORT_SYMBOL_GPL(led_classdev_unregister); 263EXPORT_SYMBOL_GPL(led_classdev_unregister);
185 264
265void led_blink_set(struct led_classdev *led_cdev,
266 unsigned long *delay_on,
267 unsigned long *delay_off)
268{
269 if (led_cdev->blink_set &&
270 led_cdev->blink_set(led_cdev, delay_on, delay_off))
271 return;
272
273 /* blink with 1 Hz as default if nothing specified */
274 if (!*delay_on && !*delay_off)
275 *delay_on = *delay_off = 500;
276
277 led_set_software_blink(led_cdev, *delay_on, *delay_off);
278}
279EXPORT_SYMBOL(led_blink_set);
280
281void led_brightness_set(struct led_classdev *led_cdev,
282 enum led_brightness brightness)
283{
284 led_stop_software_blink(led_cdev);
285 led_cdev->brightness_set(led_cdev, brightness);
286}
287EXPORT_SYMBOL(led_brightness_set);
288
186static int __init leds_init(void) 289static int __init leds_init(void)
187{ 290{
188 leds_class = class_create(THIS_MODULE, "leds"); 291 leds_class = class_create(THIS_MODULE, "leds");
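
With the new helpers in place, a LED driver or trigger no longer needs its own timer for blinking: calling led_blink_set() with zero delays falls back to the 500 ms / 500 ms software blink added above, while a driver that fills in ->blink_set keeps using its hardware engine. A minimal sketch of a caller (illustrative only, not part of this patch):

#include <linux/leds.h>

/* Start blinking; zero delays request the default 1 Hz blink. */
static void example_start_blink(struct led_classdev *led_cdev)
{
	unsigned long delay_on = 0;
	unsigned long delay_off = 0;

	led_blink_set(led_cdev, &delay_on, &delay_off);
	/* if the software fallback was used, delay_on/delay_off now
	 * report the 500 ms defaults chosen by led_blink_set() */
}

/* Setting a fixed brightness through the new helper also stops any
 * software blink that may be running. */
static void example_stop_blink(struct led_classdev *led_cdev)
{
	led_brightness_set(led_cdev, LED_OFF);
}
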
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index f1c00db88b5..c41eb6180c9 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -113,7 +113,7 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
113 if (led_cdev->trigger->deactivate) 113 if (led_cdev->trigger->deactivate)
114 led_cdev->trigger->deactivate(led_cdev); 114 led_cdev->trigger->deactivate(led_cdev);
115 led_cdev->trigger = NULL; 115 led_cdev->trigger = NULL;
116 led_set_brightness(led_cdev, LED_OFF); 116 led_brightness_set(led_cdev, LED_OFF);
117 } 117 }
118 if (trigger) { 118 if (trigger) {
119 write_lock_irqsave(&trigger->leddev_list_lock, flags); 119 write_lock_irqsave(&trigger->leddev_list_lock, flags);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index ea57e05d08f..4d9fa38d9ff 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -316,7 +316,7 @@ static struct of_platform_driver of_gpio_leds_driver = {
316 316
317static int __init gpio_led_init(void) 317static int __init gpio_led_init(void)
318{ 318{
319 int ret; 319 int ret = 0;
320 320
321#ifdef CONFIG_LEDS_GPIO_PLATFORM 321#ifdef CONFIG_LEDS_GPIO_PLATFORM
322 ret = platform_driver_register(&gpio_led_driver); 322 ret = platform_driver_register(&gpio_led_driver);
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
new file mode 100644
index 00000000000..3782f31f06d
--- /dev/null
+++ b/drivers/leds/leds-lp5521.c
@@ -0,0 +1,821 @@
1/*
2 * LP5521 LED chip driver.
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/i2c.h>
26#include <linux/mutex.h>
27#include <linux/gpio.h>
28#include <linux/interrupt.h>
29#include <linux/delay.h>
30#include <linux/ctype.h>
31#include <linux/spinlock.h>
32#include <linux/wait.h>
33#include <linux/leds.h>
34#include <linux/leds-lp5521.h>
35#include <linux/workqueue.h>
36#include <linux/slab.h>
37
38#define LP5521_PROGRAM_LENGTH 32 /* in bytes */
39
40#define LP5521_MAX_LEDS 3 /* Maximum number of LEDs */
41#define LP5521_MAX_ENGINES 3 /* Maximum number of engines */
42
43#define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */
44#define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */
45
46#define LP5521_CMD_LOAD 0x15 /* 00010101 */
47#define LP5521_CMD_RUN 0x2a /* 00101010 */
48#define LP5521_CMD_DIRECT 0x3f /* 00111111 */
49#define LP5521_CMD_DISABLED 0x00 /* 00000000 */
50
51/* Registers */
52#define LP5521_REG_ENABLE 0x00
53#define LP5521_REG_OP_MODE 0x01
54#define LP5521_REG_R_PWM 0x02
55#define LP5521_REG_G_PWM 0x03
56#define LP5521_REG_B_PWM 0x04
57#define LP5521_REG_R_CURRENT 0x05
58#define LP5521_REG_G_CURRENT 0x06
59#define LP5521_REG_B_CURRENT 0x07
60#define LP5521_REG_CONFIG 0x08
61#define LP5521_REG_R_CHANNEL_PC 0x09
62#define LP5521_REG_G_CHANNEL_PC 0x0A
63#define LP5521_REG_B_CHANNEL_PC 0x0B
64#define LP5521_REG_STATUS 0x0C
65#define LP5521_REG_RESET 0x0D
66#define LP5521_REG_GPO 0x0E
67#define LP5521_REG_R_PROG_MEM 0x10
68#define LP5521_REG_G_PROG_MEM 0x30
69#define LP5521_REG_B_PROG_MEM 0x50
70
71#define LP5521_PROG_MEM_BASE LP5521_REG_R_PROG_MEM
72#define LP5521_PROG_MEM_SIZE 0x20
73
74/* Base register to set LED current */
75#define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT
76
77/* Base register to set the brightness */
78#define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM
79
80/* Bits in ENABLE register */
81#define LP5521_MASTER_ENABLE 0x40 /* Chip master enable */
82#define LP5521_LOGARITHMIC_PWM 0x80 /* Logarithmic PWM adjustment */
83#define LP5521_EXEC_RUN 0x2A
84
85/* Bits in CONFIG register */
86#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */
87#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */
88#define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */
89#define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */
90#define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */
91#define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */
92#define LP5521_R_TO_BATT 4 /* R out: 0 = CP, 1 = Vbat */
93#define LP5521_CLK_SRC_EXT 0 /* Ext-clk source (CLK_32K) */
94#define LP5521_CLK_INT 1 /* Internal clock */
95#define LP5521_CLK_AUTO 2 /* Automatic clock selection */
96
97/* Status */
98#define LP5521_EXT_CLK_USED 0x08
99
100struct lp5521_engine {
101 const struct attribute_group *attributes;
102 int id;
103 u8 mode;
104 u8 prog_page;
105 u8 engine_mask;
106};
107
108struct lp5521_led {
109 int id;
110 u8 chan_nr;
111 u8 led_current;
112 u8 max_current;
113 struct led_classdev cdev;
114 struct work_struct brightness_work;
115 u8 brightness;
116};
117
118struct lp5521_chip {
119 struct lp5521_platform_data *pdata;
120 struct mutex lock; /* Serialize control */
121 struct i2c_client *client;
122 struct lp5521_engine engines[LP5521_MAX_ENGINES];
123 struct lp5521_led leds[LP5521_MAX_LEDS];
124 u8 num_channels;
125 u8 num_leds;
126};
127
128#define cdev_to_led(c) container_of(c, struct lp5521_led, cdev)
129#define engine_to_lp5521(eng) container_of((eng), struct lp5521_chip, \
130 engines[(eng)->id - 1])
131#define led_to_lp5521(led) container_of((led), struct lp5521_chip, \
132 leds[(led)->id])
133
134static void lp5521_led_brightness_work(struct work_struct *work);
135
136static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
137{
138 return i2c_smbus_write_byte_data(client, reg, value);
139}
140
141static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
142{
143 s32 ret;
144
145 ret = i2c_smbus_read_byte_data(client, reg);
146 if (ret < 0)
147 return -EIO;
148
149 *buf = ret;
150 return 0;
151}
152
153static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
154{
155 struct lp5521_chip *chip = engine_to_lp5521(engine);
156 struct i2c_client *client = chip->client;
157 int ret;
158 u8 engine_state;
159
 160	/* Only transitions between RUN and DIRECT mode are handled here */
161 if (mode == LP5521_CMD_LOAD)
162 return 0;
163
164 if (mode == LP5521_CMD_DISABLED)
165 mode = LP5521_CMD_DIRECT;
166
167 ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);
168
169 /* set mode only for this engine */
170 engine_state &= ~(engine->engine_mask);
171 mode &= engine->engine_mask;
172 engine_state |= mode;
173 ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
174
175 return ret;
176}
177
178static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
179{
180 struct lp5521_chip *chip = engine_to_lp5521(eng);
181 struct i2c_client *client = chip->client;
182 int ret;
183 int addr;
184 u8 mode;
185
186 /* move current engine to direct mode and remember the state */
187 ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
188 usleep_range(1000, 10000);
189 ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
190
 191	/* For loading, move all the engines to load mode */
192 lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
193 usleep_range(1000, 10000);
194 lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
195 usleep_range(1000, 10000);
196
197 addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
198 i2c_smbus_write_i2c_block_data(client,
199 addr,
200 LP5521_PROG_MEM_SIZE,
201 pattern);
202
203 ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
204 return ret;
205}
206
207static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
208{
209 return lp5521_write(chip->client,
210 LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
211 curr);
212}
213
214static void lp5521_init_engine(struct lp5521_chip *chip,
215 const struct attribute_group *attr_group)
216{
217 int i;
218 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
219 chip->engines[i].id = i + 1;
220 chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
221 chip->engines[i].prog_page = i;
222 chip->engines[i].attributes = &attr_group[i];
223 }
224}
225
226static int lp5521_configure(struct i2c_client *client,
227 const struct attribute_group *attr_group)
228{
229 struct lp5521_chip *chip = i2c_get_clientdata(client);
230 int ret;
231
232 lp5521_init_engine(chip, attr_group);
233
234 lp5521_write(client, LP5521_REG_RESET, 0xff);
235
236 usleep_range(10000, 20000);
237
238 /* Set all PWMs to direct control mode */
239 ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
240
241 /* Enable auto-powersave, set charge pump to auto, red to battery */
242 ret |= lp5521_write(client, LP5521_REG_CONFIG,
243 LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
244
245 /* Initialize all channels PWM to zero -> leds off */
246 ret |= lp5521_write(client, LP5521_REG_R_PWM, 0);
247 ret |= lp5521_write(client, LP5521_REG_G_PWM, 0);
248 ret |= lp5521_write(client, LP5521_REG_B_PWM, 0);
249
 250	/* Engines are set to run state when OP_MODE enables engines */
251 ret |= lp5521_write(client, LP5521_REG_ENABLE,
252 LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM |
253 LP5521_EXEC_RUN);
254 /* enable takes 500us */
255 usleep_range(500, 20000);
256
257 return ret;
258}
259
260static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf)
261{
262 int ret;
263 u8 status;
264
265 ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status);
266 if (ret < 0)
267 return ret;
268
269 /* Check that ext clock is really in use if requested */
270 if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT)
271 if ((status & LP5521_EXT_CLK_USED) == 0)
272 return -EIO;
273 return 0;
274}
275
276static void lp5521_set_brightness(struct led_classdev *cdev,
277 enum led_brightness brightness)
278{
279 struct lp5521_led *led = cdev_to_led(cdev);
280 led->brightness = (u8)brightness;
281 schedule_work(&led->brightness_work);
282}
283
284static void lp5521_led_brightness_work(struct work_struct *work)
285{
286 struct lp5521_led *led = container_of(work,
287 struct lp5521_led,
288 brightness_work);
289 struct lp5521_chip *chip = led_to_lp5521(led);
290 struct i2c_client *client = chip->client;
291
292 mutex_lock(&chip->lock);
293 lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr,
294 led->brightness);
295 mutex_unlock(&chip->lock);
296}
297
298/* Detect the chip by setting its ENABLE register and reading it back. */
299static int lp5521_detect(struct i2c_client *client)
300{
301 int ret;
302 u8 buf;
303
304 ret = lp5521_write(client, LP5521_REG_ENABLE,
305 LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM);
306 if (ret)
307 return ret;
308 usleep_range(1000, 10000);
309 ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
310 if (ret)
311 return ret;
312 if (buf != (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM))
313 return -ENODEV;
314
315 return 0;
316}
317
318/* Set engine mode and create appropriate sysfs attributes, if required. */
319static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
320{
321 struct lp5521_chip *chip = engine_to_lp5521(engine);
322 struct i2c_client *client = chip->client;
323 struct device *dev = &client->dev;
324 int ret = 0;
325
326 /* if in that mode already do nothing, except for run */
327 if (mode == engine->mode && mode != LP5521_CMD_RUN)
328 return 0;
329
330 if (mode == LP5521_CMD_RUN) {
331 ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
332 } else if (mode == LP5521_CMD_LOAD) {
333 lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
334 lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
335
336 ret = sysfs_create_group(&dev->kobj, engine->attributes);
337 if (ret)
338 return ret;
339 } else if (mode == LP5521_CMD_DISABLED) {
340 lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
341 }
342
343 /* remove load attribute from sysfs if not in load mode */
344 if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
345 sysfs_remove_group(&dev->kobj, engine->attributes);
346
347 engine->mode = mode;
348
349 return ret;
350}
351
352static int lp5521_do_store_load(struct lp5521_engine *engine,
353 const char *buf, size_t len)
354{
355 struct lp5521_chip *chip = engine_to_lp5521(engine);
356 struct i2c_client *client = chip->client;
357 int ret, nrchars, offset = 0, i = 0;
358 char c[3];
359 unsigned cmd;
360 u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
361
362 while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
 363		/* separate sscanfs because the length limit works only for %s */
364 ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
365 ret = sscanf(c, "%2x", &cmd);
366 if (ret != 1)
367 goto fail;
368 pattern[i] = (u8)cmd;
369
370 offset += nrchars;
371 i++;
372 }
373
374 /* Each instruction is 16bit long. Check that length is even */
375 if (i % 2)
376 goto fail;
377
378 mutex_lock(&chip->lock);
379 ret = lp5521_load_program(engine, pattern);
380 mutex_unlock(&chip->lock);
381
382 if (ret) {
383 dev_err(&client->dev, "failed loading pattern\n");
384 return ret;
385 }
386
387 return len;
388fail:
389 dev_err(&client->dev, "wrong pattern format\n");
390 return -EINVAL;
391}
392
393static ssize_t store_engine_load(struct device *dev,
394 struct device_attribute *attr,
395 const char *buf, size_t len, int nr)
396{
397 struct i2c_client *client = to_i2c_client(dev);
398 struct lp5521_chip *chip = i2c_get_clientdata(client);
399 return lp5521_do_store_load(&chip->engines[nr - 1], buf, len);
400}
401
402#define store_load(nr) \
403static ssize_t store_engine##nr##_load(struct device *dev, \
404 struct device_attribute *attr, \
405 const char *buf, size_t len) \
406{ \
407 return store_engine_load(dev, attr, buf, len, nr); \
408}
409store_load(1)
410store_load(2)
411store_load(3)
412
413static ssize_t show_engine_mode(struct device *dev,
414 struct device_attribute *attr,
415 char *buf, int nr)
416{
417 struct i2c_client *client = to_i2c_client(dev);
418 struct lp5521_chip *chip = i2c_get_clientdata(client);
419 switch (chip->engines[nr - 1].mode) {
420 case LP5521_CMD_RUN:
421 return sprintf(buf, "run\n");
422 case LP5521_CMD_LOAD:
423 return sprintf(buf, "load\n");
424 case LP5521_CMD_DISABLED:
425 return sprintf(buf, "disabled\n");
426 default:
427 return sprintf(buf, "disabled\n");
428 }
429}
430
431#define show_mode(nr) \
432static ssize_t show_engine##nr##_mode(struct device *dev, \
433 struct device_attribute *attr, \
434 char *buf) \
435{ \
436 return show_engine_mode(dev, attr, buf, nr); \
437}
438show_mode(1)
439show_mode(2)
440show_mode(3)
441
442static ssize_t store_engine_mode(struct device *dev,
443 struct device_attribute *attr,
444 const char *buf, size_t len, int nr)
445{
446 struct i2c_client *client = to_i2c_client(dev);
447 struct lp5521_chip *chip = i2c_get_clientdata(client);
448 struct lp5521_engine *engine = &chip->engines[nr - 1];
449 mutex_lock(&chip->lock);
450
451 if (!strncmp(buf, "run", 3))
452 lp5521_set_mode(engine, LP5521_CMD_RUN);
453 else if (!strncmp(buf, "load", 4))
454 lp5521_set_mode(engine, LP5521_CMD_LOAD);
455 else if (!strncmp(buf, "disabled", 8))
456 lp5521_set_mode(engine, LP5521_CMD_DISABLED);
457
458 mutex_unlock(&chip->lock);
459 return len;
460}
461
462#define store_mode(nr) \
463static ssize_t store_engine##nr##_mode(struct device *dev, \
464 struct device_attribute *attr, \
465 const char *buf, size_t len) \
466{ \
467 return store_engine_mode(dev, attr, buf, len, nr); \
468}
469store_mode(1)
470store_mode(2)
471store_mode(3)
472
473static ssize_t show_max_current(struct device *dev,
474 struct device_attribute *attr,
475 char *buf)
476{
477 struct led_classdev *led_cdev = dev_get_drvdata(dev);
478 struct lp5521_led *led = cdev_to_led(led_cdev);
479
480 return sprintf(buf, "%d\n", led->max_current);
481}
482
483static ssize_t show_current(struct device *dev,
484 struct device_attribute *attr,
485 char *buf)
486{
487 struct led_classdev *led_cdev = dev_get_drvdata(dev);
488 struct lp5521_led *led = cdev_to_led(led_cdev);
489
490 return sprintf(buf, "%d\n", led->led_current);
491}
492
493static ssize_t store_current(struct device *dev,
494 struct device_attribute *attr,
495 const char *buf, size_t len)
496{
497 struct led_classdev *led_cdev = dev_get_drvdata(dev);
498 struct lp5521_led *led = cdev_to_led(led_cdev);
499 struct lp5521_chip *chip = led_to_lp5521(led);
500 ssize_t ret;
501 unsigned long curr;
502
503 if (strict_strtoul(buf, 0, &curr))
504 return -EINVAL;
505
506 if (curr > led->max_current)
507 return -EINVAL;
508
509 mutex_lock(&chip->lock);
510 ret = lp5521_set_led_current(chip, led->id, curr);
511 mutex_unlock(&chip->lock);
512
513 if (ret < 0)
514 return ret;
515
516 led->led_current = (u8)curr;
517
518 return len;
519}
520
521static ssize_t lp5521_selftest(struct device *dev,
522 struct device_attribute *attr,
523 char *buf)
524{
525 struct i2c_client *client = to_i2c_client(dev);
526 struct lp5521_chip *chip = i2c_get_clientdata(client);
527 int ret;
528
529 mutex_lock(&chip->lock);
530 ret = lp5521_run_selftest(chip, buf);
531 mutex_unlock(&chip->lock);
532 return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
533}
534
535/* led class device attributes */
536static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
537static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
538
539static struct attribute *lp5521_led_attributes[] = {
540 &dev_attr_led_current.attr,
541 &dev_attr_max_current.attr,
542 NULL,
543};
544
545static struct attribute_group lp5521_led_attribute_group = {
546 .attrs = lp5521_led_attributes
547};
548
549/* device attributes */
550static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
551 show_engine1_mode, store_engine1_mode);
552static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
553 show_engine2_mode, store_engine2_mode);
554static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
555 show_engine3_mode, store_engine3_mode);
556static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
557static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
558static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
559static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
560
561static struct attribute *lp5521_attributes[] = {
562 &dev_attr_engine1_mode.attr,
563 &dev_attr_engine2_mode.attr,
564 &dev_attr_engine3_mode.attr,
565 &dev_attr_selftest.attr,
566 NULL
567};
568
569static struct attribute *lp5521_engine1_attributes[] = {
570 &dev_attr_engine1_load.attr,
571 NULL
572};
573
574static struct attribute *lp5521_engine2_attributes[] = {
575 &dev_attr_engine2_load.attr,
576 NULL
577};
578
579static struct attribute *lp5521_engine3_attributes[] = {
580 &dev_attr_engine3_load.attr,
581 NULL
582};
583
584static const struct attribute_group lp5521_group = {
585 .attrs = lp5521_attributes,
586};
587
588static const struct attribute_group lp5521_engine_group[] = {
589 {.attrs = lp5521_engine1_attributes },
590 {.attrs = lp5521_engine2_attributes },
591 {.attrs = lp5521_engine3_attributes },
592};
593
594static int lp5521_register_sysfs(struct i2c_client *client)
595{
596 struct device *dev = &client->dev;
597 return sysfs_create_group(&dev->kobj, &lp5521_group);
598}
599
600static void lp5521_unregister_sysfs(struct i2c_client *client)
601{
602 struct lp5521_chip *chip = i2c_get_clientdata(client);
603 struct device *dev = &client->dev;
604 int i;
605
606 sysfs_remove_group(&dev->kobj, &lp5521_group);
607
608 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
609 if (chip->engines[i].mode == LP5521_CMD_LOAD)
610 sysfs_remove_group(&dev->kobj,
611 chip->engines[i].attributes);
612 }
613
614 for (i = 0; i < chip->num_leds; i++)
615 sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
616 &lp5521_led_attribute_group);
617}
618
619static int __init lp5521_init_led(struct lp5521_led *led,
620 struct i2c_client *client,
621 int chan, struct lp5521_platform_data *pdata)
622{
623 struct device *dev = &client->dev;
624 char name[32];
625 int res;
626
627 if (chan >= LP5521_MAX_LEDS)
628 return -EINVAL;
629
630 if (pdata->led_config[chan].led_current == 0)
631 return 0;
632
633 led->led_current = pdata->led_config[chan].led_current;
634 led->max_current = pdata->led_config[chan].max_current;
635 led->chan_nr = pdata->led_config[chan].chan_nr;
636
637 if (led->chan_nr >= LP5521_MAX_LEDS) {
638 dev_err(dev, "Use channel numbers between 0 and %d\n",
639 LP5521_MAX_LEDS - 1);
640 return -EINVAL;
641 }
642
643 snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
644 led->cdev.brightness_set = lp5521_set_brightness;
645 led->cdev.name = name;
646 res = led_classdev_register(dev, &led->cdev);
647 if (res < 0) {
648 dev_err(dev, "couldn't register led on channel %d\n", chan);
649 return res;
650 }
651
652 res = sysfs_create_group(&led->cdev.dev->kobj,
653 &lp5521_led_attribute_group);
654 if (res < 0) {
655 dev_err(dev, "couldn't register current attribute\n");
656 led_classdev_unregister(&led->cdev);
657 return res;
658 }
659 return 0;
660}
661
662static int lp5521_probe(struct i2c_client *client,
663 const struct i2c_device_id *id)
664{
665 struct lp5521_chip *chip;
666 struct lp5521_platform_data *pdata;
667 int ret, i, led;
668
669 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
670 if (!chip)
671 return -ENOMEM;
672
673 i2c_set_clientdata(client, chip);
674 chip->client = client;
675
676 pdata = client->dev.platform_data;
677
678 if (!pdata) {
679 dev_err(&client->dev, "no platform data\n");
680 ret = -EINVAL;
681 goto fail1;
682 }
683
684 mutex_init(&chip->lock);
685
686 chip->pdata = pdata;
687
688 if (pdata->setup_resources) {
689 ret = pdata->setup_resources();
690 if (ret < 0)
691 goto fail1;
692 }
693
694 if (pdata->enable) {
695 pdata->enable(0);
696 usleep_range(1000, 10000);
697 pdata->enable(1);
698 usleep_range(1000, 10000); /* Spec says min 500us */
699 }
700
701 ret = lp5521_detect(client);
702
703 if (ret) {
704 dev_err(&client->dev, "Chip not found\n");
705 goto fail2;
706 }
707
708 dev_info(&client->dev, "%s programmable led chip found\n", id->name);
709
710 ret = lp5521_configure(client, lp5521_engine_group);
711 if (ret < 0) {
712 dev_err(&client->dev, "error configuring chip\n");
713 goto fail2;
714 }
715
716 /* Initialize leds */
717 chip->num_channels = pdata->num_channels;
718 chip->num_leds = 0;
719 led = 0;
720 for (i = 0; i < pdata->num_channels; i++) {
721 /* Do not initialize channels that are not connected */
722 if (pdata->led_config[i].led_current == 0)
723 continue;
724
725 ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
726 if (ret) {
727 dev_err(&client->dev, "error initializing leds\n");
728 goto fail3;
729 }
730 chip->num_leds++;
731
732 chip->leds[led].id = led;
733 /* Set initial LED current */
734 lp5521_set_led_current(chip, led,
735 chip->leds[led].led_current);
736
737 INIT_WORK(&(chip->leds[led].brightness_work),
738 lp5521_led_brightness_work);
739
740 led++;
741 }
742
743 ret = lp5521_register_sysfs(client);
744 if (ret) {
745 dev_err(&client->dev, "registering sysfs failed\n");
746 goto fail3;
747 }
748 return ret;
749fail3:
750 for (i = 0; i < chip->num_leds; i++) {
751 led_classdev_unregister(&chip->leds[i].cdev);
752 cancel_work_sync(&chip->leds[i].brightness_work);
753 }
754fail2:
755 if (pdata->enable)
756 pdata->enable(0);
757 if (pdata->release_resources)
758 pdata->release_resources();
759fail1:
760 kfree(chip);
761 return ret;
762}
763
764static int lp5521_remove(struct i2c_client *client)
765{
766 struct lp5521_chip *chip = i2c_get_clientdata(client);
767 int i;
768
769 lp5521_unregister_sysfs(client);
770
771 for (i = 0; i < chip->num_leds; i++) {
772 led_classdev_unregister(&chip->leds[i].cdev);
773 cancel_work_sync(&chip->leds[i].brightness_work);
774 }
775
776 if (chip->pdata->enable)
777 chip->pdata->enable(0);
778 if (chip->pdata->release_resources)
779 chip->pdata->release_resources();
780 kfree(chip);
781 return 0;
782}
783
784static const struct i2c_device_id lp5521_id[] = {
785 { "lp5521", 0 }, /* Three channel chip */
786 { }
787};
788MODULE_DEVICE_TABLE(i2c, lp5521_id);
789
790static struct i2c_driver lp5521_driver = {
791 .driver = {
792 .name = "lp5521",
793 },
794 .probe = lp5521_probe,
795 .remove = lp5521_remove,
796 .id_table = lp5521_id,
797};
798
799static int __init lp5521_init(void)
800{
801 int ret;
802
803 ret = i2c_add_driver(&lp5521_driver);
804
805 if (ret < 0)
806 printk(KERN_ALERT "Adding lp5521 driver failed\n");
807
808 return ret;
809}
810
811static void __exit lp5521_exit(void)
812{
813 i2c_del_driver(&lp5521_driver);
814}
815
816module_init(lp5521_init);
817module_exit(lp5521_exit);
818
819MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
820MODULE_DESCRIPTION("LP5521 LED engine");
821MODULE_LICENSE("GPL v2");
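
The driver expects board code to hand it platform data describing the connected channels: probe bails out with -EINVAL without it, and channels whose led_current is zero are skipped. A hedged sketch of such board data follows, with the struct and field names inferred from the probe code above (the actual definitions live in <linux/leds-lp5521.h>, which is not part of this hunk, and the current values are purely illustrative):

#include <linux/kernel.h>
#include <linux/leds-lp5521.h>

/* Assumed layout: one entry per channel; led_current == 0 marks a
 * channel as not connected, exactly as the probe loop above expects. */
static struct lp5521_led_config example_lp5521_leds[] = {
	{ .chan_nr = 0, .led_current = 50, .max_current = 130 },
	{ .chan_nr = 1, .led_current = 0,  .max_current = 130 },	/* not connected */
	{ .chan_nr = 2, .led_current = 50, .max_current = 130 },
};

static struct lp5521_platform_data example_lp5521_pdata = {
	.led_config	= example_lp5521_leds,
	.num_channels	= ARRAY_SIZE(example_lp5521_leds),
	.clock_mode	= LP5521_CLOCK_EXT,	/* checked by the selftest above */
};

Once probed, each connected channel shows up as a LED class device named "<client-name>:channel<n>" carrying the led_current and max_current attributes created above, while the engine1..3_mode and engine1..3_load attributes on the I2C device drive the programmable engines.
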
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
new file mode 100644
index 00000000000..1e11fcc08b2
--- /dev/null
+++ b/drivers/leds/leds-lp5523.c
@@ -0,0 +1,1065 @@
1/*
2 * lp5523.c - LP5523 LED Driver
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/i2c.h>
26#include <linux/mutex.h>
27#include <linux/gpio.h>
28#include <linux/interrupt.h>
29#include <linux/delay.h>
30#include <linux/ctype.h>
31#include <linux/spinlock.h>
32#include <linux/wait.h>
33#include <linux/leds.h>
34#include <linux/leds-lp5523.h>
35#include <linux/workqueue.h>
36#include <linux/slab.h>
37
38#define LP5523_REG_ENABLE 0x00
39#define LP5523_REG_OP_MODE 0x01
40#define LP5523_REG_RATIOMETRIC_MSB 0x02
41#define LP5523_REG_RATIOMETRIC_LSB 0x03
42#define LP5523_REG_ENABLE_LEDS_MSB 0x04
43#define LP5523_REG_ENABLE_LEDS_LSB 0x05
44#define LP5523_REG_LED_CNTRL_BASE 0x06
45#define LP5523_REG_LED_PWM_BASE 0x16
46#define LP5523_REG_LED_CURRENT_BASE 0x26
47#define LP5523_REG_CONFIG 0x36
48#define LP5523_REG_CHANNEL1_PC 0x37
49#define LP5523_REG_CHANNEL2_PC 0x38
50#define LP5523_REG_CHANNEL3_PC 0x39
51#define LP5523_REG_STATUS 0x3a
52#define LP5523_REG_GPO 0x3b
53#define LP5523_REG_VARIABLE 0x3c
54#define LP5523_REG_RESET 0x3d
55#define LP5523_REG_TEMP_CTRL 0x3e
56#define LP5523_REG_TEMP_READ 0x3f
57#define LP5523_REG_TEMP_WRITE 0x40
58#define LP5523_REG_LED_TEST_CTRL 0x41
59#define LP5523_REG_LED_TEST_ADC 0x42
60#define LP5523_REG_ENG1_VARIABLE 0x45
61#define LP5523_REG_ENG2_VARIABLE 0x46
62#define LP5523_REG_ENG3_VARIABLE 0x47
63#define LP5523_REG_MASTER_FADER1 0x48
64#define LP5523_REG_MASTER_FADER2 0x49
65#define LP5523_REG_MASTER_FADER3 0x4a
66#define LP5523_REG_CH1_PROG_START 0x4c
67#define LP5523_REG_CH2_PROG_START 0x4d
68#define LP5523_REG_CH3_PROG_START 0x4e
69#define LP5523_REG_PROG_PAGE_SEL 0x4f
70#define LP5523_REG_PROG_MEM 0x50
71
72#define LP5523_CMD_LOAD 0x15 /* 00010101 */
73#define LP5523_CMD_RUN 0x2a /* 00101010 */
74#define LP5523_CMD_DISABLED 0x00 /* 00000000 */
75
76#define LP5523_ENABLE 0x40
77#define LP5523_AUTO_INC 0x40
78#define LP5523_PWR_SAVE 0x20
79#define LP5523_PWM_PWR_SAVE 0x04
80#define LP5523_CP_1 0x08
81#define LP5523_CP_1_5 0x10
82#define LP5523_CP_AUTO 0x18
83#define LP5523_INT_CLK 0x01
84#define LP5523_AUTO_CLK 0x02
85#define LP5523_EN_LEDTEST 0x80
86#define LP5523_LEDTEST_DONE 0x80
87
88#define LP5523_DEFAULT_CURRENT 50 /* microAmps */
89#define LP5523_PROGRAM_LENGTH 32 /* in bytes */
90#define LP5523_PROGRAM_PAGES 6
91#define LP5523_ADC_SHORTCIRC_LIM 80
92
93#define LP5523_LEDS 9
94#define LP5523_ENGINES 3
95
96#define LP5523_ENG_MASK_BASE 0x30 /* 00110000 */
97
98#define LP5523_ENG_STATUS_MASK 0x07 /* 00000111 */
99
100#define LP5523_IRQ_FLAGS IRQF_TRIGGER_FALLING
101
102#define LP5523_EXT_CLK_USED 0x08
103
104#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led)))
105#define SHIFT_MASK(id) (((id) - 1) * 2)
106
107struct lp5523_engine {
108 const struct attribute_group *attributes;
109 int id;
110 u8 mode;
111 u8 prog_page;
112 u8 mux_page;
113 u16 led_mux;
114 u8 engine_mask;
115};
116
117struct lp5523_led {
118 int id;
119 u8 chan_nr;
120 u8 led_current;
121 u8 max_current;
122 struct led_classdev cdev;
123 struct work_struct brightness_work;
124 u8 brightness;
125};
126
127struct lp5523_chip {
128 struct mutex lock; /* Serialize control */
129 struct i2c_client *client;
130 struct lp5523_engine engines[LP5523_ENGINES];
131 struct lp5523_led leds[LP5523_LEDS];
132 struct lp5523_platform_data *pdata;
133 u8 num_channels;
134 u8 num_leds;
135};
136
137#define cdev_to_led(c) container_of(c, struct lp5523_led, cdev)
138
139static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
140{
141 return container_of(engine, struct lp5523_chip,
142 engines[engine->id - 1]);
143}
144
145static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
146{
147 return container_of(led, struct lp5523_chip,
148 leds[led->id]);
149}
150
151static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
152static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
153static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern);
154
155static void lp5523_led_brightness_work(struct work_struct *work);
156
157static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
158{
159 return i2c_smbus_write_byte_data(client, reg, value);
160}
161
162static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
163{
164 s32 ret = i2c_smbus_read_byte_data(client, reg);
165
166 if (ret < 0)
167 return -EIO;
168
169 *buf = ret;
170 return 0;
171}
172
173static int lp5523_detect(struct i2c_client *client)
174{
175 int ret;
176 u8 buf;
177
178 ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40);
179 if (ret)
180 return ret;
181 ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
182 if (ret)
183 return ret;
184 if (buf == 0x40)
185 return 0;
186 else
187 return -ENODEV;
188}
189
190static int lp5523_configure(struct i2c_client *client)
191{
192 struct lp5523_chip *chip = i2c_get_clientdata(client);
193 int ret = 0;
194 u8 status;
195
196 /* one pattern per engine setting led mux start and stop addresses */
197 u8 pattern[][LP5523_PROGRAM_LENGTH] = {
198 { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
199 { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
200 { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
201 };
202
203 lp5523_write(client, LP5523_REG_RESET, 0xff);
204
205 usleep_range(10000, 100000);
206
207 ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
208 /* Chip startup time after reset is 500 us */
209 usleep_range(1000, 10000);
210
211 ret |= lp5523_write(client, LP5523_REG_CONFIG,
212 LP5523_AUTO_INC | LP5523_PWR_SAVE |
213 LP5523_CP_AUTO | LP5523_AUTO_CLK |
214 LP5523_PWM_PWR_SAVE);
215
216 /* turn on all leds */
217 ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
218 ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
219
220 /* hardcode 32 bytes of memory for each engine from program memory */
221 ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00);
222 ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10);
223 ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20);
224
225 /* write led mux address space for each channel */
226 ret |= lp5523_load_program(&chip->engines[0], pattern[0]);
227 ret |= lp5523_load_program(&chip->engines[1], pattern[1]);
228 ret |= lp5523_load_program(&chip->engines[2], pattern[2]);
229
230 if (ret) {
231 dev_err(&client->dev, "could not load mux programs\n");
232 return -1;
233 }
234
235 /* set all engines exec state and mode to run 00101010 */
236 ret |= lp5523_write(client, LP5523_REG_ENABLE,
237 (LP5523_CMD_RUN | LP5523_ENABLE));
238
239 ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN);
240
241 if (ret) {
242 dev_err(&client->dev, "could not start mux programs\n");
243 return -1;
244 }
245
246 /* Wait 3ms and check the engine status */
247 usleep_range(3000, 20000);
248 lp5523_read(client, LP5523_REG_STATUS, &status);
249 status &= LP5523_ENG_STATUS_MASK;
250
251 if (status == LP5523_ENG_STATUS_MASK) {
252 dev_dbg(&client->dev, "all engines configured\n");
253 } else {
254 dev_info(&client->dev, "status == %x\n", status);
 255		dev_err(&client->dev, "could not configure LED engine\n");
256 return -1;
257 }
258
259 dev_info(&client->dev, "disabling engines\n");
260
261 ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
262
263 return ret;
264}
265
266static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode)
267{
268 struct lp5523_chip *chip = engine_to_lp5523(engine);
269 struct i2c_client *client = chip->client;
270 int ret;
271 u8 engine_state;
272
273 ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state);
274 if (ret)
275 goto fail;
276
277 engine_state &= ~(engine->engine_mask);
278
279 /* set mode only for this engine */
280 mode &= engine->engine_mask;
281
282 engine_state |= mode;
283
284 ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state);
285fail:
286 return ret;
287}
288
289static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux)
290{
291 struct lp5523_chip *chip = engine_to_lp5523(engine);
292 struct i2c_client *client = chip->client;
293 int ret = 0;
294
295 ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
296
297 ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page);
298 ret |= lp5523_write(client, LP5523_REG_PROG_MEM,
299 (u8)(mux >> 8));
300 ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux));
301 engine->led_mux = mux;
302
303 return ret;
304}
305
306static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern)
307{
308 struct lp5523_chip *chip = engine_to_lp5523(engine);
309 struct i2c_client *client = chip->client;
310
311 int ret = 0;
312
313 ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
314
315 ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL,
316 engine->prog_page);
317 ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM,
318 LP5523_PROGRAM_LENGTH, pattern);
319
320 return ret;
321}
322
323static int lp5523_run_program(struct lp5523_engine *engine)
324{
325 struct lp5523_chip *chip = engine_to_lp5523(engine);
326 struct i2c_client *client = chip->client;
327 int ret;
328
329 ret = lp5523_write(client, LP5523_REG_ENABLE,
330 LP5523_CMD_RUN | LP5523_ENABLE);
331 if (ret)
332 goto fail;
333
334 ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN);
335fail:
336 return ret;
337}
338
339static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
340{
341 int i;
342 u16 tmp_mux = 0;
343 len = len < LP5523_LEDS ? len : LP5523_LEDS;
344 for (i = 0; i < len; i++) {
345 switch (buf[i]) {
346 case '1':
347 tmp_mux |= (1 << i);
348 break;
349 case '0':
350 break;
351 case '\n':
352 i = len;
353 break;
354 default:
355 return -1;
356 }
357 }
358 *mux = tmp_mux;
359
360 return 0;
361}
362
363static void lp5523_mux_to_array(u16 led_mux, char *array)
364{
365 int i, pos = 0;
366 for (i = 0; i < LP5523_LEDS; i++)
367 pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
368
369 array[pos] = '\0';
370}
371
372/*--------------------------------------------------------------*/
373/* Sysfs interface */
374/*--------------------------------------------------------------*/
375
376static ssize_t show_engine_leds(struct device *dev,
377 struct device_attribute *attr,
378 char *buf, int nr)
379{
380 struct i2c_client *client = to_i2c_client(dev);
381 struct lp5523_chip *chip = i2c_get_clientdata(client);
382 char mux[LP5523_LEDS + 1];
383
384 lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
385
386 return sprintf(buf, "%s\n", mux);
387}
388
389#define show_leds(nr) \
390static ssize_t show_engine##nr##_leds(struct device *dev, \
391 struct device_attribute *attr, \
392 char *buf) \
393{ \
394 return show_engine_leds(dev, attr, buf, nr); \
395}
396show_leds(1)
397show_leds(2)
398show_leds(3)
399
400static ssize_t store_engine_leds(struct device *dev,
401 struct device_attribute *attr,
402 const char *buf, size_t len, int nr)
403{
404 struct i2c_client *client = to_i2c_client(dev);
405 struct lp5523_chip *chip = i2c_get_clientdata(client);
406 u16 mux = 0;
407
408 if (lp5523_mux_parse(buf, &mux, len))
409 return -EINVAL;
410
411 if (lp5523_load_mux(&chip->engines[nr - 1], mux))
412 return -EINVAL;
413
414 return len;
415}
416
417#define store_leds(nr) \
418static ssize_t store_engine##nr##_leds(struct device *dev, \
419 struct device_attribute *attr, \
420 const char *buf, size_t len) \
421{ \
422 return store_engine_leds(dev, attr, buf, len, nr); \
423}
424store_leds(1)
425store_leds(2)
426store_leds(3)
427
428static ssize_t lp5523_selftest(struct device *dev,
429 struct device_attribute *attr,
430 char *buf)
431{
432 struct i2c_client *client = to_i2c_client(dev);
433 struct lp5523_chip *chip = i2c_get_clientdata(client);
434 int i, ret, pos = 0;
435 int led = 0;
436 u8 status, adc, vdd;
437
438 mutex_lock(&chip->lock);
439
440 ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
441 if (ret < 0)
442 goto fail;
443
444 /* Check that ext clock is really in use if requested */
445 if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT))
446 if ((status & LP5523_EXT_CLK_USED) == 0)
447 goto fail;
448
449 /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
450 lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
451 LP5523_EN_LEDTEST | 16);
452 usleep_range(3000, 10000);
453 ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
454 if (!(status & LP5523_LEDTEST_DONE))
455 usleep_range(3000, 10000);
456
457 ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
458 vdd--; /* There may be some fluctuation in measurement */
459
460 for (i = 0; i < LP5523_LEDS; i++) {
461 /* Skip non-existing channels */
462 if (chip->pdata->led_config[i].led_current == 0)
463 continue;
464
465 /* Set default current */
466 lp5523_write(chip->client,
467 LP5523_REG_LED_CURRENT_BASE + i,
468 chip->pdata->led_config[i].led_current);
469
470 lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
471 /* let current stabilize 2ms before measurements start */
472 usleep_range(2000, 10000);
473 lp5523_write(chip->client,
474 LP5523_REG_LED_TEST_CTRL,
475 LP5523_EN_LEDTEST | i);
476 /* ledtest takes 2.7ms */
477 usleep_range(3000, 10000);
478 ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
479 if (!(status & LP5523_LEDTEST_DONE))
480 usleep_range(3000, 10000);
481 ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
482
483 if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
484 pos += sprintf(buf + pos, "LED %d FAIL\n", i);
485
486 lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00);
487
488 /* Restore current */
489 lp5523_write(chip->client,
490 LP5523_REG_LED_CURRENT_BASE + i,
491 chip->leds[led].led_current);
492 led++;
493 }
494 if (pos == 0)
495 pos = sprintf(buf, "OK\n");
496 goto release_lock;
497fail:
498 pos = sprintf(buf, "FAIL\n");
499
500release_lock:
501 mutex_unlock(&chip->lock);
502
503 return pos;
504}
505
506static void lp5523_set_brightness(struct led_classdev *cdev,
507 enum led_brightness brightness)
508{
509 struct lp5523_led *led = cdev_to_led(cdev);
510
511 led->brightness = (u8)brightness;
512
513 schedule_work(&led->brightness_work);
514}
515
516static void lp5523_led_brightness_work(struct work_struct *work)
517{
518 struct lp5523_led *led = container_of(work,
519 struct lp5523_led,
520 brightness_work);
521 struct lp5523_chip *chip = led_to_lp5523(led);
522 struct i2c_client *client = chip->client;
523
524 mutex_lock(&chip->lock);
525
526 lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr,
527 led->brightness);
528
529 mutex_unlock(&chip->lock);
530}
531
532static int lp5523_do_store_load(struct lp5523_engine *engine,
533 const char *buf, size_t len)
534{
535 struct lp5523_chip *chip = engine_to_lp5523(engine);
536 struct i2c_client *client = chip->client;
537 int ret, nrchars, offset = 0, i = 0;
538 char c[3];
539 unsigned cmd;
540 u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
541
542 while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
 544		/* separate sscanfs because the length limit works only for %s */
544 ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
545 ret = sscanf(c, "%2x", &cmd);
546 if (ret != 1)
547 goto fail;
548 pattern[i] = (u8)cmd;
549
550 offset += nrchars;
551 i++;
552 }
553
554 /* Each instruction is 16bit long. Check that length is even */
555 if (i % 2)
556 goto fail;
557
558 mutex_lock(&chip->lock);
559
560 ret = lp5523_load_program(engine, pattern);
561 mutex_unlock(&chip->lock);
562
563 if (ret) {
564 dev_err(&client->dev, "failed loading pattern\n");
565 return ret;
566 }
567
568 return len;
569fail:
570 dev_err(&client->dev, "wrong pattern format\n");
571 return -EINVAL;
572}
573
574static ssize_t store_engine_load(struct device *dev,
575 struct device_attribute *attr,
576 const char *buf, size_t len, int nr)
577{
578 struct i2c_client *client = to_i2c_client(dev);
579 struct lp5523_chip *chip = i2c_get_clientdata(client);
580 return lp5523_do_store_load(&chip->engines[nr - 1], buf, len);
581}
582
583#define store_load(nr) \
584static ssize_t store_engine##nr##_load(struct device *dev, \
585 struct device_attribute *attr, \
586 const char *buf, size_t len) \
587{ \
588 return store_engine_load(dev, attr, buf, len, nr); \
589}
590store_load(1)
591store_load(2)
592store_load(3)
593
594static ssize_t show_engine_mode(struct device *dev,
595 struct device_attribute *attr,
596 char *buf, int nr)
597{
598 struct i2c_client *client = to_i2c_client(dev);
599 struct lp5523_chip *chip = i2c_get_clientdata(client);
600 switch (chip->engines[nr - 1].mode) {
601 case LP5523_CMD_RUN:
602 return sprintf(buf, "run\n");
603 case LP5523_CMD_LOAD:
604 return sprintf(buf, "load\n");
605 case LP5523_CMD_DISABLED:
606 return sprintf(buf, "disabled\n");
607 default:
608 return sprintf(buf, "disabled\n");
609 }
610}
611
612#define show_mode(nr) \
613static ssize_t show_engine##nr##_mode(struct device *dev, \
614 struct device_attribute *attr, \
615 char *buf) \
616{ \
617 return show_engine_mode(dev, attr, buf, nr); \
618}
619show_mode(1)
620show_mode(2)
621show_mode(3)
622
623static ssize_t store_engine_mode(struct device *dev,
624 struct device_attribute *attr,
625 const char *buf, size_t len, int nr)
626{
627 struct i2c_client *client = to_i2c_client(dev);
628 struct lp5523_chip *chip = i2c_get_clientdata(client);
629 struct lp5523_engine *engine = &chip->engines[nr - 1];
630 mutex_lock(&chip->lock);
631
632 if (!strncmp(buf, "run", 3))
633 lp5523_set_mode(engine, LP5523_CMD_RUN);
634 else if (!strncmp(buf, "load", 4))
635 lp5523_set_mode(engine, LP5523_CMD_LOAD);
636 else if (!strncmp(buf, "disabled", 8))
637 lp5523_set_mode(engine, LP5523_CMD_DISABLED);
638
639 mutex_unlock(&chip->lock);
640 return len;
641}
642
643#define store_mode(nr) \
644static ssize_t store_engine##nr##_mode(struct device *dev, \
645 struct device_attribute *attr, \
646 const char *buf, size_t len) \
647{ \
648 return store_engine_mode(dev, attr, buf, len, nr); \
649}
650store_mode(1)
651store_mode(2)
652store_mode(3)
653
654static ssize_t show_max_current(struct device *dev,
655 struct device_attribute *attr,
656 char *buf)
657{
658 struct led_classdev *led_cdev = dev_get_drvdata(dev);
659 struct lp5523_led *led = cdev_to_led(led_cdev);
660
661 return sprintf(buf, "%d\n", led->max_current);
662}
663
664static ssize_t show_current(struct device *dev,
665 struct device_attribute *attr,
666 char *buf)
667{
668 struct led_classdev *led_cdev = dev_get_drvdata(dev);
669 struct lp5523_led *led = cdev_to_led(led_cdev);
670
671 return sprintf(buf, "%d\n", led->led_current);
672}
673
674static ssize_t store_current(struct device *dev,
675 struct device_attribute *attr,
676 const char *buf, size_t len)
677{
678 struct led_classdev *led_cdev = dev_get_drvdata(dev);
679 struct lp5523_led *led = cdev_to_led(led_cdev);
680 struct lp5523_chip *chip = led_to_lp5523(led);
681 ssize_t ret;
682 unsigned long curr;
683
684 if (strict_strtoul(buf, 0, &curr))
685 return -EINVAL;
686
687 if (curr > led->max_current)
688 return -EINVAL;
689
690 mutex_lock(&chip->lock);
691 ret = lp5523_write(chip->client,
692 LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
693 (u8)curr);
694 mutex_unlock(&chip->lock);
695
696 if (ret < 0)
697 return ret;
698
699 led->led_current = (u8)curr;
700
701 return len;
702}
703
704/* led class device attributes */
705static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
706static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
707
708static struct attribute *lp5523_led_attributes[] = {
709 &dev_attr_led_current.attr,
710 &dev_attr_max_current.attr,
711 NULL,
712};
713
714static struct attribute_group lp5523_led_attribute_group = {
715 .attrs = lp5523_led_attributes
716};
717
718/* device attributes */
719static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
720 show_engine1_mode, store_engine1_mode);
721static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
722 show_engine2_mode, store_engine2_mode);
723static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
724 show_engine3_mode, store_engine3_mode);
725static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO,
726 show_engine1_leds, store_engine1_leds);
727static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO,
728 show_engine2_leds, store_engine2_leds);
729static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO,
730 show_engine3_leds, store_engine3_leds);
731static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
732static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
733static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
734static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
735
736static struct attribute *lp5523_attributes[] = {
737 &dev_attr_engine1_mode.attr,
738 &dev_attr_engine2_mode.attr,
739 &dev_attr_engine3_mode.attr,
740 &dev_attr_selftest.attr,
741 NULL
742};
743
744static struct attribute *lp5523_engine1_attributes[] = {
745 &dev_attr_engine1_load.attr,
746 &dev_attr_engine1_leds.attr,
747 NULL
748};
749
750static struct attribute *lp5523_engine2_attributes[] = {
751 &dev_attr_engine2_load.attr,
752 &dev_attr_engine2_leds.attr,
753 NULL
754};
755
756static struct attribute *lp5523_engine3_attributes[] = {
757 &dev_attr_engine3_load.attr,
758 &dev_attr_engine3_leds.attr,
759 NULL
760};
761
762static const struct attribute_group lp5523_group = {
763 .attrs = lp5523_attributes,
764};
765
766static const struct attribute_group lp5523_engine_group[] = {
767 {.attrs = lp5523_engine1_attributes },
768 {.attrs = lp5523_engine2_attributes },
769 {.attrs = lp5523_engine3_attributes },
770};
771
772static int lp5523_register_sysfs(struct i2c_client *client)
773{
774 struct device *dev = &client->dev;
775 int ret;
776
777 ret = sysfs_create_group(&dev->kobj, &lp5523_group);
778 if (ret < 0)
779 return ret;
780
781 return 0;
782}
783
784static void lp5523_unregister_sysfs(struct i2c_client *client)
785{
786 struct lp5523_chip *chip = i2c_get_clientdata(client);
787 struct device *dev = &client->dev;
788 int i;
789
790 sysfs_remove_group(&dev->kobj, &lp5523_group);
791
792 for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
793 if (chip->engines[i].mode == LP5523_CMD_LOAD)
794 sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
795
796 for (i = 0; i < chip->num_leds; i++)
797 sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
798 &lp5523_led_attribute_group);
799}
800
801/*--------------------------------------------------------------*/
802/* Set chip operating mode */
803/*--------------------------------------------------------------*/
804static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
805{
806 /* engine to chip */
807 struct lp5523_chip *chip = engine_to_lp5523(engine);
808 struct i2c_client *client = chip->client;
809 struct device *dev = &client->dev;
810 int ret = 0;
811
812 /* if already in the requested mode do nothing, except for run which is re-applied */
813 if (mode == engine->mode && mode != LP5523_CMD_RUN)
814 return 0;
815
816 if (mode == LP5523_CMD_RUN) {
817 ret = lp5523_run_program(engine);
818 } else if (mode == LP5523_CMD_LOAD) {
819 lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
820 lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
821
822 ret = sysfs_create_group(&dev->kobj, engine->attributes);
823 if (ret)
824 return ret;
825 } else if (mode == LP5523_CMD_DISABLED) {
826 lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
827 }
828
829 /* remove load attribute from sysfs if not in load mode */
830 if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
831 sysfs_remove_group(&dev->kobj, engine->attributes);
832
833 engine->mode = mode;
834
835 return ret;
836}
837
838/*--------------------------------------------------------------*/
839/* Probe, Attach, Remove */
840/*--------------------------------------------------------------*/
841static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
842{
843 if (id < 1 || id > LP5523_ENGINES)
844 return -EINVAL;
845 engine->id = id;
846 engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
847 engine->prog_page = id - 1;
848 engine->mux_page = id + 2;
849 engine->attributes = &lp5523_engine_group[id - 1];
850
851 return 0;
852}
853
854static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
855 int chan, struct lp5523_platform_data *pdata)
856{
857 char name[32];
858 int res;
859
860 if (chan >= LP5523_LEDS)
861 return -EINVAL;
862
863 if (pdata->led_config[chan].led_current) {
864 led->led_current = pdata->led_config[chan].led_current;
865 led->max_current = pdata->led_config[chan].max_current;
866 led->chan_nr = pdata->led_config[chan].chan_nr;
867
868 if (led->chan_nr >= LP5523_LEDS) {
869 dev_err(dev, "Use channel numbers between 0 and %d\n",
870 LP5523_LEDS - 1);
871 return -EINVAL;
872 }
873
874 snprintf(name, sizeof(name), "lp5523:channel%d", chan);
875
876 led->cdev.name = name;
877 led->cdev.brightness_set = lp5523_set_brightness;
878 res = led_classdev_register(dev, &led->cdev);
879 if (res < 0) {
880 dev_err(dev, "couldn't register led on channel %d\n",
881 chan);
882 return res;
883 }
884 res = sysfs_create_group(&led->cdev.dev->kobj,
885 &lp5523_led_attribute_group);
886 if (res < 0) {
887 dev_err(dev, "couldn't register current attribute\n");
888 led_classdev_unregister(&led->cdev);
889 return res;
890 }
891 } else {
892 led->led_current = 0;
893 }
894 return 0;
895}
896
897static struct i2c_driver lp5523_driver;
898
899static int lp5523_probe(struct i2c_client *client,
900 const struct i2c_device_id *id)
901{
902 struct lp5523_chip *chip;
903 struct lp5523_platform_data *pdata;
904 int ret, i, led;
905
906 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
907 if (!chip)
908 return -ENOMEM;
909
910 i2c_set_clientdata(client, chip);
911 chip->client = client;
912
913 pdata = client->dev.platform_data;
914
915 if (!pdata) {
916 dev_err(&client->dev, "no platform data\n");
917 ret = -EINVAL;
918 goto fail1;
919 }
920
921 mutex_init(&chip->lock);
922
923 chip->pdata = pdata;
924
925 if (pdata->setup_resources) {
926 ret = pdata->setup_resources();
927 if (ret < 0)
928 goto fail1;
929 }
930
931 if (pdata->enable) {
932 pdata->enable(0);
933 usleep_range(1000, 10000);
934 pdata->enable(1);
935 usleep_range(1000, 10000); /* Spec says min 500us */
936 }
937
938 ret = lp5523_detect(client);
939 if (ret)
940 goto fail2;
941
942 dev_info(&client->dev, "LP5523 Programmable led chip found\n");
943
944 /* Initialize engines */
945 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
946 ret = lp5523_init_engine(&chip->engines[i], i + 1);
947 if (ret) {
948 dev_err(&client->dev, "error initializing engine\n");
949 goto fail2;
950 }
951 }
952 ret = lp5523_configure(client);
953 if (ret < 0) {
954 dev_err(&client->dev, "error configuring chip\n");
955 goto fail2;
956 }
957
958 /* Initialize leds */
959 chip->num_channels = pdata->num_channels;
960 chip->num_leds = 0;
961 led = 0;
962 for (i = 0; i < pdata->num_channels; i++) {
963 /* Do not initialize channels that are not connected */
964 if (pdata->led_config[i].led_current == 0)
965 continue;
966
967 ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata);
968 if (ret) {
969 dev_err(&client->dev, "error initializing leds\n");
970 goto fail3;
971 }
972 chip->num_leds++;
973
974 chip->leds[led].id = led;
975 /* Set LED current */
976 lp5523_write(client,
977 LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
978 chip->leds[led].led_current);
979
980 INIT_WORK(&(chip->leds[led].brightness_work),
981 lp5523_led_brightness_work);
982
983 led++;
984 }
985
986 ret = lp5523_register_sysfs(client);
987 if (ret) {
988 dev_err(&client->dev, "registering sysfs failed\n");
989 goto fail3;
990 }
991 return ret;
992fail3:
993 for (i = 0; i < chip->num_leds; i++) {
994 led_classdev_unregister(&chip->leds[i].cdev);
995 cancel_work_sync(&chip->leds[i].brightness_work);
996 }
997fail2:
998 if (pdata->enable)
999 pdata->enable(0);
1000 if (pdata->release_resources)
1001 pdata->release_resources();
1002fail1:
1003 kfree(chip);
1004 return ret;
1005}
1006
1007static int lp5523_remove(struct i2c_client *client)
1008{
1009 struct lp5523_chip *chip = i2c_get_clientdata(client);
1010 int i;
1011
1012 lp5523_unregister_sysfs(client);
1013
1014 for (i = 0; i < chip->num_leds; i++) {
1015 led_classdev_unregister(&chip->leds[i].cdev);
1016 cancel_work_sync(&chip->leds[i].brightness_work);
1017 }
1018
1019 if (chip->pdata->enable)
1020 chip->pdata->enable(0);
1021 if (chip->pdata->release_resources)
1022 chip->pdata->release_resources();
1023 kfree(chip);
1024 return 0;
1025}
1026
1027static const struct i2c_device_id lp5523_id[] = {
1028 { "lp5523", 0 },
1029 { }
1030};
1031
1032MODULE_DEVICE_TABLE(i2c, lp5523_id);
1033
1034static struct i2c_driver lp5523_driver = {
1035 .driver = {
1036 .name = "lp5523",
1037 },
1038 .probe = lp5523_probe,
1039 .remove = lp5523_remove,
1040 .id_table = lp5523_id,
1041};
1042
1043static int __init lp5523_init(void)
1044{
1045 int ret;
1046
1047 ret = i2c_add_driver(&lp5523_driver);
1048
1049 if (ret < 0)
1050 printk(KERN_ALERT "Adding lp5523 driver failed\n");
1051
1052 return ret;
1053}
1054
1055static void __exit lp5523_exit(void)
1056{
1057 i2c_del_driver(&lp5523_driver);
1058}
1059
1060module_init(lp5523_init);
1061module_exit(lp5523_exit);
1062
1063MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
1064MODULE_DESCRIPTION("LP5523 LED engine");
1065MODULE_LICENSE("GPL");
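
A board that wants this driver bound must hand lp5523_probe() platform data; probe() fails with -EINVAL otherwise. The sketch below shows roughly what a board file could pass, inferred only from the fields the probe path above dereferences (led_config[], num_channels, the optional setup_resources/release_resources hooks and the enable() callback). The header name, the exact struct layout and the enable() signature are assumptions, not taken from this patch.

/* Hypothetical board-file sketch; names and layout assumed as noted above. */
#include <linux/kernel.h>
#include <linux/leds-lp5523.h>		/* assumed location of the pdata types */

static struct lp5523_led_config board_lp5523_leds[] = {
	{ .chan_nr = 0, .led_current = 50, .max_current = 130 },
	{ .chan_nr = 1, .led_current = 0,  .max_current = 130 },	/* not connected: skipped by probe() */
	{ .chan_nr = 2, .led_current = 50, .max_current = 130 },
};

static void board_lp5523_enable(bool state)
{
	/* assumed: toggle the chip's enable GPIO here */
}

static struct lp5523_platform_data board_lp5523_pdata = {
	.led_config	= board_lp5523_leds,
	.num_channels	= ARRAY_SIZE(board_lp5523_leds),
	.enable		= board_lp5523_enable,
	/* .setup_resources/.release_resources left NULL; probe() treats them as optional */
};
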
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
index 3063f591f0d..1739557a903 100644
--- a/drivers/leds/leds-net5501.c
+++ b/drivers/leds/leds-net5501.c
@@ -92,3 +92,5 @@ unmap:
92} 92}
93 93
94arch_initcall(soekris_init); 94arch_initcall(soekris_init);
95
96MODULE_LICENSE("GPL");
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 82b77bd482f..b09bcbeade9 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -12,73 +12,25 @@
12 */ 12 */
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/jiffies.h>
16#include <linux/kernel.h> 15#include <linux/kernel.h>
17#include <linux/init.h> 16#include <linux/init.h>
18#include <linux/list.h>
19#include <linux/spinlock.h>
20#include <linux/device.h> 17#include <linux/device.h>
21#include <linux/sysdev.h>
22#include <linux/timer.h>
23#include <linux/ctype.h> 18#include <linux/ctype.h>
24#include <linux/leds.h> 19#include <linux/leds.h>
25#include <linux/slab.h>
26#include "leds.h" 20#include "leds.h"
27 21
28struct timer_trig_data {
29 int brightness_on; /* LED brightness during "on" period.
30 * (LED_OFF < brightness_on <= LED_FULL)
31 */
32 unsigned long delay_on; /* milliseconds on */
33 unsigned long delay_off; /* milliseconds off */
34 struct timer_list timer;
35};
36
37static void led_timer_function(unsigned long data)
38{
39 struct led_classdev *led_cdev = (struct led_classdev *) data;
40 struct timer_trig_data *timer_data = led_cdev->trigger_data;
41 unsigned long brightness;
42 unsigned long delay;
43
44 if (!timer_data->delay_on || !timer_data->delay_off) {
45 led_set_brightness(led_cdev, LED_OFF);
46 return;
47 }
48
49 brightness = led_get_brightness(led_cdev);
50 if (!brightness) {
51 /* Time to switch the LED on. */
52 brightness = timer_data->brightness_on;
53 delay = timer_data->delay_on;
54 } else {
55 /* Store the current brightness value to be able
56 * to restore it when the delay_off period is over.
57 */
58 timer_data->brightness_on = brightness;
59 brightness = LED_OFF;
60 delay = timer_data->delay_off;
61 }
62
63 led_set_brightness(led_cdev, brightness);
64
65 mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
66}
67
68static ssize_t led_delay_on_show(struct device *dev, 22static ssize_t led_delay_on_show(struct device *dev,
69 struct device_attribute *attr, char *buf) 23 struct device_attribute *attr, char *buf)
70{ 24{
71 struct led_classdev *led_cdev = dev_get_drvdata(dev); 25 struct led_classdev *led_cdev = dev_get_drvdata(dev);
72 struct timer_trig_data *timer_data = led_cdev->trigger_data;
73 26
74 return sprintf(buf, "%lu\n", timer_data->delay_on); 27 return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
75} 28}
76 29
77static ssize_t led_delay_on_store(struct device *dev, 30static ssize_t led_delay_on_store(struct device *dev,
78 struct device_attribute *attr, const char *buf, size_t size) 31 struct device_attribute *attr, const char *buf, size_t size)
79{ 32{
80 struct led_classdev *led_cdev = dev_get_drvdata(dev); 33 struct led_classdev *led_cdev = dev_get_drvdata(dev);
81 struct timer_trig_data *timer_data = led_cdev->trigger_data;
82 int ret = -EINVAL; 34 int ret = -EINVAL;
83 char *after; 35 char *after;
84 unsigned long state = simple_strtoul(buf, &after, 10); 36 unsigned long state = simple_strtoul(buf, &after, 10);
@@ -88,21 +40,7 @@ static ssize_t led_delay_on_store(struct device *dev,
88 count++; 40 count++;
89 41
90 if (count == size) { 42 if (count == size) {
91 if (timer_data->delay_on != state) { 43 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
92 /* the new value differs from the previous */
93 timer_data->delay_on = state;
94
95 /* deactivate previous settings */
96 del_timer_sync(&timer_data->timer);
97
98 /* try to activate hardware acceleration, if any */
99 if (!led_cdev->blink_set ||
100 led_cdev->blink_set(led_cdev,
101 &timer_data->delay_on, &timer_data->delay_off)) {
102 /* no hardware acceleration, blink via timer */
103 mod_timer(&timer_data->timer, jiffies + 1);
104 }
105 }
106 ret = count; 44 ret = count;
107 } 45 }
108 46
@@ -113,16 +51,14 @@ static ssize_t led_delay_off_show(struct device *dev,
113 struct device_attribute *attr, char *buf) 51 struct device_attribute *attr, char *buf)
114{ 52{
115 struct led_classdev *led_cdev = dev_get_drvdata(dev); 53 struct led_classdev *led_cdev = dev_get_drvdata(dev);
116 struct timer_trig_data *timer_data = led_cdev->trigger_data;
117 54
118 return sprintf(buf, "%lu\n", timer_data->delay_off); 55 return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
119} 56}
120 57
121static ssize_t led_delay_off_store(struct device *dev, 58static ssize_t led_delay_off_store(struct device *dev,
122 struct device_attribute *attr, const char *buf, size_t size) 59 struct device_attribute *attr, const char *buf, size_t size)
123{ 60{
124 struct led_classdev *led_cdev = dev_get_drvdata(dev); 61 struct led_classdev *led_cdev = dev_get_drvdata(dev);
125 struct timer_trig_data *timer_data = led_cdev->trigger_data;
126 int ret = -EINVAL; 62 int ret = -EINVAL;
127 char *after; 63 char *after;
128 unsigned long state = simple_strtoul(buf, &after, 10); 64 unsigned long state = simple_strtoul(buf, &after, 10);
@@ -132,21 +68,7 @@ static ssize_t led_delay_off_store(struct device *dev,
132 count++; 68 count++;
133 69
134 if (count == size) { 70 if (count == size) {
135 if (timer_data->delay_off != state) { 71 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
136 /* the new value differs from the previous */
137 timer_data->delay_off = state;
138
139 /* deactivate previous settings */
140 del_timer_sync(&timer_data->timer);
141
142 /* try to activate hardware acceleration, if any */
143 if (!led_cdev->blink_set ||
144 led_cdev->blink_set(led_cdev,
145 &timer_data->delay_on, &timer_data->delay_off)) {
146 /* no hardware acceleration, blink via timer */
147 mod_timer(&timer_data->timer, jiffies + 1);
148 }
149 }
150 ret = count; 72 ret = count;
151 } 73 }
152 74
@@ -158,60 +80,34 @@ static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
158 80
159static void timer_trig_activate(struct led_classdev *led_cdev) 81static void timer_trig_activate(struct led_classdev *led_cdev)
160{ 82{
161 struct timer_trig_data *timer_data;
162 int rc; 83 int rc;
163 84
164 timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL); 85 led_cdev->trigger_data = NULL;
165 if (!timer_data)
166 return;
167
168 timer_data->brightness_on = led_get_brightness(led_cdev);
169 if (timer_data->brightness_on == LED_OFF)
170 timer_data->brightness_on = led_cdev->max_brightness;
171 led_cdev->trigger_data = timer_data;
172
173 init_timer(&timer_data->timer);
174 timer_data->timer.function = led_timer_function;
175 timer_data->timer.data = (unsigned long) led_cdev;
176 86
177 rc = device_create_file(led_cdev->dev, &dev_attr_delay_on); 87 rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
178 if (rc) 88 if (rc)
179 goto err_out; 89 return;
180 rc = device_create_file(led_cdev->dev, &dev_attr_delay_off); 90 rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
181 if (rc) 91 if (rc)
182 goto err_out_delayon; 92 goto err_out_delayon;
183 93
184 /* If there is hardware support for blinking, start one 94 led_cdev->trigger_data = (void *)1;
185 * user friendly blink rate chosen by the driver.
186 */
187 if (led_cdev->blink_set)
188 led_cdev->blink_set(led_cdev,
189 &timer_data->delay_on, &timer_data->delay_off);
190 95
191 return; 96 return;
192 97
193err_out_delayon: 98err_out_delayon:
194 device_remove_file(led_cdev->dev, &dev_attr_delay_on); 99 device_remove_file(led_cdev->dev, &dev_attr_delay_on);
195err_out:
196 led_cdev->trigger_data = NULL;
197 kfree(timer_data);
198} 100}
199 101
200static void timer_trig_deactivate(struct led_classdev *led_cdev) 102static void timer_trig_deactivate(struct led_classdev *led_cdev)
201{ 103{
202 struct timer_trig_data *timer_data = led_cdev->trigger_data; 104 if (led_cdev->trigger_data) {
203 unsigned long on = 0, off = 0;
204
205 if (timer_data) {
206 device_remove_file(led_cdev->dev, &dev_attr_delay_on); 105 device_remove_file(led_cdev->dev, &dev_attr_delay_on);
207 device_remove_file(led_cdev->dev, &dev_attr_delay_off); 106 device_remove_file(led_cdev->dev, &dev_attr_delay_off);
208 del_timer_sync(&timer_data->timer);
209 kfree(timer_data);
210 } 107 }
211 108
212 /* If there is hardware support for blinking, stop it */ 109 /* Stop blinking */
213 if (led_cdev->blink_set) 110 led_brightness_set(led_cdev, LED_OFF);
214 led_cdev->blink_set(led_cdev, &on, &off);
215} 111}
216 112
217static struct led_trigger timer_led_trigger = { 113static struct led_trigger timer_led_trigger = {
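
With the trigger stripped down like this, the delay_on/delay_off stores no longer run their own timer; they forward both millisecond values to led_blink_set() and let the LED core (or the driver's hardware blink hook) do the rest. A minimal sketch of the same call from some other in-kernel user, assuming only the led_blink_set() signature visible in the hunks above (a classdev plus two unsigned long pointers, presumably so the core can write back the rate it actually uses):

/* Sketch only: ask the LED core to blink an already registered classdev. */
static void example_start_blink(struct led_classdev *led_cdev)
{
	unsigned long delay_on = 500;	/* milliseconds, as in the old trigger */
	unsigned long delay_off = 500;

	led_blink_set(led_cdev, &delay_on, &delay_off);
}
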
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index 44469662517..f5f4da3d0b6 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -80,7 +80,7 @@ static void adb_iop_end_req(struct adb_request *req, int state)
80static void adb_iop_complete(struct iop_msg *msg) 80static void adb_iop_complete(struct iop_msg *msg)
81{ 81{
82 struct adb_request *req; 82 struct adb_request *req;
83 uint flags; 83 unsigned long flags;
84 84
85 local_irq_save(flags); 85 local_irq_save(flags);
86 86
@@ -103,7 +103,7 @@ static void adb_iop_listen(struct iop_msg *msg)
103{ 103{
104 struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message; 104 struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message;
105 struct adb_request *req; 105 struct adb_request *req;
106 uint flags; 106 unsigned long flags;
107#ifdef DEBUG_ADB_IOP 107#ifdef DEBUG_ADB_IOP
108 int i; 108 int i;
109#endif 109#endif
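
The uint to unsigned long change is not cosmetic: local_irq_save() is a macro that writes the saved flags into the lvalue it is handed, and that flags word is defined as unsigned long on every architecture, so anything narrower is wrong in general even where it happens to be the same width. The usual pattern, as a generic sketch rather than code from this driver:

#include <linux/irqflags.h>

static void example_critical_section(void)
{
	unsigned long flags;		/* must be unsigned long, never int/uint */

	local_irq_save(flags);		/* the macro stores the saved state in 'flags' */
	/* ... touch data shared with the interrupt path ... */
	local_irq_restore(flags);
}
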
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e957f3140a..324a3663fcd 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -706,7 +706,7 @@ static struct mdk_personality *find_pers(int level, char *clevel)
706/* return the offset of the super block in 512byte sectors */ 706/* return the offset of the super block in 512byte sectors */
707static inline sector_t calc_dev_sboffset(struct block_device *bdev) 707static inline sector_t calc_dev_sboffset(struct block_device *bdev)
708{ 708{
709 sector_t num_sectors = bdev->bd_inode->i_size / 512; 709 sector_t num_sectors = i_size_read(bdev->bd_inode) / 512;
710 return MD_NEW_SIZE_SECTORS(num_sectors); 710 return MD_NEW_SIZE_SECTORS(num_sectors);
711} 711}
712 712
@@ -1386,7 +1386,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1386 */ 1386 */
1387 switch(minor_version) { 1387 switch(minor_version) {
1388 case 0: 1388 case 0:
1389 sb_start = rdev->bdev->bd_inode->i_size >> 9; 1389 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1390 sb_start -= 8*2; 1390 sb_start -= 8*2;
1391 sb_start &= ~(sector_t)(4*2-1); 1391 sb_start &= ~(sector_t)(4*2-1);
1392 break; 1392 break;
@@ -1472,7 +1472,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
1472 ret = 0; 1472 ret = 0;
1473 } 1473 }
1474 if (minor_version) 1474 if (minor_version)
1475 rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) - 1475 rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
1476 le64_to_cpu(sb->data_offset); 1476 le64_to_cpu(sb->data_offset);
1477 else 1477 else
1478 rdev->sectors = rdev->sb_start; 1478 rdev->sectors = rdev->sb_start;
@@ -1680,7 +1680,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1680 return 0; /* component must fit device */ 1680 return 0; /* component must fit device */
1681 if (rdev->sb_start < rdev->data_offset) { 1681 if (rdev->sb_start < rdev->data_offset) {
1682 /* minor versions 1 and 2; superblock before data */ 1682 /* minor versions 1 and 2; superblock before data */
1683 max_sectors = rdev->bdev->bd_inode->i_size >> 9; 1683 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1684 max_sectors -= rdev->data_offset; 1684 max_sectors -= rdev->data_offset;
1685 if (!num_sectors || num_sectors > max_sectors) 1685 if (!num_sectors || num_sectors > max_sectors)
1686 num_sectors = max_sectors; 1686 num_sectors = max_sectors;
@@ -1690,7 +1690,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
1690 } else { 1690 } else {
1691 /* minor version 0; superblock after data */ 1691 /* minor version 0; superblock after data */
1692 sector_t sb_start; 1692 sector_t sb_start;
1693 sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2; 1693 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1694 sb_start &= ~(sector_t)(4*2 - 1); 1694 sb_start &= ~(sector_t)(4*2 - 1);
1695 max_sectors = rdev->sectors + sb_start - rdev->sb_start; 1695 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1696 if (!num_sectors || num_sectors > max_sectors) 1696 if (!num_sectors || num_sectors > max_sectors)
@@ -2584,7 +2584,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2584 if (!sectors) 2584 if (!sectors)
2585 return -EBUSY; 2585 return -EBUSY;
2586 } else if (!sectors) 2586 } else if (!sectors)
2587 sectors = (rdev->bdev->bd_inode->i_size >> 9) - 2587 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2588 rdev->data_offset; 2588 rdev->data_offset;
2589 } 2589 }
2590 if (sectors < my_mddev->dev_sectors) 2590 if (sectors < my_mddev->dev_sectors)
@@ -2797,7 +2797,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
2797 2797
2798 kobject_init(&rdev->kobj, &rdev_ktype); 2798 kobject_init(&rdev->kobj, &rdev_ktype);
2799 2799
2800 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 2800 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
2801 if (!size) { 2801 if (!size) {
2802 printk(KERN_WARNING 2802 printk(KERN_WARNING
2803 "md: %s has zero or unknown size, marking faulty!\n", 2803 "md: %s has zero or unknown size, marking faulty!\n",
@@ -5235,8 +5235,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
5235 5235
5236 if (!mddev->persistent) { 5236 if (!mddev->persistent) {
5237 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5237 printk(KERN_INFO "md: nonpersistent superblock ...\n");
5238 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 5238 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5239 } else 5239 } else
5240 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 5240 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5241 rdev->sectors = rdev->sb_start; 5241 rdev->sectors = rdev->sb_start;
5242 5242
@@ -5306,7 +5306,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
5306 if (mddev->persistent) 5306 if (mddev->persistent)
5307 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 5307 rdev->sb_start = calc_dev_sboffset(rdev->bdev);
5308 else 5308 else
5309 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 5309 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
5310 5310
5311 rdev->sectors = rdev->sb_start; 5311 rdev->sectors = rdev->sb_start;
5312 5312
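
Every open-coded bd_inode->i_size access in md.c becomes i_size_read(), the accessor the VFS provides so that a size being updated concurrently is never read half-written on 32-bit SMP/PREEMPT kernels, where loff_t is wider than a machine word. The conversion is mechanical; as a sketch:

#include <linux/fs.h>

/* Sketch of the pattern applied throughout the hunks above. */
static sector_t example_dev_sectors(struct block_device *bdev)
{
	/* was: bdev->bd_inode->i_size >> 9 */
	return i_size_read(bdev->bd_inode) >> 9;	/* bytes to 512-byte sectors */
}
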
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index bad2cedb8d9..a28541b2b1a 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -19,7 +19,6 @@ comment "Multimedia core support"
19 19
20config VIDEO_DEV 20config VIDEO_DEV
21 tristate "Video For Linux" 21 tristate "Video For Linux"
22 depends on BKL # used in many drivers for ioctl handling, need to kill
23 ---help--- 22 ---help---
24 V4L core support for video capture and overlay devices, webcams and 23 V4L core support for video capture and overlay devices, webcams and
25 AM/FM radio cards. 24 AM/FM radio cards.
diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c
index 3d88542612e..74ee172b5bc 100644
--- a/drivers/media/common/saa7146_i2c.c
+++ b/drivers/media/common/saa7146_i2c.c
@@ -391,7 +391,6 @@ static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, in
391 391
392/*****************************************************************************/ 392/*****************************************************************************/
393/* i2c-adapter helper functions */ 393/* i2c-adapter helper functions */
394#include <linux/i2c-id.h>
395 394
396/* exported algorithm data */ 395/* exported algorithm data */
397static struct i2c_algorithm saa7146_algo = { 396static struct i2c_algorithm saa7146_algo = {
diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c
index a4991026254..2311c0a3406 100644
--- a/drivers/media/dvb/frontends/dibx000_common.c
+++ b/drivers/media/dvb/frontends/dibx000_common.c
@@ -130,6 +130,7 @@ static int i2c_adapter_init(struct i2c_adapter *i2c_adap,
130 struct dibx000_i2c_master *mst) 130 struct dibx000_i2c_master *mst)
131{ 131{
132 strncpy(i2c_adap->name, name, sizeof(i2c_adap->name)); 132 strncpy(i2c_adap->name, name, sizeof(i2c_adap->name));
133 i2c_adap->algo = algo;
133 i2c_adap->algo_data = NULL; 134 i2c_adap->algo_data = NULL;
134 i2c_set_adapdata(i2c_adap, mst); 135 i2c_set_adapdata(i2c_adap, mst);
135 if (i2c_add_adapter(i2c_adap) < 0) 136 if (i2c_add_adapter(i2c_adap) < 0)
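
The single added line assigns the algorithm passed in to the adapter before i2c_add_adapter() runs; without it the adapter was registered with whatever happened to be in ->algo. Minimal shape of the fixed init order, assuming only what the hunk shows (copy the name, set algo and algo_data, attach the driver data, then register):

#include <linux/i2c.h>

/* Sketch of the corrected ordering; 'algo' is whatever algorithm the caller supplies. */
static int example_i2c_adapter_init(struct i2c_adapter *adap,
				    const struct i2c_algorithm *algo,
				    const char *name, void *priv)
{
	strncpy(adap->name, name, sizeof(adap->name));
	adap->algo = algo;		/* the line this hunk adds */
	adap->algo_data = NULL;
	i2c_set_adapdata(adap, priv);

	return i2c_add_adapter(adap);
}
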
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 2934770dacc..7bc36670071 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2065,8 +2065,9 @@ static int cafe_pci_probe(struct pci_dev *pdev,
2065 sensor_cfg.clock_speed = 45; 2065 sensor_cfg.clock_speed = 45;
2066 2066
2067 cam->sensor_addr = 0x42; 2067 cam->sensor_addr = 0x42;
2068 cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, &cam->i2c_adapter, 2068 cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter,
2069 NULL, "ov7670", cam->sensor_addr, NULL); 2069 "ov7670", "ov7670", 0, &sensor_cfg, cam->sensor_addr,
2070 NULL);
2070 if (cam->sensor == NULL) { 2071 if (cam->sensor == NULL) {
2071 ret = -ENODEV; 2072 ret = -ENODEV;
2072 goto out_smbus; 2073 goto out_smbus;
diff --git a/drivers/media/video/cx231xx/cx231xx-417.c b/drivers/media/video/cx231xx/cx231xx-417.c
index aab21f3ce47..4c7cac3b625 100644
--- a/drivers/media/video/cx231xx/cx231xx-417.c
+++ b/drivers/media/video/cx231xx/cx231xx-417.c
@@ -31,7 +31,6 @@
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/firmware.h> 33#include <linux/firmware.h>
34#include <linux/smp_lock.h>
35#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
36#include <media/v4l2-common.h> 35#include <media/v4l2-common.h>
37#include <media/v4l2-ioctl.h> 36#include <media/v4l2-ioctl.h>
@@ -1927,10 +1926,9 @@ static int mpeg_open(struct file *file)
1927 dev = h; 1926 dev = h;
1928 } 1927 }
1929 1928
1930 if (dev == NULL) { 1929 if (dev == NULL)
1931 unlock_kernel();
1932 return -ENODEV; 1930 return -ENODEV;
1933 } 1931
1934 mutex_lock(&dev->lock); 1932 mutex_lock(&dev->lock);
1935 1933
1936 /* allocate + initialize per filehandle data */ 1934 /* allocate + initialize per filehandle data */
diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c
index a6cc12f8736..9a98dc55f65 100644
--- a/drivers/media/video/cx23885/cx23885-417.c
+++ b/drivers/media/video/cx23885/cx23885-417.c
@@ -31,7 +31,6 @@
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/firmware.h> 33#include <linux/firmware.h>
34#include <linux/smp_lock.h>
35#include <linux/slab.h> 34#include <linux/slab.h>
36#include <media/v4l2-common.h> 35#include <media/v4l2-common.h>
37#include <media/v4l2-ioctl.h> 36#include <media/v4l2-ioctl.h>
@@ -1576,12 +1575,8 @@ static int mpeg_open(struct file *file)
1576 1575
1577 /* allocate + initialize per filehandle data */ 1576 /* allocate + initialize per filehandle data */
1578 fh = kzalloc(sizeof(*fh), GFP_KERNEL); 1577 fh = kzalloc(sizeof(*fh), GFP_KERNEL);
1579 if (NULL == fh) { 1578 if (!fh)
1580 unlock_kernel();
1581 return -ENOMEM; 1579 return -ENOMEM;
1582 }
1583
1584 lock_kernel();
1585 1580
1586 file->private_data = fh; 1581 file->private_data = fh;
1587 fh->dev = dev; 1582 fh->dev = dev;
@@ -1592,8 +1587,6 @@ static int mpeg_open(struct file *file)
1592 V4L2_FIELD_INTERLACED, 1587 V4L2_FIELD_INTERLACED,
1593 sizeof(struct cx23885_buffer), 1588 sizeof(struct cx23885_buffer),
1594 fh, NULL); 1589 fh, NULL);
1595 unlock_kernel();
1596
1597 return 0; 1590 return 0;
1598} 1591}
1599 1592
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 93af9c65b48..3cc9f462d08 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -26,7 +26,6 @@
26#include <linux/kmod.h> 26#include <linux/kmod.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/smp_lock.h>
30#include <linux/interrupt.h> 29#include <linux/interrupt.h>
31#include <linux/delay.h> 30#include <linux/delay.h>
32#include <linux/kthread.h> 31#include <linux/kthread.h>
@@ -743,8 +742,6 @@ static int video_open(struct file *file)
743 if (NULL == fh) 742 if (NULL == fh)
744 return -ENOMEM; 743 return -ENOMEM;
745 744
746 lock_kernel();
747
748 file->private_data = fh; 745 file->private_data = fh;
749 fh->dev = dev; 746 fh->dev = dev;
750 fh->radio = radio; 747 fh->radio = radio;
@@ -762,8 +759,6 @@ static int video_open(struct file *file)
762 759
763 dprintk(1, "post videobuf_queue_init()\n"); 760 dprintk(1, "post videobuf_queue_init()\n");
764 761
765 unlock_kernel();
766
767 return 0; 762 return 0;
768} 763}
769 764
diff --git a/drivers/media/video/imx074.c b/drivers/media/video/imx074.c
index 380e459f899..27b5dfdfbb9 100644
--- a/drivers/media/video/imx074.c
+++ b/drivers/media/video/imx074.c
@@ -451,7 +451,6 @@ static int imx074_probe(struct i2c_client *client,
451 ret = imx074_video_probe(icd, client); 451 ret = imx074_video_probe(icd, client);
452 if (ret < 0) { 452 if (ret < 0) {
453 icd->ops = NULL; 453 icd->ops = NULL;
454 i2c_set_clientdata(client, NULL);
455 kfree(priv); 454 kfree(priv);
456 return ret; 455 return ret;
457 } 456 }
@@ -468,7 +467,6 @@ static int imx074_remove(struct i2c_client *client)
468 icd->ops = NULL; 467 icd->ops = NULL;
469 if (icl->free_bus) 468 if (icl->free_bus)
470 icl->free_bus(icl); 469 icl->free_bus(icl);
471 i2c_set_clientdata(client, NULL);
472 client->driver = NULL; 470 client->driver = NULL;
473 kfree(priv); 471 kfree(priv);
474 472
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index 5a000c65ae9..ce4a7537590 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -44,7 +44,6 @@
44#include <linux/errno.h> 44#include <linux/errno.h>
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/i2c.h> 46#include <linux/i2c.h>
47#include <linux/i2c-id.h>
48#include <linux/workqueue.h> 47#include <linux/workqueue.h>
49 48
50#include <media/ir-core.h> 49#include <media/ir-core.h>
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 4a27862da30..072bd2d1cfa 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -31,6 +31,7 @@
31 31
32#include <media/v4l2-common.h> 32#include <media/v4l2-common.h>
33#include <media/v4l2-dev.h> 33#include <media/v4l2-dev.h>
34#include <media/videobuf-core.h>
34#include <media/videobuf-dma-contig.h> 35#include <media/videobuf-dma-contig.h>
35#include <media/soc_camera.h> 36#include <media/soc_camera.h>
36#include <media/soc_mediabus.h> 37#include <media/soc_mediabus.h>
@@ -903,8 +904,6 @@ static int mx2_camera_set_crop(struct soc_camera_device *icd,
903static int mx2_camera_set_fmt(struct soc_camera_device *icd, 904static int mx2_camera_set_fmt(struct soc_camera_device *icd,
904 struct v4l2_format *f) 905 struct v4l2_format *f)
905{ 906{
906 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
907 struct mx2_camera_dev *pcdev = ici->priv;
908 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 907 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
909 const struct soc_camera_format_xlate *xlate; 908 const struct soc_camera_format_xlate *xlate;
910 struct v4l2_pix_format *pix = &f->fmt.pix; 909 struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -943,8 +942,6 @@ static int mx2_camera_set_fmt(struct soc_camera_device *icd,
943static int mx2_camera_try_fmt(struct soc_camera_device *icd, 942static int mx2_camera_try_fmt(struct soc_camera_device *icd,
944 struct v4l2_format *f) 943 struct v4l2_format *f)
945{ 944{
946 struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
947 struct mx2_camera_dev *pcdev = ici->priv;
948 struct v4l2_subdev *sd = soc_camera_to_subdev(icd); 945 struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
949 const struct soc_camera_format_xlate *xlate; 946 const struct soc_camera_format_xlate *xlate;
950 struct v4l2_pix_format *pix = &f->fmt.pix; 947 struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -1024,13 +1021,13 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
1024 return 0; 1021 return 0;
1025} 1022}
1026 1023
1027static int mx2_camera_reqbufs(struct soc_camera_file *icf, 1024static int mx2_camera_reqbufs(struct soc_camera_device *icd,
1028 struct v4l2_requestbuffers *p) 1025 struct v4l2_requestbuffers *p)
1029{ 1026{
1030 int i; 1027 int i;
1031 1028
1032 for (i = 0; i < p->count; i++) { 1029 for (i = 0; i < p->count; i++) {
1033 struct mx2_buffer *buf = container_of(icf->vb_vidq.bufs[i], 1030 struct mx2_buffer *buf = container_of(icd->vb_vidq.bufs[i],
1034 struct mx2_buffer, vb); 1031 struct mx2_buffer, vb);
1035 INIT_LIST_HEAD(&buf->vb.queue); 1032 INIT_LIST_HEAD(&buf->vb.queue);
1036 } 1033 }
@@ -1151,9 +1148,9 @@ err_out:
1151 1148
1152static unsigned int mx2_camera_poll(struct file *file, poll_table *pt) 1149static unsigned int mx2_camera_poll(struct file *file, poll_table *pt)
1153{ 1150{
1154 struct soc_camera_file *icf = file->private_data; 1151 struct soc_camera_device *icd = file->private_data;
1155 1152
1156 return videobuf_poll_stream(file, &icf->vb_vidq, pt); 1153 return videobuf_poll_stream(file, &icd->vb_vidq, pt);
1157} 1154}
1158 1155
1159static struct soc_camera_host_ops mx2_soc_camera_host_ops = { 1156static struct soc_camera_host_ops mx2_soc_camera_host_ops = {
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 29c5fc34813..aa871c2936b 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -27,6 +27,7 @@
27 27
28#include <mach/ipu.h> 28#include <mach/ipu.h>
29#include <mach/mx3_camera.h> 29#include <mach/mx3_camera.h>
30#include <mach/dma.h>
30 31
31#define MX3_CAM_DRV_NAME "mx3-camera" 32#define MX3_CAM_DRV_NAME "mx3-camera"
32 33
@@ -638,6 +639,9 @@ static bool chan_filter(struct dma_chan *chan, void *arg)
638 struct dma_chan_request *rq = arg; 639 struct dma_chan_request *rq = arg;
639 struct mx3_camera_pdata *pdata; 640 struct mx3_camera_pdata *pdata;
640 641
642 if (!imx_dma_is_ipu(chan))
643 return false;
644
641 if (!rq) 645 if (!rq)
642 return false; 646 return false;
643 647
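
The extra test makes the filter reject any DMA channel that is not an IPU channel before it even looks at the request, which is why mach/dma.h (home of imx_dma_is_ipu(), per the added include) is now pulled in. For context, a filter like this is normally handed to dma_request_channel(); a rough sketch with the mask setup and the per-request check assumed, not taken from this driver:

#include <linux/dmaengine.h>
#include <mach/dma.h>			/* imx_dma_is_ipu(), as included above */

static bool example_filter(struct dma_chan *chan, void *arg)
{
	if (!imx_dma_is_ipu(chan))	/* same early rejection the hunk adds */
		return false;

	return arg != NULL;		/* placeholder for the real per-request check */
}

static struct dma_chan *example_request(void *request)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);	/* assumed capability for this sketch */

	return dma_request_channel(mask, example_filter, request);
}
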
diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c
index 7c30e62b50d..cbfd07f2d9d 100644
--- a/drivers/media/video/omap1_camera.c
+++ b/drivers/media/video/omap1_camera.c
@@ -235,7 +235,7 @@ static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf,
235 235
236 BUG_ON(in_interrupt()); 236 BUG_ON(in_interrupt());
237 237
238 videobuf_waiton(vb, 0, 0); 238 videobuf_waiton(vq, vb, 0, 0);
239 239
240 if (vb_mode == OMAP1_CAM_DMA_CONTIG) { 240 if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
241 videobuf_dma_contig_free(vq, vb); 241 videobuf_dma_contig_free(vq, vb);
@@ -504,7 +504,7 @@ static void omap1_videobuf_queue(struct videobuf_queue *vq,
504 * empty. Since the transfer of the DMA programming register set 504 * empty. Since the transfer of the DMA programming register set
505 * content to the DMA working register set is done automatically 505 * content to the DMA working register set is done automatically
506 * by the DMA hardware, this can pretty well happen while we 506 * by the DMA hardware, this can pretty well happen while we
507 * are keeping the lock here. Levae fetching it from the queue 507 * are keeping the lock here. Leave fetching it from the queue
508 * to be done when a next DMA interrupt occures instead. 508 * to be done when a next DMA interrupt occures instead.
509 */ 509 */
510 return; 510 return;
@@ -1365,12 +1365,12 @@ static void omap1_cam_init_videobuf(struct videobuf_queue *q,
1365 videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops, 1365 videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops,
1366 icd->dev.parent, &pcdev->lock, 1366 icd->dev.parent, &pcdev->lock,
1367 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, 1367 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
1368 sizeof(struct omap1_cam_buf), icd); 1368 sizeof(struct omap1_cam_buf), icd, NULL);
1369 else 1369 else
1370 videobuf_queue_sg_init(q, &omap1_videobuf_ops, 1370 videobuf_queue_sg_init(q, &omap1_videobuf_ops,
1371 icd->dev.parent, &pcdev->lock, 1371 icd->dev.parent, &pcdev->lock,
1372 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, 1372 V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
1373 sizeof(struct omap1_cam_buf), icd); 1373 sizeof(struct omap1_cam_buf), icd, NULL);
1374 1374
1375 /* use videobuf mode (auto)selected with the module parameter */ 1375 /* use videobuf mode (auto)selected with the module parameter */
1376 pcdev->vb_mode = sg_mode ? OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG; 1376 pcdev->vb_mode = sg_mode ? OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG;
@@ -1386,7 +1386,7 @@ static void omap1_cam_init_videobuf(struct videobuf_queue *q,
1386 } 1386 }
1387} 1387}
1388 1388
1389static int omap1_cam_reqbufs(struct soc_camera_file *icf, 1389static int omap1_cam_reqbufs(struct soc_camera_device *icd,
1390 struct v4l2_requestbuffers *p) 1390 struct v4l2_requestbuffers *p)
1391{ 1391{
1392 int i; 1392 int i;
@@ -1398,7 +1398,7 @@ static int omap1_cam_reqbufs(struct soc_camera_file *icf,
1398 * it hadn't triggered 1398 * it hadn't triggered
1399 */ 1399 */
1400 for (i = 0; i < p->count; i++) { 1400 for (i = 0; i < p->count; i++) {
1401 struct omap1_cam_buf *buf = container_of(icf->vb_vidq.bufs[i], 1401 struct omap1_cam_buf *buf = container_of(icd->vb_vidq.bufs[i],
1402 struct omap1_cam_buf, vb); 1402 struct omap1_cam_buf, vb);
1403 buf->inwork = 0; 1403 buf->inwork = 0;
1404 INIT_LIST_HEAD(&buf->vb.queue); 1404 INIT_LIST_HEAD(&buf->vb.queue);
@@ -1485,10 +1485,10 @@ static int omap1_cam_set_bus_param(struct soc_camera_device *icd,
1485 1485
1486static unsigned int omap1_cam_poll(struct file *file, poll_table *pt) 1486static unsigned int omap1_cam_poll(struct file *file, poll_table *pt)
1487{ 1487{
1488 struct soc_camera_file *icf = file->private_data; 1488 struct soc_camera_device *icd = file->private_data;
1489 struct omap1_cam_buf *buf; 1489 struct omap1_cam_buf *buf;
1490 1490
1491 buf = list_entry(icf->vb_vidq.stream.next, struct omap1_cam_buf, 1491 buf = list_entry(icd->vb_vidq.stream.next, struct omap1_cam_buf,
1492 vb.stream); 1492 vb.stream);
1493 1493
1494 poll_wait(file, &buf->vb.done, pt); 1494 poll_wait(file, &buf->vb.done, pt);
diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c
index b7cfeab0948..cf93de98806 100644
--- a/drivers/media/video/ov6650.c
+++ b/drivers/media/video/ov6650.c
@@ -754,7 +754,7 @@ static int ov6650_g_fmt(struct v4l2_subdev *sd,
754 754
755static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect) 755static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
756{ 756{
757 return (width > rect->width >> 1 || height > rect->height >> 1); 757 return width > rect->width >> 1 || height > rect->height >> 1;
758} 758}
759 759
760static u8 to_clkrc(struct v4l2_fract *timeperframe, 760static u8 to_clkrc(struct v4l2_fract *timeperframe,
@@ -840,8 +840,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
840 coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP; 840 coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP;
841 coma_set |= COMA_RAW_RGB | COMA_RGB; 841 coma_set |= COMA_RAW_RGB | COMA_RGB;
842 break; 842 break;
843 case 0:
844 break;
845 default: 843 default:
846 dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code); 844 dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
847 return -EINVAL; 845 return -EINVAL;
@@ -1176,7 +1174,6 @@ static int ov6650_probe(struct i2c_client *client,
1176 1174
1177 if (ret) { 1175 if (ret) {
1178 icd->ops = NULL; 1176 icd->ops = NULL;
1179 i2c_set_clientdata(client, NULL);
1180 kfree(priv); 1177 kfree(priv);
1181 } 1178 }
1182 1179
@@ -1187,7 +1184,6 @@ static int ov6650_remove(struct i2c_client *client)
1187{ 1184{
1188 struct ov6650 *priv = to_ov6650(client); 1185 struct ov6650 *priv = to_ov6650(client);
1189 1186
1190 i2c_set_clientdata(client, NULL);
1191 kfree(priv); 1187 kfree(priv);
1192 return 0; 1188 return 0;
1193} 1189}
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 10a6cbf6a79..0911cb580e1 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -6661,6 +6661,18 @@ struct pci_device_id saa7134_pci_tbl[] = {
6661 .subdevice = 0x2804, 6661 .subdevice = 0x2804,
6662 .driver_data = SAA7134_BOARD_TECHNOTREND_BUDGET_T3000, 6662 .driver_data = SAA7134_BOARD_TECHNOTREND_BUDGET_T3000,
6663 }, { 6663 }, {
6664 .vendor = PCI_VENDOR_ID_PHILIPS,
6665 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
6666 .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
6667 .subdevice = 0x7190,
6668 .driver_data = SAA7134_BOARD_BEHOLD_H7,
6669 }, {
6670 .vendor = PCI_VENDOR_ID_PHILIPS,
6671 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
6672 .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
6673 .subdevice = 0x7090,
6674 .driver_data = SAA7134_BOARD_BEHOLD_A7,
6675 }, {
6664 /* --- boards without eeprom + subsystem ID --- */ 6676 /* --- boards without eeprom + subsystem ID --- */
6665 .vendor = PCI_VENDOR_ID_PHILIPS, 6677 .vendor = PCI_VENDOR_ID_PHILIPS,
6666 .device = PCI_DEVICE_ID_PHILIPS_SAA7134, 6678 .device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -6698,18 +6710,6 @@ struct pci_device_id saa7134_pci_tbl[] = {
6698 .subvendor = PCI_ANY_ID, 6710 .subvendor = PCI_ANY_ID,
6699 .subdevice = PCI_ANY_ID, 6711 .subdevice = PCI_ANY_ID,
6700 .driver_data = SAA7134_BOARD_UNKNOWN, 6712 .driver_data = SAA7134_BOARD_UNKNOWN,
6701 }, {
6702 .vendor = PCI_VENDOR_ID_PHILIPS,
6703 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
6704 .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
6705 .subdevice = 0x7190,
6706 .driver_data = SAA7134_BOARD_BEHOLD_H7,
6707 }, {
6708 .vendor = PCI_VENDOR_ID_PHILIPS,
6709 .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
6710 .subvendor = 0x5ace, /* Beholder Intl. Ltd. */
6711 .subdevice = 0x7090,
6712 .driver_data = SAA7134_BOARD_BEHOLD_A7,
6713 },{ 6713 },{
6714 /* --- end of list --- */ 6714 /* --- end of list --- */
6715 } 6715 }
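
The Beholder H7/A7 rows are moved because the PCI core walks this table in order and stops at the first match: entries sitting after the PCI_ANY_ID catch-all rows (the "boards without eeprom + subsystem ID" block and the final BOARD_UNKNOWN entry) can never be reached for devices those rows already cover. A stripped-down illustration of the ordering rule, not the driver's actual table:

#include <linux/pci.h>

/* Sketch: specific subsystem IDs must come before any PCI_ANY_ID catch-all,
 * otherwise the catch-all matches first and the board falls back to "unknown". */
static const struct pci_device_id example_pci_tbl[] = {
	{
		.vendor    = PCI_VENDOR_ID_PHILIPS,
		.device    = PCI_DEVICE_ID_PHILIPS_SAA7133,
		.subvendor = 0x5ace,		/* specific board entry, listed first */
		.subdevice = 0x7190,
	}, {
		.vendor    = PCI_VENDOR_ID_PHILIPS,
		.device    = PCI_DEVICE_ID_PHILIPS_SAA7133,
		.subvendor = PCI_ANY_ID,	/* catch-all, kept last */
		.subdevice = PCI_ANY_ID,
	},
	{ /* end of list */ }
};
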
diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c
index 41d0166c0f9..41360d7c3e9 100644
--- a/drivers/media/video/se401.c
+++ b/drivers/media/video/se401.c
@@ -31,7 +31,6 @@ static const char version[] = "0.24";
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/smp_lock.h>
35#include <linux/pagemap.h> 34#include <linux/pagemap.h>
36#include <linux/usb.h> 35#include <linux/usb.h>
37#include "se401.h" 36#include "se401.h"
@@ -951,9 +950,9 @@ static int se401_open(struct file *file)
951 struct usb_se401 *se401 = (struct usb_se401 *)dev; 950 struct usb_se401 *se401 = (struct usb_se401 *)dev;
952 int err = 0; 951 int err = 0;
953 952
954 lock_kernel(); 953 mutex_lock(&se401->lock);
955 if (se401->user) { 954 if (se401->user) {
956 unlock_kernel(); 955 mutex_unlock(&se401->lock);
957 return -EBUSY; 956 return -EBUSY;
958 } 957 }
959 se401->fbuf = rvmalloc(se401->maxframesize * SE401_NUMFRAMES); 958 se401->fbuf = rvmalloc(se401->maxframesize * SE401_NUMFRAMES);
@@ -962,7 +961,7 @@ static int se401_open(struct file *file)
962 else 961 else
963 err = -ENOMEM; 962 err = -ENOMEM;
964 se401->user = !err; 963 se401->user = !err;
965 unlock_kernel(); 964 mutex_unlock(&se401->lock);
966 965
967 return err; 966 return err;
968} 967}
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index f07a0f6b71c..b5afe5f841c 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -27,7 +27,6 @@
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/smp_lock.h>
31 30
32#include <linux/usb.h> 31#include <linux/usb.h>
33#include <linux/mm.h> 32#include <linux/mm.h>
@@ -673,14 +672,11 @@ static int v4l_stk_open(struct file *fp)
673 vdev = video_devdata(fp); 672 vdev = video_devdata(fp);
674 dev = vdev_to_camera(vdev); 673 dev = vdev_to_camera(vdev);
675 674
676 lock_kernel();
677 if (dev == NULL || !is_present(dev)) { 675 if (dev == NULL || !is_present(dev)) {
678 unlock_kernel();
679 return -ENXIO; 676 return -ENXIO;
680 } 677 }
681 fp->private_data = dev; 678 fp->private_data = dev;
682 usb_autopm_get_interface(dev->interface); 679 usb_autopm_get_interface(dev->interface);
683 unlock_kernel();
684 680
685 return 0; 681 return 0;
686} 682}
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index 4555f4a5f4c..c91424c0c13 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -36,7 +36,6 @@
36#include <linux/string.h> 36#include <linux/string.h>
37#include <linux/types.h> 37#include <linux/types.h>
38#include <linux/firmware.h> 38#include <linux/firmware.h>
39#include <linux/smp_lock.h>
40 39
41#include "vendorcmds.h" 40#include "vendorcmds.h"
42#include "pd-common.h" 41#include "pd-common.h"
@@ -485,15 +484,11 @@ static void poseidon_disconnect(struct usb_interface *interface)
485 /*unregister v4l2 device */ 484 /*unregister v4l2 device */
486 v4l2_device_unregister(&pd->v4l2_dev); 485 v4l2_device_unregister(&pd->v4l2_dev);
487 486
488 lock_kernel(); 487 pd_dvb_usb_device_exit(pd);
489 { 488 poseidon_fm_exit(pd);
490 pd_dvb_usb_device_exit(pd);
491 poseidon_fm_exit(pd);
492 489
493 poseidon_audio_free(pd); 490 poseidon_audio_free(pd);
494 pd_video_exit(pd); 491 pd_video_exit(pd);
495 }
496 unlock_kernel();
497 492
498 usb_set_intfdata(interface, NULL); 493 usb_set_intfdata(interface, NULL);
499 kref_put(&pd->kref, poseidon_delete); 494 kref_put(&pd->kref, poseidon_delete);
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c
index 5d6fd01f918..dc17cce2fbb 100644
--- a/drivers/media/video/usbvideo/vicam.c
+++ b/drivers/media/video/usbvideo/vicam.c
@@ -43,7 +43,6 @@
43#include <linux/vmalloc.h> 43#include <linux/vmalloc.h>
44#include <linux/mm.h> 44#include <linux/mm.h>
45#include <linux/slab.h> 45#include <linux/slab.h>
46#include <linux/smp_lock.h>
47#include <linux/mutex.h> 46#include <linux/mutex.h>
48#include <linux/firmware.h> 47#include <linux/firmware.h>
49#include <linux/ihex.h> 48#include <linux/ihex.h>
@@ -483,29 +482,28 @@ vicam_open(struct file *file)
483 return -EINVAL; 482 return -EINVAL;
484 } 483 }
485 484
486 /* the videodev_lock held above us protects us from 485 /* cam_lock/open_count protects us from simultaneous opens
487 * simultaneous opens...for now. we probably shouldn't 486 * ... for now. we probably shouldn't rely on this fact forever.
488 * rely on this fact forever.
489 */ 487 */
490 488
491 lock_kernel(); 489 mutex_lock(&cam->cam_lock);
492 if (cam->open_count > 0) { 490 if (cam->open_count > 0) {
493 printk(KERN_INFO 491 printk(KERN_INFO
494 "vicam_open called on already opened camera"); 492 "vicam_open called on already opened camera");
495 unlock_kernel(); 493 mutex_unlock(&cam->cam_lock);
496 return -EBUSY; 494 return -EBUSY;
497 } 495 }
498 496
499 cam->raw_image = kmalloc(VICAM_MAX_READ_SIZE, GFP_KERNEL); 497 cam->raw_image = kmalloc(VICAM_MAX_READ_SIZE, GFP_KERNEL);
500 if (!cam->raw_image) { 498 if (!cam->raw_image) {
501 unlock_kernel(); 499 mutex_unlock(&cam->cam_lock);
502 return -ENOMEM; 500 return -ENOMEM;
503 } 501 }
504 502
505 cam->framebuf = rvmalloc(VICAM_MAX_FRAME_SIZE * VICAM_FRAMES); 503 cam->framebuf = rvmalloc(VICAM_MAX_FRAME_SIZE * VICAM_FRAMES);
506 if (!cam->framebuf) { 504 if (!cam->framebuf) {
507 kfree(cam->raw_image); 505 kfree(cam->raw_image);
508 unlock_kernel(); 506 mutex_unlock(&cam->cam_lock);
509 return -ENOMEM; 507 return -ENOMEM;
510 } 508 }
511 509
@@ -513,10 +511,17 @@ vicam_open(struct file *file)
513 if (!cam->cntrlbuf) { 511 if (!cam->cntrlbuf) {
514 kfree(cam->raw_image); 512 kfree(cam->raw_image);
515 rvfree(cam->framebuf, VICAM_MAX_FRAME_SIZE * VICAM_FRAMES); 513 rvfree(cam->framebuf, VICAM_MAX_FRAME_SIZE * VICAM_FRAMES);
516 unlock_kernel(); 514 mutex_unlock(&cam->cam_lock);
517 return -ENOMEM; 515 return -ENOMEM;
518 } 516 }
519 517
518 cam->needsDummyRead = 1;
519 cam->open_count++;
520
521 file->private_data = cam;
522 mutex_unlock(&cam->cam_lock);
523
524
520 // First upload firmware, then turn the camera on 525 // First upload firmware, then turn the camera on
521 526
522 if (!cam->is_initialized) { 527 if (!cam->is_initialized) {
@@ -527,12 +532,6 @@ vicam_open(struct file *file)
527 532
528 set_camera_power(cam, 1); 533 set_camera_power(cam, 1);
529 534
530 cam->needsDummyRead = 1;
531 cam->open_count++;
532
533 file->private_data = cam;
534 unlock_kernel();
535
536 return 0; 535 return 0;
537} 536}
538 537
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 0ca7978654b..03f7f4670e9 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -25,7 +25,6 @@
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/kmod.h> 26#include <linux/kmod.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/smp_lock.h>
29#include <asm/uaccess.h> 28#include <asm/uaccess.h>
30#include <asm/system.h> 29#include <asm/system.h>
31 30
@@ -247,10 +246,12 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
247 mutex_unlock(vdev->lock); 246 mutex_unlock(vdev->lock);
248 } else if (vdev->fops->ioctl) { 247 } else if (vdev->fops->ioctl) {
249 /* TODO: convert all drivers to unlocked_ioctl */ 248 /* TODO: convert all drivers to unlocked_ioctl */
250 lock_kernel(); 249 static DEFINE_MUTEX(v4l2_ioctl_mutex);
250
251 mutex_lock(&v4l2_ioctl_mutex);
251 if (video_is_registered(vdev)) 252 if (video_is_registered(vdev))
252 ret = vdev->fops->ioctl(filp, cmd, arg); 253 ret = vdev->fops->ioctl(filp, cmd, arg);
253 unlock_kernel(); 254 mutex_unlock(&v4l2_ioctl_mutex);
254 } else 255 } else
255 ret = -ENOTTY; 256 ret = -ENOTTY;
256 257
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index 37fe16181e3..27f05551183 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -388,6 +388,7 @@ struct zoran {
388 struct videocodec *vfe; /* video front end */ 388 struct videocodec *vfe; /* video front end */
389 389
390 struct mutex resource_lock; /* prevent evil stuff */ 390 struct mutex resource_lock; /* prevent evil stuff */
391 struct mutex other_lock; /* please merge with above */
391 392
392 u8 initialized; /* flag if zoran has been correctly initialized */ 393 u8 initialized; /* flag if zoran has been correctly initialized */
393 int user; /* number of current users */ 394 int user; /* number of current users */
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 0aac376c3f7..7e6d62467ea 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -1227,6 +1227,7 @@ static int __devinit zoran_probe(struct pci_dev *pdev,
1227 snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id); 1227 snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id);
1228 spin_lock_init(&zr->spinlock); 1228 spin_lock_init(&zr->spinlock);
1229 mutex_init(&zr->resource_lock); 1229 mutex_init(&zr->resource_lock);
1230 mutex_init(&zr->other_lock);
1230 if (pci_enable_device(pdev)) 1231 if (pci_enable_device(pdev))
1231 goto zr_unreg; 1232 goto zr_unreg;
1232 pci_read_config_byte(zr->pci_dev, PCI_CLASS_REVISION, &zr->revision); 1233 pci_read_config_byte(zr->pci_dev, PCI_CLASS_REVISION, &zr->revision);
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index 401082b853f..67a52e844ae 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -49,7 +49,6 @@
49#include <linux/module.h> 49#include <linux/module.h>
50#include <linux/delay.h> 50#include <linux/delay.h>
51#include <linux/slab.h> 51#include <linux/slab.h>
52#include <linux/smp_lock.h>
53#include <linux/pci.h> 52#include <linux/pci.h>
54#include <linux/vmalloc.h> 53#include <linux/vmalloc.h>
55#include <linux/wait.h> 54#include <linux/wait.h>
@@ -913,7 +912,7 @@ static int zoran_open(struct file *file)
913 dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n", 912 dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n",
914 ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1); 913 ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1);
915 914
916 lock_kernel(); 915 mutex_lock(&zr->other_lock);
917 916
918 if (zr->user >= 2048) { 917 if (zr->user >= 2048) {
919 dprintk(1, KERN_ERR "%s: too many users (%d) on device\n", 918 dprintk(1, KERN_ERR "%s: too many users (%d) on device\n",
@@ -963,14 +962,14 @@ static int zoran_open(struct file *file)
963 file->private_data = fh; 962 file->private_data = fh;
964 fh->zr = zr; 963 fh->zr = zr;
965 zoran_open_init_session(fh); 964 zoran_open_init_session(fh);
966 unlock_kernel(); 965 mutex_unlock(&zr->other_lock);
967 966
968 return 0; 967 return 0;
969 968
970fail_fh: 969fail_fh:
971 kfree(fh); 970 kfree(fh);
972fail_unlock: 971fail_unlock:
973 unlock_kernel(); 972 mutex_unlock(&zr->other_lock);
974 973
975 dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n", 974 dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n",
976 ZR_DEVNAME(zr), res, zr->user); 975 ZR_DEVNAME(zr), res, zr->user);
@@ -989,7 +988,7 @@ zoran_close(struct file *file)
989 988
990 /* kernel locks (fs/device.c), so don't do that ourselves 989 /* kernel locks (fs/device.c), so don't do that ourselves
991 * (prevents deadlocks) */ 990 * (prevents deadlocks) */
992 /*mutex_lock(&zr->resource_lock);*/ 991 mutex_lock(&zr->other_lock);
993 992
994 zoran_close_end_session(fh); 993 zoran_close_end_session(fh);
995 994
@@ -1023,6 +1022,7 @@ zoran_close(struct file *file)
1023 encoder_call(zr, video, s_routing, 2, 0, 0); 1022 encoder_call(zr, video, s_routing, 2, 0, 0);
1024 } 1023 }
1025 } 1024 }
1025 mutex_unlock(&zr->other_lock);
1026 1026
1027 file->private_data = NULL; 1027 file->private_data = NULL;
1028 kfree(fh->overlay_mask); 1028 kfree(fh->overlay_mask);
@@ -3370,11 +3370,26 @@ static const struct v4l2_ioctl_ops zoran_ioctl_ops = {
3370#endif 3370#endif
3371}; 3371};
3372 3372
3373/* please use zr->resource_lock consistently and kill this wrapper */
3374static long zoran_ioctl(struct file *file, unsigned int cmd,
3375 unsigned long arg)
3376{
3377 struct zoran_fh *fh = file->private_data;
3378 struct zoran *zr = fh->zr;
3379 int ret;
3380
3381 mutex_lock(&zr->other_lock);
3382 ret = video_ioctl2(file, cmd, arg);
3383 mutex_unlock(&zr->other_lock);
3384
3385 return ret;
3386}
3387
3373static const struct v4l2_file_operations zoran_fops = { 3388static const struct v4l2_file_operations zoran_fops = {
3374 .owner = THIS_MODULE, 3389 .owner = THIS_MODULE,
3375 .open = zoran_open, 3390 .open = zoran_open,
3376 .release = zoran_close, 3391 .release = zoran_close,
3377 .ioctl = video_ioctl2, 3392 .unlocked_ioctl = zoran_ioctl,
3378 .read = zoran_read, 3393 .read = zoran_read,
3379 .write = zoran_write, 3394 .write = zoran_write,
3380 .mmap = zoran_mmap, 3395 .mmap = zoran_mmap,
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index f9b91ba8900..644d4cd071c 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -123,7 +123,7 @@ static ssize_t als_sensing_range_store(struct device *dev,
123{ 123{
124 struct i2c_client *client = to_i2c_client(dev); 124 struct i2c_client *client = to_i2c_client(dev);
125 struct als_data *data = i2c_get_clientdata(client); 125 struct als_data *data = i2c_get_clientdata(client);
126 unsigned int ret_val; 126 int ret_val;
127 unsigned long val; 127 unsigned long val;
128 128
129 if (strict_strtoul(buf, 10, &val)) 129 if (strict_strtoul(buf, 10, &val))
@@ -251,7 +251,6 @@ static int apds9802als_probe(struct i2c_client *client,
251 251
252 return res; 252 return res;
253als_error1: 253als_error1:
254 i2c_set_clientdata(client, NULL);
255 kfree(data); 254 kfree(data);
256 return res; 255 return res;
257} 256}
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index cee632e645e..d79a972f2c7 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -649,7 +649,7 @@ static ssize_t bh1770_power_state_store(struct device *dev,
649{ 649{
650 struct bh1770_chip *chip = dev_get_drvdata(dev); 650 struct bh1770_chip *chip = dev_get_drvdata(dev);
651 unsigned long value; 651 unsigned long value;
652 size_t ret; 652 ssize_t ret;
653 653
654 if (strict_strtoul(buf, 0, &value)) 654 if (strict_strtoul(buf, 0, &value))
655 return -EINVAL; 655 return -EINVAL;
@@ -659,8 +659,12 @@ static ssize_t bh1770_power_state_store(struct device *dev,
659 pm_runtime_get_sync(dev); 659 pm_runtime_get_sync(dev);
660 660
661 ret = bh1770_lux_rate(chip, chip->lux_rate_index); 661 ret = bh1770_lux_rate(chip, chip->lux_rate_index);
662 ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE); 662 if (ret < 0) {
663 pm_runtime_put(dev);
664 goto leave;
665 }
663 666
667 ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
664 if (ret < 0) { 668 if (ret < 0) {
665 pm_runtime_put(dev); 669 pm_runtime_put(dev);
666 goto leave; 670 goto leave;
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 34fe835921c..ca47e628507 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -87,7 +87,7 @@ static ssize_t als_sensing_range_store(struct device *dev,
87 struct device_attribute *attr, const char *buf, size_t count) 87 struct device_attribute *attr, const char *buf, size_t count)
88{ 88{
89 struct i2c_client *client = to_i2c_client(dev); 89 struct i2c_client *client = to_i2c_client(dev);
90 unsigned int ret_val; 90 int ret_val;
91 unsigned long val; 91 unsigned long val;
92 92
93 if (strict_strtoul(buf, 10, &val)) 93 if (strict_strtoul(buf, 10, &val))
@@ -106,6 +106,8 @@ static ssize_t als_sensing_range_store(struct device *dev,
106 val = 4; 106 val = 4;
107 107
108 ret_val = i2c_smbus_read_byte_data(client, 0x00); 108 ret_val = i2c_smbus_read_byte_data(client, 0x00);
109 if (ret_val < 0)
110 return ret_val;
109 111
110 ret_val &= 0xFC; /*reset the bit before setting them */ 112 ret_val &= 0xFC; /*reset the bit before setting them */
111 ret_val |= val - 1; 113 ret_val |= val - 1;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 43579b3b24a..53363108994 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -3043,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
3043 atl1_pcie_patch(adapter); 3043 atl1_pcie_patch(adapter);
3044 /* assume we have no link for now */ 3044 /* assume we have no link for now */
3045 netif_carrier_off(netdev); 3045 netif_carrier_off(netdev);
3046 netif_stop_queue(netdev);
3047 3046
3048 setup_timer(&adapter->phy_config_timer, atl1_phy_config, 3047 setup_timer(&adapter->phy_config_timer, atl1_phy_config,
3049 (unsigned long)adapter); 3048 (unsigned long)adapter);
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9eea225deca..863e73a85fb 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
20 * (you will need to reboot afterwards) */ 20 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 21/* #define BNX2X_STOP_ON_ERROR */
22 22
23#define DRV_MODULE_VERSION "1.60.00-3" 23#define DRV_MODULE_VERSION "1.60.00-4"
24#define DRV_MODULE_RELDATE "2010/10/19" 24#define DRV_MODULE_RELDATE "2010/11/01"
25#define BNX2X_BC_VER 0x040200 25#define BNX2X_BC_VER 0x040200
26 26
27#define BNX2X_MULTI_QUEUE 27#define BNX2X_MULTI_QUEUE
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 459614d2d7b..94d5f59d5a6 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1680,7 +1680,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1680 rc = XMIT_PLAIN; 1680 rc = XMIT_PLAIN;
1681 1681
1682 else { 1682 else {
1683 if (skb->protocol == htons(ETH_P_IPV6)) { 1683 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
1684 rc = XMIT_CSUM_V6; 1684 rc = XMIT_CSUM_V6;
1685 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 1685 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1686 rc |= XMIT_CSUM_TCP; 1686 rc |= XMIT_CSUM_TCP;
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23a0e8..4cfd4e9b558 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
244 244
245 u16 xgxs_config_tx[4]; /* 0x1A0 */ 245 u16 xgxs_config_tx[4]; /* 0x1A0 */
246 246
247 u32 Reserved1[57]; /* 0x1A8 */ 247 u32 Reserved1[56]; /* 0x1A8 */
248 u32 default_cfg; /* 0x288 */
249 /* Enable BAM on KR */
250#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
251#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
252#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
253#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
254
248 u32 speed_capability_mask2; /* 0x28C */ 255 u32 speed_capability_mask2; /* 0x28C */
249#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF 256#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
250#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 257#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 2326774df84..58091961925 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -610,7 +610,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
610 /* reset and unreset the BigMac */ 610 /* reset and unreset the BigMac */
611 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 611 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
612 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 612 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
613 udelay(10); 613 msleep(1);
614 614
615 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 615 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
616 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); 616 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -3525,13 +3525,19 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
3525 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); 3525 DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
3526 3526
3527 /* Enable CL37 BAM */ 3527 /* Enable CL37 BAM */
3528 bnx2x_cl45_read(bp, phy, 3528 if (REG_RD(bp, params->shmem_base +
3529 MDIO_AN_DEVAD, 3529 offsetof(struct shmem_region, dev_info.
3530 MDIO_AN_REG_8073_BAM, &val); 3530 port_hw_config[params->port].default_cfg)) &
3531 bnx2x_cl45_write(bp, phy, 3531 PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
3532 MDIO_AN_DEVAD,
3533 MDIO_AN_REG_8073_BAM, val | 1);
3534 3532
3533 bnx2x_cl45_read(bp, phy,
3534 MDIO_AN_DEVAD,
3535 MDIO_AN_REG_8073_BAM, &val);
3536 bnx2x_cl45_write(bp, phy,
3537 MDIO_AN_DEVAD,
3538 MDIO_AN_REG_8073_BAM, val | 1);
3539 DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
3540 }
3535 if (params->loopback_mode == LOOPBACK_EXT) { 3541 if (params->loopback_mode == LOOPBACK_EXT) {
3536 bnx2x_807x_force_10G(bp, phy); 3542 bnx2x_807x_force_10G(bp, phy);
3537 DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); 3543 DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -5302,7 +5308,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
5302{ 5308{
5303 struct bnx2x *bp = params->bp; 5309 struct bnx2x *bp = params->bp;
5304 u16 autoneg_val, an_1000_val, an_10_100_val; 5310 u16 autoneg_val, an_1000_val, an_10_100_val;
5305 bnx2x_wait_reset_complete(bp, phy); 5311
5306 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 5312 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
5307 1 << NIG_LATCH_BC_ENABLE_MI_INT); 5313 1 << NIG_LATCH_BC_ENABLE_MI_INT);
5308 5314
@@ -5431,6 +5437,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
5431 5437
5432 /* HW reset */ 5438 /* HW reset */
5433 bnx2x_ext_phy_hw_reset(bp, params->port); 5439 bnx2x_ext_phy_hw_reset(bp, params->port);
5440 bnx2x_wait_reset_complete(bp, phy);
5434 5441
5435 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); 5442 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
5436 return bnx2x_848xx_cmn_config_init(phy, params, vars); 5443 return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5441,7 +5448,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
5441 struct link_vars *vars) 5448 struct link_vars *vars)
5442{ 5449{
5443 struct bnx2x *bp = params->bp; 5450 struct bnx2x *bp = params->bp;
5444 u8 port = params->port, initialize = 1; 5451 u8 port, initialize = 1;
5445 u16 val; 5452 u16 val;
5446 u16 temp; 5453 u16 temp;
5447 u32 actual_phy_selection; 5454 u32 actual_phy_selection;
@@ -5450,11 +5457,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
5450 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */ 5457 /* This is just for MDIO_CTL_REG_84823_MEDIA register. */
5451 5458
5452 msleep(1); 5459 msleep(1);
5460 if (CHIP_IS_E2(bp))
5461 port = BP_PATH(bp);
5462 else
5463 port = params->port;
5453 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 5464 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5454 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 5465 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
5455 port); 5466 port);
5456 msleep(200); /* 100 is not enough */ 5467 bnx2x_wait_reset_complete(bp, phy);
5457 5468 /* Wait for GPHY to come out of reset */
5469 msleep(50);
5458 /* BCM84823 requires that XGXS links up first @ 10G for normal 5470 /* BCM84823 requires that XGXS links up first @ 10G for normal
5459 behavior */ 5471 behavior */
5460 temp = vars->line_speed; 5472 temp = vars->line_speed;
@@ -5625,7 +5637,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
5625 struct link_params *params) 5637 struct link_params *params)
5626{ 5638{
5627 struct bnx2x *bp = params->bp; 5639 struct bnx2x *bp = params->bp;
5628 u8 port = params->port; 5640 u8 port;
5641 if (CHIP_IS_E2(bp))
5642 port = BP_PATH(bp);
5643 else
5644 port = params->port;
5629 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, 5645 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5630 MISC_REGISTERS_GPIO_OUTPUT_LOW, 5646 MISC_REGISTERS_GPIO_OUTPUT_LOW,
5631 port); 5647 port);
@@ -6928,7 +6944,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6928 u8 reset_ext_phy) 6944 u8 reset_ext_phy)
6929{ 6945{
6930 struct bnx2x *bp = params->bp; 6946 struct bnx2x *bp = params->bp;
6931 u8 phy_index, port = params->port; 6947 u8 phy_index, port = params->port, clear_latch_ind = 0;
6932 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); 6948 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
6933 /* disable attentions */ 6949 /* disable attentions */
6934 vars->link_status = 0; 6950 vars->link_status = 0;
@@ -6966,9 +6982,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6966 params->phy[phy_index].link_reset( 6982 params->phy[phy_index].link_reset(
6967 &params->phy[phy_index], 6983 &params->phy[phy_index],
6968 params); 6984 params);
6985 if (params->phy[phy_index].flags &
6986 FLAGS_REARM_LATCH_SIGNAL)
6987 clear_latch_ind = 1;
6969 } 6988 }
6970 } 6989 }
6971 6990
6991 if (clear_latch_ind) {
6992 /* Clear latching indication */
6993 bnx2x_rearm_latch_signal(bp, port, 0);
6994 bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
6995 1 << NIG_LATCH_BC_ENABLE_MI_INT);
6996 }
6972 if (params->phy[INT_PHY].link_reset) 6997 if (params->phy[INT_PHY].link_reset)
6973 params->phy[INT_PHY].link_reset( 6998 params->phy[INT_PHY].link_reset(
6974 &params->phy[INT_PHY], params); 6999 &params->phy[INT_PHY], params);
@@ -6999,6 +7024,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
6999 s8 port; 7024 s8 port;
7000 s8 port_of_path = 0; 7025 s8 port_of_path = 0;
7001 7026
7027 bnx2x_ext_phy_hw_reset(bp, 0);
7002 /* PART1 - Reset both phys */ 7028 /* PART1 - Reset both phys */
7003 for (port = PORT_MAX - 1; port >= PORT_0; port--) { 7029 for (port = PORT_MAX - 1; port >= PORT_0; port--) {
7004 u32 shmem_base, shmem2_base; 7030 u32 shmem_base, shmem2_base;
@@ -7021,7 +7047,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
7021 return -EINVAL; 7047 return -EINVAL;
7022 } 7048 }
7023 /* disable attentions */ 7049 /* disable attentions */
7024 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 7050 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
7051 port_of_path*4,
7025 (NIG_MASK_XGXS0_LINK_STATUS | 7052 (NIG_MASK_XGXS0_LINK_STATUS |
7026 NIG_MASK_XGXS0_LINK10G | 7053 NIG_MASK_XGXS0_LINK10G |
7027 NIG_MASK_SERDES0_LINK_STATUS | 7054 NIG_MASK_SERDES0_LINK_STATUS |
@@ -7132,7 +7159,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
7132 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT))); 7159 (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
7133 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); 7160 REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
7134 7161
7135 bnx2x_ext_phy_hw_reset(bp, 1); 7162 bnx2x_ext_phy_hw_reset(bp, 0);
7136 msleep(5); 7163 msleep(5);
7137 for (port = 0; port < PORT_MAX; port++) { 7164 for (port = 0; port < PORT_MAX; port++) {
7138 u32 shmem_base, shmem2_base; 7165 u32 shmem_base, shmem2_base;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533fe31..8b4cea57a6c 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@ MODULE_LICENSE("GPL");
33MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>"); 33MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
34MODULE_DESCRIPTION("CAIF SPI driver"); 34MODULE_DESCRIPTION("CAIF SPI driver");
35 35
36/* Returns the number of padding bytes for alignment. */
37#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
38
36static int spi_loop; 39static int spi_loop;
37module_param(spi_loop, bool, S_IRUGO); 40module_param(spi_loop, bool, S_IRUGO);
38MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode."); 41MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
41module_param(spi_frm_align, int, S_IRUGO); 44module_param(spi_frm_align, int, S_IRUGO);
42MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment."); 45MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
43 46
44/* SPI padding options. */ 47/*
48 * SPI padding options.
49 * Warning: must be a base of 2 (& operation used) and can not be zero !
50 */
45module_param(spi_up_head_align, int, S_IRUGO); 51module_param(spi_up_head_align, int, S_IRUGO);
46MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment."); 52MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
47 53
@@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
240static const struct file_operations dbgfs_state_fops = { 246static const struct file_operations dbgfs_state_fops = {
241 .open = dbgfs_open, 247 .open = dbgfs_open,
242 .read = dbgfs_state, 248 .read = dbgfs_state,
243 .owner = THIS_MODULE, 249 .owner = THIS_MODULE
244 .llseek = default_llseek,
245}; 250};
246 251
247static const struct file_operations dbgfs_frame_fops = { 252static const struct file_operations dbgfs_frame_fops = {
248 .open = dbgfs_open, 253 .open = dbgfs_open,
249 .read = dbgfs_frame, 254 .read = dbgfs_frame,
250 .owner = THIS_MODULE, 255 .owner = THIS_MODULE
251 .llseek = default_llseek,
252}; 256};
253 257
254static inline void dev_debugfs_add(struct cfspi *cfspi) 258static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
337 u8 *dst = buf; 341 u8 *dst = buf;
338 caif_assert(buf); 342 caif_assert(buf);
339 343
344 if (cfspi->slave && !cfspi->slave_talked)
345 cfspi->slave_talked = true;
346
340 do { 347 do {
341 struct sk_buff *skb; 348 struct sk_buff *skb;
342 struct caif_payload_info *info; 349 struct caif_payload_info *info;
@@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
357 * Compute head offset i.e. number of bytes to add to 364 * Compute head offset i.e. number of bytes to add to
358 * get the start of the payload aligned. 365 * get the start of the payload aligned.
359 */ 366 */
360 if (spi_up_head_align) { 367 if (spi_up_head_align > 1) {
361 spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); 368 spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
362 *dst = (u8)(spad - 1); 369 *dst = (u8)(spad - 1);
363 dst += spad; 370 dst += spad;
364 } 371 }
@@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
373 * Compute tail offset i.e. number of bytes to add to 380 * Compute tail offset i.e. number of bytes to add to
374 * get the complete CAIF frame aligned. 381 * get the complete CAIF frame aligned.
375 */ 382 */
376 epad = (skb->len + spad) & spi_up_tail_align; 383 epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
377 dst += epad; 384 dst += epad;
378 385
379 dev_kfree_skb(skb); 386 dev_kfree_skb(skb);
@@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi)
417 * Compute head offset i.e. number of bytes to add to 424 * Compute head offset i.e. number of bytes to add to
418 * get the start of the payload aligned. 425 * get the start of the payload aligned.
419 */ 426 */
420 if (spi_up_head_align) 427 if (spi_up_head_align > 1)
421 spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); 428 spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
422 429
423 /* 430 /*
424 * Compute tail offset i.e. number of bytes to add to 431 * Compute tail offset i.e. number of bytes to add to
425 * get the complete CAIF frame aligned. 432 * get the complete CAIF frame aligned.
426 */ 433 */
427 epad = (skb->len + spad) & spi_up_tail_align; 434 epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
428 435
429 if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) { 436 if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
430 skb_queue_tail(&cfspi->chead, skb); 437 skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
433 } else { 440 } else {
434 /* Put back packet. */ 441 /* Put back packet. */
435 skb_queue_head(&cfspi->qhead, skb); 442 skb_queue_head(&cfspi->qhead, skb);
443 break;
436 } 444 }
437 } while (pkts <= CAIF_MAX_SPI_PKTS); 445 } while (pkts <= CAIF_MAX_SPI_PKTS);
438 446
@@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
453{ 461{
454 struct cfspi *cfspi = (struct cfspi *)ifc->priv; 462 struct cfspi *cfspi = (struct cfspi *)ifc->priv;
455 463
464 /*
465 * The slave device is the master on the link. Interrupts before the
466 * slave has transmitted are considered spurious.
467 */
468 if (cfspi->slave && !cfspi->slave_talked) {
469 printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
470 return;
471 }
472
456 if (!in_interrupt()) 473 if (!in_interrupt())
457 spin_lock(&cfspi->lock); 474 spin_lock(&cfspi->lock);
458 if (assert) { 475 if (assert) {
@@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
465 spin_unlock(&cfspi->lock); 482 spin_unlock(&cfspi->lock);
466 483
467 /* Wake up the xfer thread. */ 484 /* Wake up the xfer thread. */
468 wake_up_interruptible(&cfspi->wait); 485 if (assert)
486 wake_up_interruptible(&cfspi->wait);
469} 487}
470 488
471static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc) 489static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
523 * Compute head offset i.e. number of bytes added to 541 * Compute head offset i.e. number of bytes added to
524 * get the start of the payload aligned. 542 * get the start of the payload aligned.
525 */ 543 */
526 if (spi_down_head_align) { 544 if (spi_down_head_align > 1) {
527 spad = 1 + *src; 545 spad = 1 + *src;
528 src += spad; 546 src += spad;
529 } 547 }
@@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
564 * Compute tail offset i.e. number of bytes added to 582 * Compute tail offset i.e. number of bytes added to
565 * get the complete CAIF frame aligned. 583 * get the complete CAIF frame aligned.
566 */ 584 */
567 epad = (pkt_len + spad) & spi_down_tail_align; 585 epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
568 src += epad; 586 src += epad;
569 } while ((src - buf) < len); 587 } while ((src - buf) < len);
570 588
@@ -625,11 +643,20 @@ int cfspi_spi_probe(struct platform_device *pdev)
625 cfspi->ndev = ndev; 643 cfspi->ndev = ndev;
626 cfspi->pdev = pdev; 644 cfspi->pdev = pdev;
627 645
628 /* Set flow info */ 646 /* Set flow info. */
629 cfspi->flow_off_sent = 0; 647 cfspi->flow_off_sent = 0;
630 cfspi->qd_low_mark = LOW_WATER_MARK; 648 cfspi->qd_low_mark = LOW_WATER_MARK;
631 cfspi->qd_high_mark = HIGH_WATER_MARK; 649 cfspi->qd_high_mark = HIGH_WATER_MARK;
632 650
651 /* Set slave info. */
652 if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
653 cfspi->slave = true;
654 cfspi->slave_talked = false;
655 } else {
656 cfspi->slave = false;
657 cfspi->slave_talked = false;
658 }
659
633 /* Assign the SPI device. */ 660 /* Assign the SPI device. */
634 cfspi->dev = dev; 661 cfspi->dev = dev;
635 /* Assign the device ifc to this SPI interface. */ 662 /* Assign the device ifc to this SPI interface. */
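Note on the PAD_POW2(x, pow) macro added above: it computes how many padding bytes are needed to round x up to the next multiple of pow, which (per the warning added in the same patch) must be a non-zero power of two, and it yields 0 when x is already aligned. A small stand-alone sketch of the same arithmetic:

    #include <stdio.h>

    /* Same expression as the driver macro: padding needed to align x up to
     * the power-of-two boundary 'pow' (0 when x is already aligned). */
    #define PAD_POW2(x, pow) ((((x) & ((pow) - 1)) == 0) ? 0 : ((pow) - ((x) & ((pow) - 1))))

    int main(void)
    {
            printf("PAD_POW2(5, 4)  = %d\n", PAD_POW2(5, 4));   /* 3: 5 rounds up to 8 */
            printf("PAD_POW2(8, 4)  = %d\n", PAD_POW2(8, 4));   /* 0: already aligned */
            printf("PAD_POW2(9, 16) = %d\n", PAD_POW2(9, 16));  /* 7: 9 rounds up to 16 */
            return 0;
    }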
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbfea6f..1b9943a4eda 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi)
36#endif 36#endif
37 37
38int spi_frm_align = 2; 38int spi_frm_align = 2;
39int spi_up_head_align = 1; 39
40int spi_up_tail_align; 40/*
41int spi_down_head_align = 3; 41 * SPI padding options.
42int spi_down_tail_align = 1; 42 * Warning: must be a base of 2 (& operation used) and can not be zero !
43 */
44int spi_up_head_align = 1 << 1;
45int spi_up_tail_align = 1 << 0;
46int spi_down_head_align = 1 << 2;
47int spi_down_tail_align = 1 << 1;
43 48
44#ifdef CONFIG_DEBUG_FS 49#ifdef CONFIG_DEBUG_FS
45static inline void debugfs_store_prev(struct cfspi *cfspi) 50static inline void debugfs_store_prev(struct cfspi *cfspi)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 407d4e27207..046d846c652 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -3341,7 +3341,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3341 adapter->name = adapter->port[i]->name; 3341 adapter->name = adapter->port[i]->name;
3342 3342
3343 __set_bit(i, &adapter->registered_device_map); 3343 __set_bit(i, &adapter->registered_device_map);
3344 netif_tx_stop_all_queues(adapter->port[i]);
3345 } 3344 }
3346 } 3345 }
3347 if (!adapter->registered_device_map) { 3346 if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f17703f410b..f50bc98310f 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -3736,7 +3736,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3736 3736
3737 __set_bit(i, &adapter->registered_device_map); 3737 __set_bit(i, &adapter->registered_device_map);
3738 adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i; 3738 adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
3739 netif_tx_stop_all_queues(adapter->port[i]);
3740 } 3739 }
3741 } 3740 }
3742 if (!adapter->registered_device_map) { 3741 if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5a2e9..c3449bbc585 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -753,7 +753,9 @@ static int cxgb4vf_open(struct net_device *dev)
753 if (err) 753 if (err)
754 return err; 754 return err;
755 set_bit(pi->port_id, &adapter->open_device_map); 755 set_bit(pi->port_id, &adapter->open_device_map);
756 link_start(dev); 756 err = link_start(dev);
757 if (err)
758 return err;
757 netif_tx_start_all_queues(dev); 759 netif_tx_start_all_queues(dev);
758 return 0; 760 return 0;
759} 761}
@@ -1103,18 +1105,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1103 return 0; 1105 return 0;
1104} 1106}
1105 1107
1106/*
1107 * Return a TX Queue on which to send the specified skb.
1108 */
1109static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
1110{
1111 /*
1112 * XXX For now just use the default hash but we probably want to
1113 * XXX look at other possibilities ...
1114 */
1115 return skb_tx_hash(dev, skb);
1116}
1117
1118#ifdef CONFIG_NET_POLL_CONTROLLER 1108#ifdef CONFIG_NET_POLL_CONTROLLER
1119/* 1109/*
1120 * Poll all of our receive queues. This is called outside of normal interrupt 1110 * Poll all of our receive queues. This is called outside of normal interrupt
@@ -2075,6 +2065,22 @@ static int adap_init0(struct adapter *adapter)
2075 } 2065 }
2076 2066
2077 /* 2067 /*
2068 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2069 * 2.6.31 and later we can't call pci_reset_function() in order to
2070 * issue an FLR because of a self- deadlock on the device semaphore.
2071 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2072 * cases where they're needed -- for instance, some versions of KVM
2073 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2074 * use the firmware based reset in order to reset any per function
2075 * state.
2076 */
2077 err = t4vf_fw_reset(adapter);
2078 if (err < 0) {
2079 dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2080 return err;
2081 }
2082
2083 /*
2078 * Grab basic operational parameters. These will predominantly have 2084 * Grab basic operational parameters. These will predominantly have
2079 * been set up by the Physical Function Driver or will be hard coded 2085 * been set up by the Physical Function Driver or will be hard coded
2080 * into the adapter. We just have to live with them ... Note that 2086 * into the adapter. We just have to live with them ... Note that
@@ -2417,7 +2423,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
2417 .ndo_get_stats = cxgb4vf_get_stats, 2423 .ndo_get_stats = cxgb4vf_get_stats,
2418 .ndo_set_rx_mode = cxgb4vf_set_rxmode, 2424 .ndo_set_rx_mode = cxgb4vf_set_rxmode,
2419 .ndo_set_mac_address = cxgb4vf_set_mac_addr, 2425 .ndo_set_mac_address = cxgb4vf_set_mac_addr,
2420 .ndo_select_queue = cxgb4vf_select_queue,
2421 .ndo_validate_addr = eth_validate_addr, 2426 .ndo_validate_addr = eth_validate_addr,
2422 .ndo_do_ioctl = cxgb4vf_do_ioctl, 2427 .ndo_do_ioctl = cxgb4vf_do_ioctl,
2423 .ndo_change_mtu = cxgb4vf_change_mtu, 2428 .ndo_change_mtu = cxgb4vf_change_mtu,
@@ -2600,7 +2605,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2600 pi->xact_addr_filt = -1; 2605 pi->xact_addr_filt = -1;
2601 pi->rx_offload = RX_CSO; 2606 pi->rx_offload = RX_CSO;
2602 netif_carrier_off(netdev); 2607 netif_carrier_off(netdev);
2603 netif_tx_stop_all_queues(netdev);
2604 netdev->irq = pdev->irq; 2608 netdev->irq = pdev->irq;
2605 2609
2606 netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 2610 netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
@@ -2625,7 +2629,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2625 netdev->do_ioctl = cxgb4vf_do_ioctl; 2629 netdev->do_ioctl = cxgb4vf_do_ioctl;
2626 netdev->change_mtu = cxgb4vf_change_mtu; 2630 netdev->change_mtu = cxgb4vf_change_mtu;
2627 netdev->set_mac_address = cxgb4vf_set_mac_addr; 2631 netdev->set_mac_address = cxgb4vf_set_mac_addr;
2628 netdev->select_queue = cxgb4vf_select_queue;
2629#ifdef CONFIG_NET_POLL_CONTROLLER 2632#ifdef CONFIG_NET_POLL_CONTROLLER
2630 netdev->poll_controller = cxgb4vf_poll_controller; 2633 netdev->poll_controller = cxgb4vf_poll_controller;
2631#endif 2634#endif
@@ -2844,6 +2847,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
2844 CH_DEVICE(0x4800, 0), /* T440-dbg */ 2847 CH_DEVICE(0x4800, 0), /* T440-dbg */
2845 CH_DEVICE(0x4801, 0), /* T420-cr */ 2848 CH_DEVICE(0x4801, 0), /* T420-cr */
2846 CH_DEVICE(0x4802, 0), /* T422-cr */ 2849 CH_DEVICE(0x4802, 0), /* T422-cr */
2850 CH_DEVICE(0x4803, 0), /* T440-cr */
2851 CH_DEVICE(0x4804, 0), /* T420-bch */
2852 CH_DEVICE(0x4805, 0), /* T440-bch */
2853 CH_DEVICE(0x4806, 0), /* T460-ch */
2854 CH_DEVICE(0x4807, 0), /* T420-so */
2855 CH_DEVICE(0x4808, 0), /* T420-cx */
2856 CH_DEVICE(0x4809, 0), /* T420-bt */
2857 CH_DEVICE(0x480a, 0), /* T404-bt */
2847 { 0, } 2858 { 0, }
2848}; 2859};
2849 2860
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafb..ecf0770bf0f 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
154 */ 154 */
155 RX_COPY_THRES = 256, 155 RX_COPY_THRES = 256,
156 RX_PULL_LEN = 128, 156 RX_PULL_LEN = 128,
157};
158 157
159/* 158 /*
160 * Can't define this in the above enum because PKTSHIFT isn't a constant in 159 * Main body length for sk_buffs used for RX Ethernet packets with
161 * the VF Driver ... 160 * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
162 */ 161 * pskb_may_pull() some room.
163#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT) 162 */
163 RX_SKB_LEN = 512,
164};
164 165
165/* 166/*
166 * Software state per TX descriptor. 167 * Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
1355} 1356}
1356 1357
1357/** 1358/**
1359 * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
1360 * @gl: the gather list
1361 * @skb_len: size of sk_buff main body if it carries fragments
1362 * @pull_len: amount of data to move to the sk_buff's main body
1363 *
1364 * Builds an sk_buff from the given packet gather list. Returns the
1365 * sk_buff or %NULL if sk_buff allocation failed.
1366 */
1367struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1368 unsigned int skb_len, unsigned int pull_len)
1369{
1370 struct sk_buff *skb;
1371 struct skb_shared_info *ssi;
1372
1373 /*
1374 * If the ingress packet is small enough, allocate an skb large enough
1375 * for all of the data and copy it inline. Otherwise, allocate an skb
1376 * with enough room to pull in the header and reference the rest of
1377 * the data via the skb fragment list.
1378 *
1379 * Below we rely on RX_COPY_THRES being less than the smallest Rx
 1380 * buffer size, which is expected since buffers are at least
1381 * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
1382 * fragment.
1383 */
1384 if (gl->tot_len <= RX_COPY_THRES) {
1385 /* small packets have only one fragment */
1386 skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1387 if (unlikely(!skb))
1388 goto out;
1389 __skb_put(skb, gl->tot_len);
1390 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1391 } else {
1392 skb = alloc_skb(skb_len, GFP_ATOMIC);
1393 if (unlikely(!skb))
1394 goto out;
1395 __skb_put(skb, pull_len);
1396 skb_copy_to_linear_data(skb, gl->va, pull_len);
1397
1398 ssi = skb_shinfo(skb);
1399 ssi->frags[0].page = gl->frags[0].page;
1400 ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
1401 ssi->frags[0].size = gl->frags[0].size - pull_len;
1402 if (gl->nfrags > 1)
1403 memcpy(&ssi->frags[1], &gl->frags[1],
1404 (gl->nfrags-1) * sizeof(skb_frag_t));
1405 ssi->nr_frags = gl->nfrags;
1406
1407 skb->len = gl->tot_len;
1408 skb->data_len = skb->len - pull_len;
1409 skb->truesize += skb->data_len;
1410
1411 /* Get a reference for the last page, we don't own it */
1412 get_page(gl->frags[gl->nfrags - 1].page);
1413 }
1414
1415out:
1416 return skb;
1417}
1418
1419/**
1358 * t4vf_pktgl_free - free a packet gather list 1420 * t4vf_pktgl_free - free a packet gather list
1359 * @gl: the gather list 1421 * @gl: the gather list
1360 * 1422 *
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1463{ 1525{
1464 struct sk_buff *skb; 1526 struct sk_buff *skb;
1465 struct port_info *pi; 1527 struct port_info *pi;
1466 struct skb_shared_info *ssi;
1467 const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; 1528 const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
1468 bool csum_ok = pkt->csum_calc && !pkt->err_vec; 1529 bool csum_ok = pkt->csum_calc && !pkt->err_vec;
1469 unsigned int len = be16_to_cpu(pkt->len);
1470 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1530 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1471 1531
1472 /* 1532 /*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1481 } 1541 }
1482 1542
1483 /* 1543 /*
1484 * If the ingress packet is small enough, allocate an skb large enough 1544 * Convert the Packet Gather List into an skb.
1485 * for all of the data and copy it inline. Otherwise, allocate an skb
1486 * with enough room to pull in the header and reference the rest of
1487 * the data via the skb fragment list.
1488 */ 1545 */
1489 if (len <= RX_COPY_THRES) { 1546 skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1490 /* small packets have only one fragment */ 1547 if (unlikely(!skb)) {
1491 skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC); 1548 t4vf_pktgl_free(gl);
1492 if (!skb) 1549 rxq->stats.rx_drops++;
1493 goto nomem; 1550 return 0;
1494 __skb_put(skb, gl->frags[0].size);
1495 skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
1496 } else {
1497 skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
1498 if (!skb)
1499 goto nomem;
1500 __skb_put(skb, RX_PKT_PULL_LEN);
1501 skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
1502
1503 ssi = skb_shinfo(skb);
1504 ssi->frags[0].page = gl->frags[0].page;
1505 ssi->frags[0].page_offset = (gl->frags[0].page_offset +
1506 RX_PKT_PULL_LEN);
1507 ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
1508 if (gl->nfrags > 1)
1509 memcpy(&ssi->frags[1], &gl->frags[1],
1510 (gl->nfrags-1) * sizeof(skb_frag_t));
1511 ssi->nr_frags = gl->nfrags;
1512 skb->len = len + PKTSHIFT;
1513 skb->data_len = skb->len - RX_PKT_PULL_LEN;
1514 skb->truesize += skb->data_len;
1515
1516 /* Get a reference for the last page, we don't own it */
1517 get_page(gl->frags[gl->nfrags - 1].page);
1518 } 1551 }
1519
1520 __skb_pull(skb, PKTSHIFT); 1552 __skb_pull(skb, PKTSHIFT);
1521 skb->protocol = eth_type_trans(skb, rspq->netdev); 1553 skb->protocol = eth_type_trans(skb, rspq->netdev);
1522 skb_record_rx_queue(skb, rspq->idx); 1554 skb_record_rx_queue(skb, rspq->idx);
@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1549 netif_receive_skb(skb); 1581 netif_receive_skb(skb);
1550 1582
1551 return 0; 1583 return 0;
1552
1553nomem:
1554 t4vf_pktgl_free(gl);
1555 rxq->stats.rx_drops++;
1556 return 0;
1557} 1584}
1558 1585
1559/** 1586/**
@@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
1679 } 1706 }
1680 len = RSPD_LEN(len); 1707 len = RSPD_LEN(len);
1681 } 1708 }
1709 gl.tot_len = len;
1682 1710
1683 /* 1711 /*
1684 * Gather packet fragments. 1712 * Gather packet fragments.
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d86c5..a65c80aed1f 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
235int __devinit t4vf_wait_dev_ready(struct adapter *); 235int __devinit t4vf_wait_dev_ready(struct adapter *);
236int __devinit t4vf_port_init(struct adapter *, int); 236int __devinit t4vf_port_init(struct adapter *, int);
237 237
238int t4vf_fw_reset(struct adapter *);
238int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); 239int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
239int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); 240int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
240 241
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb..e306c20dfae 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
326} 326}
327 327
328/** 328/**
329 * t4vf_fw_reset - issue a reset to FW
330 * @adapter: the adapter
331 *
332 * Issues a reset command to FW. For a Physical Function this would
333 * result in the Firmware reseting all of its state. For a Virtual
334 * Function this just resets the state associated with the VF.
335 */
336int t4vf_fw_reset(struct adapter *adapter)
337{
338 struct fw_reset_cmd cmd;
339
340 memset(&cmd, 0, sizeof(cmd));
341 cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
342 FW_CMD_WRITE);
343 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
344 return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
345}
346
347/**
329 * t4vf_query_params - query FW or device parameters 348 * t4vf_query_params - query FW or device parameters
330 * @adapter: the adapter 349 * @adapter: the adapter
331 * @nparams: the number of parameters 350 * @nparams: the number of parameters
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5c566ebc54b..3bc8e276ba4 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
635 if (wol->wolopts & ~WAKE_MAGIC) 635 if (wol->wolopts & ~WAKE_MAGIC)
636 return -EINVAL; 636 return -EINVAL;
637 637
638 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
639
638 spin_lock_irqsave(&priv->bflock, flags); 640 spin_lock_irqsave(&priv->bflock, flags);
639 priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0; 641 priv->wol_en = !!device_may_wakeup(&dev->dev);
640 device_set_wakeup_enable(&dev->dev, priv->wol_en);
641 spin_unlock_irqrestore(&priv->bflock, flags); 642 spin_unlock_irqrestore(&priv->bflock, flags);
642 643
643 return 0; 644 return 0;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc3204cb..06bb9b79945 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev,
2871 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); 2871 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2872 2872
2873 netif_carrier_off(ndev); 2873 netif_carrier_off(ndev);
2874 netif_stop_queue(ndev);
2875 2874
2876 err = register_netdev(ndev); 2875 err = register_netdev(ndev);
2877 if (err) { 2876 if (err) {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2bd3eb4ee5a..fbad4d81960 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
764#ifdef IXGBE_FCOE 764#ifdef IXGBE_FCOE
765 /* adjust for FCoE Sequence Offload */ 765 /* adjust for FCoE Sequence Offload */
766 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 766 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
767 && (skb->protocol == htons(ETH_P_FCOE)) && 767 && skb_is_gso(skb)
768 skb_is_gso(skb)) { 768 && vlan_get_protocol(skb) ==
769 htons(ETH_P_FCOE)) {
769 hlen = skb_transport_offset(skb) + 770 hlen = skb_transport_offset(skb) +
770 sizeof(struct fc_frame_header) + 771 sizeof(struct fc_frame_header) +
771 sizeof(struct fcoe_crc_eof); 772 sizeof(struct fcoe_crc_eof);
@@ -5823,7 +5824,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5823 5824
5824static int ixgbe_tso(struct ixgbe_adapter *adapter, 5825static int ixgbe_tso(struct ixgbe_adapter *adapter,
5825 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 5826 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
5826 u32 tx_flags, u8 *hdr_len) 5827 u32 tx_flags, u8 *hdr_len, __be16 protocol)
5827{ 5828{
5828 struct ixgbe_adv_tx_context_desc *context_desc; 5829 struct ixgbe_adv_tx_context_desc *context_desc;
5829 unsigned int i; 5830 unsigned int i;
@@ -5841,7 +5842,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5841 l4len = tcp_hdrlen(skb); 5842 l4len = tcp_hdrlen(skb);
5842 *hdr_len += l4len; 5843 *hdr_len += l4len;
5843 5844
5844 if (skb->protocol == htons(ETH_P_IP)) { 5845 if (protocol == htons(ETH_P_IP)) {
5845 struct iphdr *iph = ip_hdr(skb); 5846 struct iphdr *iph = ip_hdr(skb);
5846 iph->tot_len = 0; 5847 iph->tot_len = 0;
5847 iph->check = 0; 5848 iph->check = 0;
@@ -5880,7 +5881,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5880 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 5881 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
5881 IXGBE_ADVTXD_DTYP_CTXT); 5882 IXGBE_ADVTXD_DTYP_CTXT);
5882 5883
5883 if (skb->protocol == htons(ETH_P_IP)) 5884 if (protocol == htons(ETH_P_IP))
5884 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 5885 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
5885 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 5886 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
5886 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 5887 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5906,16 +5907,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
5906 return false; 5907 return false;
5907} 5908}
5908 5909
5909static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) 5910static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5911 __be16 protocol)
5910{ 5912{
5911 u32 rtn = 0; 5913 u32 rtn = 0;
5912 __be16 protocol;
5913
5914 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5915 protocol = ((const struct vlan_ethhdr *)skb->data)->
5916 h_vlan_encapsulated_proto;
5917 else
5918 protocol = skb->protocol;
5919 5914
5920 switch (protocol) { 5915 switch (protocol) {
5921 case cpu_to_be16(ETH_P_IP): 5916 case cpu_to_be16(ETH_P_IP):
@@ -5943,7 +5938,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
5943 default: 5938 default:
5944 if (unlikely(net_ratelimit())) 5939 if (unlikely(net_ratelimit()))
5945 e_warn(probe, "partial checksum but proto=%x!\n", 5940 e_warn(probe, "partial checksum but proto=%x!\n",
5946 skb->protocol); 5941 protocol);
5947 break; 5942 break;
5948 } 5943 }
5949 5944
@@ -5952,7 +5947,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
5952 5947
5953static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 5948static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5954 struct ixgbe_ring *tx_ring, 5949 struct ixgbe_ring *tx_ring,
5955 struct sk_buff *skb, u32 tx_flags) 5950 struct sk_buff *skb, u32 tx_flags,
5951 __be16 protocol)
5956{ 5952{
5957 struct ixgbe_adv_tx_context_desc *context_desc; 5953 struct ixgbe_adv_tx_context_desc *context_desc;
5958 unsigned int i; 5954 unsigned int i;
@@ -5981,7 +5977,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
5981 IXGBE_ADVTXD_DTYP_CTXT); 5977 IXGBE_ADVTXD_DTYP_CTXT);
5982 5978
5983 if (skb->ip_summed == CHECKSUM_PARTIAL) 5979 if (skb->ip_summed == CHECKSUM_PARTIAL)
5984 type_tucmd_mlhl |= ixgbe_psum(adapter, skb); 5980 type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
5985 5981
5986 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 5982 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
5987 /* use index zero for tx checksum offload */ 5983 /* use index zero for tx checksum offload */
@@ -6179,7 +6175,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6179} 6175}
6180 6176
6181static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6177static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6182 int queue, u32 tx_flags) 6178 int queue, u32 tx_flags, __be16 protocol)
6183{ 6179{
6184 struct ixgbe_atr_input atr_input; 6180 struct ixgbe_atr_input atr_input;
6185 struct tcphdr *th; 6181 struct tcphdr *th;
@@ -6190,7 +6186,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
6190 u8 l4type = 0; 6186 u8 l4type = 0;
6191 6187
6192 /* Right now, we support IPv4 only */ 6188 /* Right now, we support IPv4 only */
6193 if (skb->protocol != htons(ETH_P_IP)) 6189 if (protocol != htons(ETH_P_IP))
6194 return; 6190 return;
6195 /* check if we're UDP or TCP */ 6191 /* check if we're UDP or TCP */
6196 if (iph->protocol == IPPROTO_TCP) { 6192 if (iph->protocol == IPPROTO_TCP) {
@@ -6257,10 +6253,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6257{ 6253{
6258 struct ixgbe_adapter *adapter = netdev_priv(dev); 6254 struct ixgbe_adapter *adapter = netdev_priv(dev);
6259 int txq = smp_processor_id(); 6255 int txq = smp_processor_id();
6260
6261#ifdef IXGBE_FCOE 6256#ifdef IXGBE_FCOE
6262 if ((skb->protocol == htons(ETH_P_FCOE)) || 6257 __be16 protocol;
6263 (skb->protocol == htons(ETH_P_FIP))) { 6258
6259 protocol = vlan_get_protocol(skb);
6260
6261 if ((protocol == htons(ETH_P_FCOE)) ||
6262 (protocol == htons(ETH_P_FIP))) {
6264 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 6263 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
6265 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); 6264 txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
6266 txq += adapter->ring_feature[RING_F_FCOE].mask; 6265 txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6303,6 +6302,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6303 int tso; 6302 int tso;
6304 int count = 0; 6303 int count = 0;
6305 unsigned int f; 6304 unsigned int f;
6305 __be16 protocol;
6306
6307 protocol = vlan_get_protocol(skb);
6306 6308
6307 if (vlan_tx_tag_present(skb)) { 6309 if (vlan_tx_tag_present(skb)) {
6308 tx_flags |= vlan_tx_tag_get(skb); 6310 tx_flags |= vlan_tx_tag_get(skb);
@@ -6323,8 +6325,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6323 /* for FCoE with DCB, we force the priority to what 6325 /* for FCoE with DCB, we force the priority to what
6324 * was specified by the switch */ 6326 * was specified by the switch */
6325 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && 6327 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
6326 (skb->protocol == htons(ETH_P_FCOE) || 6328 (protocol == htons(ETH_P_FCOE) ||
6327 skb->protocol == htons(ETH_P_FIP))) { 6329 protocol == htons(ETH_P_FIP))) {
6328#ifdef CONFIG_IXGBE_DCB 6330#ifdef CONFIG_IXGBE_DCB
6329 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6331 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
6330 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK 6332 tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6334,7 +6336,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6334 } 6336 }
6335#endif 6337#endif
6336 /* flag for FCoE offloads */ 6338 /* flag for FCoE offloads */
6337 if (skb->protocol == htons(ETH_P_FCOE)) 6339 if (protocol == htons(ETH_P_FCOE))
6338 tx_flags |= IXGBE_TX_FLAGS_FCOE; 6340 tx_flags |= IXGBE_TX_FLAGS_FCOE;
6339 } 6341 }
6340#endif 6342#endif
@@ -6368,9 +6370,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6368 tx_flags |= IXGBE_TX_FLAGS_FSO; 6370 tx_flags |= IXGBE_TX_FLAGS_FSO;
6369#endif /* IXGBE_FCOE */ 6371#endif /* IXGBE_FCOE */
6370 } else { 6372 } else {
6371 if (skb->protocol == htons(ETH_P_IP)) 6373 if (protocol == htons(ETH_P_IP))
6372 tx_flags |= IXGBE_TX_FLAGS_IPV4; 6374 tx_flags |= IXGBE_TX_FLAGS_IPV4;
6373 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 6375 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
6376 protocol);
6374 if (tso < 0) { 6377 if (tso < 0) {
6375 dev_kfree_skb_any(skb); 6378 dev_kfree_skb_any(skb);
6376 return NETDEV_TX_OK; 6379 return NETDEV_TX_OK;
@@ -6378,7 +6381,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6378 6381
6379 if (tso) 6382 if (tso)
6380 tx_flags |= IXGBE_TX_FLAGS_TSO; 6383 tx_flags |= IXGBE_TX_FLAGS_TSO;
6381 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 6384 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
6385 protocol) &&
6382 (skb->ip_summed == CHECKSUM_PARTIAL)) 6386 (skb->ip_summed == CHECKSUM_PARTIAL))
6383 tx_flags |= IXGBE_TX_FLAGS_CSUM; 6387 tx_flags |= IXGBE_TX_FLAGS_CSUM;
6384 } 6388 }
@@ -6392,7 +6396,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6392 test_bit(__IXGBE_FDIR_INIT_DONE, 6396 test_bit(__IXGBE_FDIR_INIT_DONE,
6393 &tx_ring->reinit_state)) { 6397 &tx_ring->reinit_state)) {
6394 ixgbe_atr(adapter, skb, tx_ring->queue_index, 6398 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6395 tx_flags); 6399 tx_flags, protocol);
6396 tx_ring->atr_count = 0; 6400 tx_ring->atr_count = 0;
6397 } 6401 }
6398 } 6402 }
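The ixgbe hunks above replace direct skb->protocol checks with vlan_get_protocol(skb), which looks through an 802.1Q tag and returns the encapsulated EtherType, so VLAN-tagged FCoE/IP traffic is still classified correctly. A rough user-space sketch of that lookup, modeled on the open-coded version removed from ixgbe_psum() above (the struct and function names here are illustrative stand-ins, not kernel definitions):

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_P_8021Q 0x8100
    #define ETH_P_IP    0x0800
    #define ETH_P_IPV6  0x86DD

    /* Simplified stand-in for the sk_buff fields involved. */
    struct fake_skb {
            uint16_t protocol;           /* outer EtherType */
            uint16_t encapsulated_proto; /* EtherType carried inside the 802.1Q tag */
    };

    /* Sketch of what vlan_get_protocol() provides: the payload protocol
     * even when the frame carries a VLAN tag. */
    static uint16_t fake_vlan_get_protocol(const struct fake_skb *skb)
    {
            if (skb->protocol == ETH_P_8021Q)
                    return skb->encapsulated_proto;
            return skb->protocol;
    }

    int main(void)
    {
            struct fake_skb plain  = { ETH_P_IPV6, 0 };
            struct fake_skb tagged = { ETH_P_8021Q, ETH_P_IP };

            printf("untagged frame -> 0x%04x\n", fake_vlan_get_protocol(&plain));  /* 0x86dd */
            printf("tagged frame   -> 0x%04x\n", fake_vlan_get_protocol(&tagged)); /* 0x0800 */
            return 0;
    }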
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d85edf3119c..c57d9a43cec 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2955,11 +2955,7 @@ jme_init_one(struct pci_dev *pdev,
2955 * Tell stack that we are not ready to work until open() 2955 * Tell stack that we are not ready to work until open()
2956 */ 2956 */
2957 netif_carrier_off(netdev); 2957 netif_carrier_off(netdev);
2958 netif_stop_queue(netdev);
2959 2958
2960 /*
2961 * Register netdev
2962 */
2963 rc = register_netdev(netdev); 2959 rc = register_netdev(netdev);
2964 if (rc) { 2960 if (rc) {
2965 pr_err("Cannot register net device\n"); 2961 pr_err("Cannot register net device\n");
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index a75ba951740..e1d30d7f207 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
41MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver"); 41MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); 43MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
44MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
45MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
46MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
47MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME); 44MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
48 45
49char netxen_nic_driver_name[] = "netxen_nic"; 46char netxen_nic_driver_name[] = "netxen_nic";
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d2e166e29dd..8a4d19e5de0 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
111 111
112typedef struct axnet_dev_t { 112typedef struct axnet_dev_t {
113 struct pcmcia_device *p_dev; 113 struct pcmcia_device *p_dev;
114 caddr_t base; 114 caddr_t base;
115 struct timer_list watchdog; 115 struct timer_list watchdog;
116 int stale, fast_poll; 116 int stale, fast_poll;
117 u_short link_status; 117 u_short link_status;
118 u_char duplex_flag; 118 u_char duplex_flag;
119 int phy_id; 119 int phy_id;
120 int flags; 120 int flags;
121 int active_low;
121} axnet_dev_t; 122} axnet_dev_t;
122 123
123static inline axnet_dev_t *PRIV(struct net_device *dev) 124static inline axnet_dev_t *PRIV(struct net_device *dev)
@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link)
322 if (info->flags & IS_AX88790) 323 if (info->flags & IS_AX88790)
323 outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */ 324 outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
324 325
326 info->active_low = 0;
327
325 for (i = 0; i < 32; i++) { 328 for (i = 0; i < 32; i++) {
326 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); 329 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
327 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); 330 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link)
329 if ((j != 0) && (j != 0xffff)) break; 332 if ((j != 0) && (j != 0xffff)) break;
330 } 333 }
331 334
332 /* Maybe PHY is in power down mode. (PPD_SET = 1)
333 Bit 2 of CCSR is active low. */
334 if (i == 32) { 335 if (i == 32) {
336 /* Maybe PHY is in power down mode. (PPD_SET = 1)
337 Bit 2 of CCSR is active low. */
335 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); 338 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
336 for (i = 0; i < 32; i++) { 339 for (i = 0; i < 32; i++) {
337 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); 340 j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
338 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); 341 j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
339 if (j == j2) continue; 342 if (j == j2) continue;
340 if ((j != 0) && (j != 0xffff)) break; 343 if ((j != 0) && (j != 0xffff)) {
344 info->active_low = 1;
345 break;
346 }
341 } 347 }
342 } 348 }
343 349
@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link)
383static int axnet_resume(struct pcmcia_device *link) 389static int axnet_resume(struct pcmcia_device *link)
384{ 390{
385 struct net_device *dev = link->priv; 391 struct net_device *dev = link->priv;
392 axnet_dev_t *info = PRIV(dev);
386 393
387 if (link->open) { 394 if (link->open) {
395 if (info->active_low == 1)
396 pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
397
388 axnet_reset_8390(dev); 398 axnet_reset_8390(dev);
389 AX88190_init(dev, 1); 399 AX88190_init(dev, 1);
390 netif_device_attach(dev); 400 netif_device_attach(dev);
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 7a298cdf9ab..a3dcd04be22 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1450,7 +1450,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1450 netdev->irq = adapter->msix_entries[0].vector; 1450 netdev->irq = adapter->msix_entries[0].vector;
1451 1451
1452 netif_carrier_off(netdev); 1452 netif_carrier_off(netdev);
1453 netif_stop_queue(netdev);
1454 1453
1455 err = register_netdev(netdev); 1454 err = register_netdev(netdev);
1456 if (err) { 1455 if (err) {
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9fb1cb..4c4d16905ef 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -846,10 +846,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
846 else 846 else
847 tp->features &= ~RTL_FEATURE_WOL; 847 tp->features &= ~RTL_FEATURE_WOL;
848 __rtl8169_set_wol(tp, wol->wolopts); 848 __rtl8169_set_wol(tp, wol->wolopts);
849 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
850
851 spin_unlock_irq(&tp->lock); 849 spin_unlock_irq(&tp->lock);
852 850
851 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
852
853 return 0; 853 return 0;
854} 854}
855 855
@@ -2931,7 +2931,7 @@ static const struct rtl_cfg_info {
2931 .hw_start = rtl_hw_start_8168, 2931 .hw_start = rtl_hw_start_8168,
2932 .region = 2, 2932 .region = 2,
2933 .align = 8, 2933 .align = 8,
2934 .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow | 2934 .intr_event = SYSErr | LinkChg | RxOverflow |
2935 TxErr | TxOK | RxOK | RxErr, 2935 TxErr | TxOK | RxOK | RxErr,
2936 .napi_event = TxErr | TxOK | RxOK | RxOverflow, 2936 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
2937 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, 2937 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4588,7 +4588,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
4588 } 4588 }
4589 4589
4590 /* Work around for rx fifo overflow */ 4590 /* Work around for rx fifo overflow */
4591 if (unlikely(status & RxFIFOOver)) { 4591 if (unlikely(status & RxFIFOOver) &&
4592 (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
4592 netif_stop_queue(dev); 4593 netif_stop_queue(dev);
4593 rtl8169_tx_timeout(dev); 4594 rtl8169_tx_timeout(dev);
4594 break; 4595 break;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bfec2e0f527..220e0398f1d 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3858 3858
3859 /* device is off until link detection */ 3859 /* device is off until link detection */
3860 netif_carrier_off(dev); 3860 netif_carrier_off(dev);
3861 netif_stop_queue(dev);
3862 3861
3863 return dev; 3862 return dev;
3864} 3863}
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 52f38e12a87..50f712e99e9 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
22#define __SMSC911X_H__ 22#define __SMSC911X_H__
23 23
24#define TX_FIFO_LOW_THRESHOLD ((u32)1600) 24#define TX_FIFO_LOW_THRESHOLD ((u32)1600)
25#define SMSC911X_EEPROM_SIZE ((u32)7) 25#define SMSC911X_EEPROM_SIZE ((u32)128)
26#define USE_DEBUG 0 26#define USE_DEBUG 0
27 27
28/* This is the maximum number of packets to be received every 28/* This is the maximum number of packets to be received every
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb13db..c78a50586c1 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2021,7 +2021,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2021 de->media_timer.data = (unsigned long) de; 2021 de->media_timer.data = (unsigned long) de;
2022 2022
2023 netif_carrier_off(dev); 2023 netif_carrier_off(dev);
2024 netif_stop_queue(dev);
2025 2024
2026 /* wake up device, assign resources */ 2025 /* wake up device, assign resources */
2027 rc = pci_enable_device(pdev); 2026 rc = pci_enable_device(pdev);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a4c3f570824..acbdab3d66c 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2050 2050
2051 ugeth_vdbg("%s: IN", __func__); 2051 ugeth_vdbg("%s: IN", __func__);
2052 2052
2053 /*
2054 * Tell the kernel the link is down.
2055 * Must be done before disabling the controller
2056 * or deadlock may happen.
2057 */
2058 phy_stop(phydev);
2059
2053 /* Disable the controller */ 2060 /* Disable the controller */
2054 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2061 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2055 2062
2056 /* Tell the kernel the link is down */
2057 phy_stop(phydev);
2058
2059 /* Mask all interrupts */ 2063 /* Mask all interrupts */
2060 out_be32(ugeth->uccf->p_uccm, 0x00000000); 2064 out_be32(ugeth->uccf->p_uccm, 0x00000000);
2061 2065
@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2065 /* Disable Rx and Tx */ 2069 /* Disable Rx and Tx */
2066 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2070 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2067 2071
2068 phy_disconnect(ugeth->phydev);
2069 ugeth->phydev = NULL;
2070
2071 ucc_geth_memclean(ugeth); 2072 ucc_geth_memclean(ugeth);
2072} 2073}
2073 2074
@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev)
3550 3551
3551 napi_disable(&ugeth->napi); 3552 napi_disable(&ugeth->napi);
3552 3553
3554 cancel_work_sync(&ugeth->timeout_work);
3553 ucc_geth_stop(ugeth); 3555 ucc_geth_stop(ugeth);
3556 phy_disconnect(ugeth->phydev);
3557 ugeth->phydev = NULL;
3554 3558
3555 free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); 3559 free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
3556 3560
@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work)
3579 * Must reset MAC *and* PHY. This is done by reopening 3583 * Must reset MAC *and* PHY. This is done by reopening
3580 * the device. 3584 * the device.
3581 */ 3585 */
3582 ucc_geth_close(dev); 3586 netif_tx_stop_all_queues(dev);
3583 ucc_geth_open(dev); 3587 ucc_geth_stop(ugeth);
3588 ucc_geth_init_mac(ugeth);
3589 /* Must start PHY here */
3590 phy_start(ugeth->phydev);
3591 netif_tx_start_all_queues(dev);
3584 } 3592 }
3585 3593
3586 netif_tx_schedule_all(dev); 3594 netif_tx_schedule_all(dev);
@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev)
3594{ 3602{
3595 struct ucc_geth_private *ugeth = netdev_priv(dev); 3603 struct ucc_geth_private *ugeth = netdev_priv(dev);
3596 3604
3597 netif_carrier_off(dev);
3598 schedule_work(&ugeth->timeout_work); 3605 schedule_work(&ugeth->timeout_work);
3599} 3606}
3600 3607
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9df1cc..c04d49e31f8 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
45#include <linux/usb/usbnet.h> 45#include <linux/usb/usbnet.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/kernel.h> 47#include <linux/kernel.h>
48#include <linux/pm_runtime.h>
48 49
49#define DRIVER_VERSION "22-Aug-2005" 50#define DRIVER_VERSION "22-Aug-2005"
50 51
@@ -1273,6 +1274,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1273 struct usb_device *xdev; 1274 struct usb_device *xdev;
1274 int status; 1275 int status;
1275 const char *name; 1276 const char *name;
1277 struct usb_driver *driver = to_usb_driver(udev->dev.driver);
1278
1279 /* usbnet already took usb runtime pm, so have to enable the feature
1280 * for usb interface, otherwise usb_autopm_get_interface may return
1281 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
1282 */
1283 if (!driver->supports_autosuspend) {
1284 driver->supports_autosuspend = 1;
1285 pm_runtime_enable(&udev->dev);
1286 }
1276 1287
1277 name = udev->dev.driver->name; 1288 name = udev->dev.driver->name;
1278 info = (struct driver_info *) prod->driver_info; 1289 info = (struct driver_info *) prod->driver_info;
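
The new probe-time step, condensed with the reasoning spelled out in comments (identifiers exactly as in the hunk above; note that the variable called udev in usbnet_probe() is the usb_interface being probed):

        struct usb_driver *driver = to_usb_driver(udev->dev.driver);

        /* usbnet already holds its own USB runtime-PM reference, so the
         * minidriver must advertise autosuspend support and runtime PM must
         * be enabled on the interface; otherwise usb_autopm_get_interface()
         * can fail when USB_SUSPEND (runtime PM) is configured. */
        if (!driver->supports_autosuspend) {
                driver->supports_autosuspend = 1;
                pm_runtime_enable(&udev->dev);
        }
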
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb6b67f6b0c..b6d402806ae 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev)
986 goto unregister; 986 goto unregister;
987 } 987 }
988 988
989 vi->status = VIRTIO_NET_S_LINK_UP; 989 /* Assume link up if device can't report link status,
990 virtnet_update_status(vi); 990 otherwise get link status from config. */
991 netif_carrier_on(dev); 991 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
992 netif_carrier_off(dev);
993 virtnet_update_status(vi);
994 } else {
995 vi->status = VIRTIO_NET_S_LINK_UP;
996 netif_carrier_on(dev);
997 }
992 998
993 pr_debug("virtnet: registered device %s\n", dev->name); 999 pr_debug("virtnet: registered device %s\n", dev->name);
994 return 0; 1000 return 0;
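
The probe-time carrier handling after the change, restated with comments (identifiers as in the hunk above):

        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
                /* The device can report link state: start with the carrier
                 * off and let virtnet_update_status() read the real state
                 * from config space. */
                netif_carrier_off(dev);
                virtnet_update_status(vi);
        } else {
                /* No status feature: the link can only be assumed up. */
                vi->status = VIRTIO_NET_S_LINK_UP;
                netif_carrier_on(dev);
        }
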
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index a0471f2e1c7..48261b7252d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -410,6 +410,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
410 val &= ~(AR_WA_BIT6 | AR_WA_BIT7); 410 val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
411 } 411 }
412 412
413 if (AR_SREV_9280(ah))
414 val |= AR_WA_BIT22;
415
413 if (AR_SREV_9285E_20(ah)) 416 if (AR_SREV_9285E_20(ah))
414 val |= AR_WA_BIT23; 417 val |= AR_WA_BIT23;
415 418
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 9b8e7e3fceb..170d44a35cc 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -675,6 +675,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
675} 675}
676 676
677extern struct ieee80211_ops ath9k_ops; 677extern struct ieee80211_ops ath9k_ops;
678extern struct pm_qos_request_list ath9k_pm_qos_req;
678extern int modparam_nohwcrypt; 679extern int modparam_nohwcrypt;
679extern int led_blink; 680extern int led_blink;
680 681
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 6576f683dba..f7ec31b4ddd 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -35,6 +35,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
35 { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ 35 { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
36 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ 36 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
37 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ 37 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
38 { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
38 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ 39 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
39 { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ 40 { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
40 { }, 41 { },
@@ -540,11 +541,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
540 return; 541 return;
541 } 542 }
542 543
543 usb_fill_int_urb(urb, hif_dev->udev, 544 usb_fill_bulk_urb(urb, hif_dev->udev,
544 usb_rcvbulkpipe(hif_dev->udev, 545 usb_rcvbulkpipe(hif_dev->udev,
545 USB_REG_IN_PIPE), 546 USB_REG_IN_PIPE),
546 nskb->data, MAX_REG_IN_BUF_SIZE, 547 nskb->data, MAX_REG_IN_BUF_SIZE,
547 ath9k_hif_usb_reg_in_cb, nskb, 1); 548 ath9k_hif_usb_reg_in_cb, nskb);
548 549
549 ret = usb_submit_urb(urb, GFP_ATOMIC); 550 ret = usb_submit_urb(urb, GFP_ATOMIC);
550 if (ret) { 551 if (ret) {
@@ -720,11 +721,11 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
720 if (!skb) 721 if (!skb)
721 goto err; 722 goto err;
722 723
723 usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev, 724 usb_fill_bulk_urb(hif_dev->reg_in_urb, hif_dev->udev,
724 usb_rcvbulkpipe(hif_dev->udev, 725 usb_rcvbulkpipe(hif_dev->udev,
725 USB_REG_IN_PIPE), 726 USB_REG_IN_PIPE),
726 skb->data, MAX_REG_IN_BUF_SIZE, 727 skb->data, MAX_REG_IN_BUF_SIZE,
727 ath9k_hif_usb_reg_in_cb, skb, 1); 728 ath9k_hif_usb_reg_in_cb, skb);
728 729
729 if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0) 730 if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
730 goto err; 731 goto err;
@@ -843,14 +844,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
843 goto err_fw_req; 844 goto err_fw_req;
844 } 845 }
845 846
846 /* Alloc URBs */
847 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
848 if (ret) {
849 dev_err(&hif_dev->udev->dev,
850 "ath9k_htc: Unable to allocate URBs\n");
851 goto err_urb;
852 }
853
854 /* Download firmware */ 847 /* Download firmware */
855 ret = ath9k_hif_usb_download_fw(hif_dev); 848 ret = ath9k_hif_usb_download_fw(hif_dev);
856 if (ret) { 849 if (ret) {
@@ -866,16 +859,22 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
866 */ 859 */
867 for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) { 860 for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
868 endp = &alt->endpoint[idx].desc; 861 endp = &alt->endpoint[idx].desc;
869 if (((endp->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) 862 if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
870 == 0x04) && 863 == USB_ENDPOINT_XFER_INT) {
871 ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
872 == USB_ENDPOINT_XFER_INT)) {
873 endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK; 864 endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
874 endp->bmAttributes |= USB_ENDPOINT_XFER_BULK; 865 endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
875 endp->bInterval = 0; 866 endp->bInterval = 0;
876 } 867 }
877 } 868 }
878 869
870 /* Alloc URBs */
871 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
872 if (ret) {
873 dev_err(&hif_dev->udev->dev,
874 "ath9k_htc: Unable to allocate URBs\n");
875 goto err_urb;
876 }
877
879 return 0; 878 return 0;
880 879
881err_fw_download: 880err_fw_download:
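
The register-in endpoint is forced to bulk before any URB is allocated (the URB allocation block also moves below the endpoint fixup in this hunk), so the submissions switch from usb_fill_int_urb() to usb_fill_bulk_urb(). The two helpers from <linux/usb.h> differ only in the trailing polling-interval argument, which a bulk pipe does not have:

void usb_fill_int_urb(struct urb *urb, struct usb_device *dev,
                      unsigned int pipe, void *transfer_buffer,
                      int buffer_length, usb_complete_t complete_fn,
                      void *context, int interval);

void usb_fill_bulk_urb(struct urb *urb, struct usb_device *dev,
                       unsigned int pipe, void *transfer_buffer,
                       int buffer_length, usb_complete_t complete_fn,
                       void *context);
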
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index cc13ee11782..6ebc68bca91 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -484,6 +484,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
484 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 484 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
485 "Failed allocating banks for " 485 "Failed allocating banks for "
486 "external radio\n"); 486 "external radio\n");
487 ath9k_hw_rf_free_ext_banks(ah);
487 return ecode; 488 return ecode;
488 } 489 }
489 490
@@ -952,9 +953,12 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
952 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 953 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
953 break; 954 break;
954 case NL80211_IFTYPE_STATION: 955 case NL80211_IFTYPE_STATION:
955 case NL80211_IFTYPE_MONITOR:
956 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); 956 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
957 break; 957 break;
958 default:
959 if (ah->is_monitoring)
960 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
961 break;
958 } 962 }
959} 963}
960 964
@@ -1634,7 +1638,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
1634 1638
1635 switch (ah->opmode) { 1639 switch (ah->opmode) {
1636 case NL80211_IFTYPE_STATION: 1640 case NL80211_IFTYPE_STATION:
1637 case NL80211_IFTYPE_MONITOR:
1638 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); 1641 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
1639 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); 1642 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
1640 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); 1643 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
@@ -1663,6 +1666,14 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
1663 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; 1666 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
1664 break; 1667 break;
1665 default: 1668 default:
1669 if (ah->is_monitoring) {
1670 REG_WRITE(ah, AR_NEXT_TBTT_TIMER,
1671 TU_TO_USEC(next_beacon));
1672 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
1673 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
1674 flags |= AR_TBTT_TIMER_EN;
1675 break;
1676 }
1666 ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON, 1677 ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
1667 "%s: unsupported opmode: %d\n", 1678 "%s: unsupported opmode: %d\n",
1668 __func__, ah->opmode); 1679 __func__, ah->opmode);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index d032939768b..d47d1b4b600 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -622,6 +622,7 @@ struct ath_hw {
622 622
623 bool sw_mgmt_crypto; 623 bool sw_mgmt_crypto;
624 bool is_pciexpress; 624 bool is_pciexpress;
625 bool is_monitoring;
625 bool need_an_top2_fixup; 626 bool need_an_top2_fixup;
626 u16 tx_trig_level; 627 u16 tx_trig_level;
627 628
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 95b41db0d86..6a0d99eff40 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/pm_qos_params.h>
18 19
19#include "ath9k.h" 20#include "ath9k.h"
20 21
@@ -179,6 +180,8 @@ static const struct ath_ops ath9k_common_ops = {
179 .write = ath9k_iowrite32, 180 .write = ath9k_iowrite32,
180}; 181};
181 182
183struct pm_qos_request_list ath9k_pm_qos_req;
184
182/**************************/ 185/**************************/
183/* Initialization */ 186/* Initialization */
184/**************************/ 187/**************************/
@@ -756,6 +759,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
756 ath_init_leds(sc); 759 ath_init_leds(sc);
757 ath_start_rfkill_poll(sc); 760 ath_start_rfkill_poll(sc);
758 761
762 pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
763 PM_QOS_DEFAULT_VALUE);
764
759 return 0; 765 return 0;
760 766
761error_world: 767error_world:
@@ -811,6 +817,8 @@ void ath9k_deinit_device(struct ath_softc *sc)
811 817
812 ath9k_ps_wakeup(sc); 818 ath9k_ps_wakeup(sc);
813 819
820 pm_qos_remove_request(&ath9k_pm_qos_req);
821
814 wiphy_rfkill_stop_polling(sc->hw->wiphy); 822 wiphy_rfkill_stop_polling(sc->hw->wiphy);
815 ath_deinit_leds(sc); 823 ath_deinit_leds(sc);
816 824
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b52f1cf8a60..25d3ef4c338 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include <linux/pm_qos_params.h>
18#include "ath9k.h" 19#include "ath9k.h"
19#include "btcoex.h" 20#include "btcoex.h"
20 21
@@ -93,11 +94,13 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
93{ 94{
94 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 95 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
95 unsigned long flags; 96 unsigned long flags;
97 enum ath9k_power_mode power_mode;
96 98
97 spin_lock_irqsave(&sc->sc_pm_lock, flags); 99 spin_lock_irqsave(&sc->sc_pm_lock, flags);
98 if (++sc->ps_usecount != 1) 100 if (++sc->ps_usecount != 1)
99 goto unlock; 101 goto unlock;
100 102
103 power_mode = sc->sc_ah->power_mode;
101 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 104 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
102 105
103 /* 106 /*
@@ -105,10 +108,12 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
105 * useful data. Better clear them now so that they don't mess up 108 * useful data. Better clear them now so that they don't mess up
106 * survey data results. 109 * survey data results.
107 */ 110 */
108 spin_lock(&common->cc_lock); 111 if (power_mode != ATH9K_PM_AWAKE) {
109 ath_hw_cycle_counters_update(common); 112 spin_lock(&common->cc_lock);
110 memset(&common->cc_survey, 0, sizeof(common->cc_survey)); 113 ath_hw_cycle_counters_update(common);
111 spin_unlock(&common->cc_lock); 114 memset(&common->cc_survey, 0, sizeof(common->cc_survey));
115 spin_unlock(&common->cc_lock);
116 }
112 117
113 unlock: 118 unlock:
114 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 119 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
@@ -1217,6 +1222,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
1217 ah->imask |= ATH9K_INT_CST; 1222 ah->imask |= ATH9K_INT_CST;
1218 1223
1219 sc->sc_flags &= ~SC_OP_INVALID; 1224 sc->sc_flags &= ~SC_OP_INVALID;
1225 sc->sc_ah->is_monitoring = false;
1220 1226
1221 /* Disable BMISS interrupt when we're not associated */ 1227 /* Disable BMISS interrupt when we're not associated */
1222 ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 1228 ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
@@ -1238,6 +1244,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
1238 ath9k_btcoex_timer_resume(sc); 1244 ath9k_btcoex_timer_resume(sc);
1239 } 1245 }
1240 1246
1247 pm_qos_update_request(&ath9k_pm_qos_req, 55);
1248
1241mutex_unlock: 1249mutex_unlock:
1242 mutex_unlock(&sc->mutex); 1250 mutex_unlock(&sc->mutex);
1243 1251
@@ -1415,6 +1423,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1415 1423
1416 sc->sc_flags |= SC_OP_INVALID; 1424 sc->sc_flags |= SC_OP_INVALID;
1417 1425
1426 pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE);
1427
1418 mutex_unlock(&sc->mutex); 1428 mutex_unlock(&sc->mutex);
1419 1429
1420 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); 1430 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
@@ -1493,8 +1503,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1493 ath9k_hw_set_interrupts(ah, ah->imask); 1503 ath9k_hw_set_interrupts(ah, ah->imask);
1494 1504
1495 if (vif->type == NL80211_IFTYPE_AP || 1505 if (vif->type == NL80211_IFTYPE_AP ||
1496 vif->type == NL80211_IFTYPE_ADHOC || 1506 vif->type == NL80211_IFTYPE_ADHOC) {
1497 vif->type == NL80211_IFTYPE_MONITOR) {
1498 sc->sc_flags |= SC_OP_ANI_RUN; 1507 sc->sc_flags |= SC_OP_ANI_RUN;
1499 ath_start_ani(common); 1508 ath_start_ani(common);
1500 } 1509 }
@@ -1644,8 +1653,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1644 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1653 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1645 if (conf->flags & IEEE80211_CONF_MONITOR) { 1654 if (conf->flags & IEEE80211_CONF_MONITOR) {
1646 ath_print(common, ATH_DBG_CONFIG, 1655 ath_print(common, ATH_DBG_CONFIG,
1647 "HW opmode set to Monitor mode\n"); 1656 "Monitor mode is enabled\n");
1648 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR; 1657 sc->sc_ah->is_monitoring = true;
1658 } else {
1659 ath_print(common, ATH_DBG_CONFIG,
1660 "Monitor mode is disabled\n");
1661 sc->sc_ah->is_monitoring = false;
1649 } 1662 }
1650 } 1663 }
1651 1664
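
With monitor mode tracked in ah->is_monitoring instead of by overwriting ah->opmode with NL80211_IFTYPE_MONITOR, the producer and one consumer of the flag boil down to the following (condensed from the config and receive paths in this series; the debug prints are dropped for brevity):

        /* ath9k_config(): record what mac80211 asked for */
        if (changed & IEEE80211_CONF_CHANGE_MONITOR)
                sc->sc_ah->is_monitoring =
                        !!(conf->flags & IEEE80211_CONF_MONITOR);

        /* ath_calcrxfilter(): promiscuous RX whenever monitoring */
        if (sc->sc_ah->is_monitoring)
                rfilt |= ATH9K_RX_FILTER_PROM;
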
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index fddb0129bb5..c76ea53c20c 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -441,7 +441,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
441 */ 441 */
442 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && 442 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
443 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || 443 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
444 (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) 444 (sc->sc_ah->is_monitoring))
445 rfilt |= ATH9K_RX_FILTER_PROM; 445 rfilt |= ATH9K_RX_FILTER_PROM;
446 446
447 if (sc->rx.rxfilter & FIF_CONTROL) 447 if (sc->rx.rxfilter & FIF_CONTROL)
@@ -897,7 +897,7 @@ static bool ath9k_rx_accept(struct ath_common *common,
897 * decryption and MIC failures. For monitor mode, 897 * decryption and MIC failures. For monitor mode,
898 * we also ignore the CRC error. 898 * we also ignore the CRC error.
899 */ 899 */
900 if (ah->opmode == NL80211_IFTYPE_MONITOR) { 900 if (ah->is_monitoring) {
901 if (rx_stats->rs_status & 901 if (rx_stats->rs_status &
902 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 902 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
903 ATH9K_RXERR_CRC)) 903 ATH9K_RXERR_CRC))
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 42976b0a01c..fa05b711e5c 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -703,6 +703,7 @@
703#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */ 703#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
704#define AR_WA_ANALOG_SHIFT (1 << 20) 704#define AR_WA_ANALOG_SHIFT (1 << 20)
705#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */ 705#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */
706#define AR_WA_BIT22 (1 << 22)
706#define AR9285_WA_DEFAULT 0x004a050b 707#define AR9285_WA_DEFAULT 0x004a050b
707#define AR9280_WA_DEFAULT 0x0040073b 708#define AR9280_WA_DEFAULT 0x0040073b
708#define AR_WA_DEFAULT 0x0000073f 709#define AR_WA_DEFAULT 0x0000073f
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index d8607f4c144..3317039cd28 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -82,9 +82,11 @@ static struct usb_device_id carl9170_usb_ids[] = {
82 { USB_DEVICE(0x07d1, 0x3c10) }, 82 { USB_DEVICE(0x07d1, 0x3c10) },
83 /* D-Link DWA 160 A2 */ 83 /* D-Link DWA 160 A2 */
84 { USB_DEVICE(0x07d1, 0x3a09) }, 84 { USB_DEVICE(0x07d1, 0x3a09) },
85 /* D-Link DWA 130 D */
86 { USB_DEVICE(0x07d1, 0x3a0f) },
85 /* Netgear WNA1000 */ 87 /* Netgear WNA1000 */
86 { USB_DEVICE(0x0846, 0x9040) }, 88 { USB_DEVICE(0x0846, 0x9040) },
87 /* Netgear WNDA3100 */ 89 /* Netgear WNDA3100 (v1) */
88 { USB_DEVICE(0x0846, 0x9010) }, 90 { USB_DEVICE(0x0846, 0x9010) },
89 /* Netgear WN111 v2 */ 91 /* Netgear WN111 v2 */
90 { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED }, 92 { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED },
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 32dee2ce5d3..d5ef696298e 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -54,6 +54,7 @@
54 54
55#define DRV_DESCRIPTION "802.11 data/management/control stack" 55#define DRV_DESCRIPTION "802.11 data/management/control stack"
56#define DRV_NAME "libipw" 56#define DRV_NAME "libipw"
57#define DRV_PROCNAME "ieee80211"
57#define DRV_VERSION LIBIPW_VERSION 58#define DRV_VERSION LIBIPW_VERSION
58#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>" 59#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
59 60
@@ -293,16 +294,16 @@ static int __init libipw_init(void)
293 struct proc_dir_entry *e; 294 struct proc_dir_entry *e;
294 295
295 libipw_debug_level = debug; 296 libipw_debug_level = debug;
296 libipw_proc = proc_mkdir("ieee80211", init_net.proc_net); 297 libipw_proc = proc_mkdir(DRV_PROCNAME, init_net.proc_net);
297 if (libipw_proc == NULL) { 298 if (libipw_proc == NULL) {
298 LIBIPW_ERROR("Unable to create " DRV_NAME 299 LIBIPW_ERROR("Unable to create " DRV_PROCNAME
299 " proc directory\n"); 300 " proc directory\n");
300 return -EIO; 301 return -EIO;
301 } 302 }
302 e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc, 303 e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc,
303 &debug_level_proc_fops); 304 &debug_level_proc_fops);
304 if (!e) { 305 if (!e) {
305 remove_proc_entry(DRV_NAME, init_net.proc_net); 306 remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
306 libipw_proc = NULL; 307 libipw_proc = NULL;
307 return -EIO; 308 return -EIO;
308 } 309 }
@@ -319,7 +320,7 @@ static void __exit libipw_exit(void)
319#ifdef CONFIG_LIBIPW_DEBUG 320#ifdef CONFIG_LIBIPW_DEBUG
320 if (libipw_proc) { 321 if (libipw_proc) {
321 remove_proc_entry("debug_level", libipw_proc); 322 remove_proc_entry("debug_level", libipw_proc);
322 remove_proc_entry(DRV_NAME, init_net.proc_net); 323 remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
323 libipw_proc = NULL; 324 libipw_proc = NULL;
324 } 325 }
325#endif /* CONFIG_LIBIPW_DEBUG */ 326#endif /* CONFIG_LIBIPW_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 8f8c4b73f8b..7edf8c2fb8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -4000,7 +4000,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4000 * "the hard way", rather than using device's scan. 4000 * "the hard way", rather than using device's scan.
4001 */ 4001 */
4002 if (iwl3945_mod_params.disable_hw_scan) { 4002 if (iwl3945_mod_params.disable_hw_scan) {
4003 IWL_ERR(priv, "sw scan support is deprecated\n"); 4003 dev_printk(KERN_DEBUG, &(pdev->dev),
4004 "sw scan support is deprecated\n");
4004 iwl3945_hw_ops.hw_scan = NULL; 4005 iwl3945_hw_ops.hw_scan = NULL;
4005 } 4006 }
4006 4007
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 5046a000503..373930afc26 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -700,8 +700,9 @@ static void lbs_scan_worker(struct work_struct *work)
700 700
701 if (priv->scan_channel < priv->scan_req->n_channels) { 701 if (priv->scan_channel < priv->scan_req->n_channels) {
702 cancel_delayed_work(&priv->scan_work); 702 cancel_delayed_work(&priv->scan_work);
703 queue_delayed_work(priv->work_thread, &priv->scan_work, 703 if (!priv->stopping)
704 msecs_to_jiffies(300)); 704 queue_delayed_work(priv->work_thread, &priv->scan_work,
705 msecs_to_jiffies(300));
705 } 706 }
706 707
707 /* This is the final data we are about to send */ 708 /* This is the final data we are about to send */
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f062ed58390..cb14c38caf3 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -36,6 +36,7 @@ struct lbs_private {
36 /* CFG80211 */ 36 /* CFG80211 */
37 struct wireless_dev *wdev; 37 struct wireless_dev *wdev;
38 bool wiphy_registered; 38 bool wiphy_registered;
39 bool stopping;
39 struct cfg80211_scan_request *scan_req; 40 struct cfg80211_scan_request *scan_req;
40 u8 assoc_bss[ETH_ALEN]; 41 u8 assoc_bss[ETH_ALEN];
41 u8 disassoc_reason; 42 u8 disassoc_reason;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 47ce5a6ba12..46b88b118c9 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -104,6 +104,7 @@ static int lbs_dev_open(struct net_device *dev)
104 lbs_deb_enter(LBS_DEB_NET); 104 lbs_deb_enter(LBS_DEB_NET);
105 105
106 spin_lock_irq(&priv->driver_lock); 106 spin_lock_irq(&priv->driver_lock);
107 priv->stopping = false;
107 108
108 if (priv->connect_status == LBS_CONNECTED) 109 if (priv->connect_status == LBS_CONNECTED)
109 netif_carrier_on(dev); 110 netif_carrier_on(dev);
@@ -131,10 +132,16 @@ static int lbs_eth_stop(struct net_device *dev)
131 lbs_deb_enter(LBS_DEB_NET); 132 lbs_deb_enter(LBS_DEB_NET);
132 133
133 spin_lock_irq(&priv->driver_lock); 134 spin_lock_irq(&priv->driver_lock);
135 priv->stopping = true;
134 netif_stop_queue(dev); 136 netif_stop_queue(dev);
135 spin_unlock_irq(&priv->driver_lock); 137 spin_unlock_irq(&priv->driver_lock);
136 138
137 schedule_work(&priv->mcast_work); 139 schedule_work(&priv->mcast_work);
140 cancel_delayed_work_sync(&priv->scan_work);
141 if (priv->scan_req) {
142 cfg80211_scan_done(priv->scan_req, false);
143 priv->scan_req = NULL;
144 }
138 145
139 lbs_deb_leave(LBS_DEB_NET); 146 lbs_deb_leave(LBS_DEB_NET);
140 return 0; 147 return 0;
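
The race being closed here is between the scan worker re-arming itself and the interface going down; condensed, the stop path and the worker now cooperate like this (identifiers as in the hunks above):

        /* lbs_eth_stop(): flag the device as stopping before cancelling,
         * so a worker that is already running will not re-queue itself. */
        spin_lock_irq(&priv->driver_lock);
        priv->stopping = true;
        netif_stop_queue(dev);
        spin_unlock_irq(&priv->driver_lock);

        cancel_delayed_work_sync(&priv->scan_work);
        if (priv->scan_req) {
                /* Report any scan still in flight as finished so cfg80211
                 * does not wait for it forever. */
                cfg80211_scan_done(priv->scan_req, false);
                priv->scan_req = NULL;
        }

        /* lbs_scan_worker(): only re-arm while the interface stays up. */
        if (!priv->stopping)
                queue_delayed_work(priv->work_thread, &priv->scan_work,
                                   msecs_to_jiffies(300));
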
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index eea1ef2f502..4396d4b9bfb 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -221,9 +221,6 @@ config RT2X00_LIB_LEDS
221 boolean 221 boolean
222 default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n) 222 default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
223 223
224comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
225 depends on RT2X00_LIB=y && LEDS_CLASS=m
226
227config RT2X00_LIB_DEBUGFS 224config RT2X00_LIB_DEBUGFS
228 bool "Ralink debugfs support" 225 bool "Ralink debugfs support"
229 depends on RT2X00_LIB && MAC80211_DEBUGFS 226 depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 5624db8c9ad..003170ea2e3 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -64,17 +64,57 @@ void pci_bus_remove_resources(struct pci_bus *bus)
64 } 64 }
65} 65}
66 66
67static bool pci_bus_resource_better(struct resource *res1, bool pos1,
68 struct resource *res2, bool pos2)
69{
70 /* If exactly one is positive decode, always prefer that one */
71 if (pos1 != pos2)
72 return pos1 ? true : false;
73
74 /* Prefer the one that contains the highest address */
75 if (res1->end != res2->end)
76 return (res1->end > res2->end) ? true : false;
77
78 /* Otherwise, prefer the one with highest "center of gravity" */
79 if (res1->start != res2->start)
80 return (res1->start > res2->start) ? true : false;
81
82 /* Otherwise, choose one arbitrarily (but consistently) */
83 return (res1 > res2) ? true : false;
84}
85
86static bool pci_bus_resource_positive(struct pci_bus *bus, struct resource *res)
87{
88 struct pci_bus_resource *bus_res;
89
90 /*
91 * This relies on the fact that pci_bus.resource[] refers to P2P or
92 * CardBus bridge base/limit registers, which are always positively
93 * decoded. The pci_bus.resources list contains host bridge or
94 * subtractively decoded resources.
95 */
96 list_for_each_entry(bus_res, &bus->resources, list) {
97 if (bus_res->res == res)
98 return (bus_res->flags & PCI_SUBTRACTIVE_DECODE) ?
99 false : true;
100 }
101 return true;
102}
103
67/* 104/*
68 * Find the highest-address bus resource below the cursor "res". If the 105 * Find the next-best bus resource after the cursor "res". If the cursor is
69 * cursor is NULL, return the highest resource. 106 * NULL, return the best resource. "Best" means that we prefer positive
107 * decode regions over subtractive decode, then those at higher addresses.
70 */ 108 */
71static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, 109static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus,
72 unsigned int type, 110 unsigned int type,
73 struct resource *res) 111 struct resource *res)
74{ 112{
113 bool res_pos, r_pos, prev_pos = false;
75 struct resource *r, *prev = NULL; 114 struct resource *r, *prev = NULL;
76 int i; 115 int i;
77 116
117 res_pos = pci_bus_resource_positive(bus, res);
78 pci_bus_for_each_resource(bus, r, i) { 118 pci_bus_for_each_resource(bus, r, i) {
79 if (!r) 119 if (!r)
80 continue; 120 continue;
@@ -82,26 +122,14 @@ static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus,
82 if ((r->flags & IORESOURCE_TYPE_BITS) != type) 122 if ((r->flags & IORESOURCE_TYPE_BITS) != type)
83 continue; 123 continue;
84 124
85 /* If this resource is at or past the cursor, skip it */ 125 r_pos = pci_bus_resource_positive(bus, r);
86 if (res) { 126 if (!res || pci_bus_resource_better(res, res_pos, r, r_pos)) {
87 if (r == res) 127 if (!prev || pci_bus_resource_better(r, r_pos,
88 continue; 128 prev, prev_pos)) {
89 if (r->end > res->end) 129 prev = r;
90 continue; 130 prev_pos = r_pos;
91 if (r->end == res->end && r->start > res->start) 131 }
92 continue;
93 } 132 }
94
95 if (!prev)
96 prev = r;
97
98 /*
99 * A small resource is higher than a large one that ends at
100 * the same address.
101 */
102 if (r->end > prev->end ||
103 (r->end == prev->end && r->start > prev->start))
104 prev = r;
105 } 133 }
106 134
107 return prev; 135 return prev;
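
To see why the new comparator changes allocation order, here is a small stand-alone user-space harness (hypothetical, for illustration only; it mirrors the ordering rule of pci_bus_resource_better() above rather than reusing kernel types). A positively decoded P2P window now wins over a larger subtractive host-bridge window, whereas the old end/start comparison alone would have preferred the bigger window:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct res { uint64_t start, end; };

static bool better(const struct res *r1, bool pos1,
                   const struct res *r2, bool pos2)
{
        if (pos1 != pos2)
                return pos1;                    /* positive decode first */
        if (r1->end != r2->end)
                return r1->end > r2->end;       /* then the higher end   */
        if (r1->start != r2->start)
                return r1->start > r2->start;   /* then the higher start */
        return (uintptr_t)r1 > (uintptr_t)r2;   /* stable tie-break      */
}

int main(void)
{
        struct res host = { 0x00000000, 0xffffffff };  /* subtractive decode */
        struct res p2p  = { 0xe0000000, 0xefffffff };  /* positive decode    */

        /* Prints 1: the smaller but positively decoded window is preferred. */
        printf("prefer p2p window: %d\n", better(&p2p, true, &host, false));
        return 0;
}
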
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 5becbdee402..2850e64deda 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -276,6 +276,12 @@ int __init ibmphp_access_ebda (void)
276 276
277 for (;;) { 277 for (;;) {
278 offset = next_offset; 278 offset = next_offset;
279
280 /* Make sure what we read is still in the mapped section */
281 if (WARN(offset > (ebda_sz * 1024 - 4),
282 "ibmphp_ebda: next read is beyond ebda_sz\n"))
283 break;
284
279 next_offset = readw (io_mem + offset); /* offset of next blk */ 285 next_offset = readw (io_mem + offset); /* offset of next blk */
280 286
281 offset += 2; 287 offset += 2;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index b5a7d9bfcb2..95712a375cd 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -705,17 +705,21 @@ void pci_remove_legacy_files(struct pci_bus *b)
705 705
706#ifdef HAVE_PCI_MMAP 706#ifdef HAVE_PCI_MMAP
707 707
708int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) 708int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
709 enum pci_mmap_api mmap_api)
709{ 710{
710 unsigned long nr, start, size; 711 unsigned long nr, start, size, pci_start;
711 712
713 if (pci_resource_len(pdev, resno) == 0)
714 return 0;
712 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 715 nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
713 start = vma->vm_pgoff; 716 start = vma->vm_pgoff;
714 size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; 717 size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
715 if (start < size && size - start >= nr) 718 pci_start = (mmap_api == PCI_MMAP_SYSFS) ?
719 pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
720 if (start >= pci_start && start < pci_start + size &&
721 start + nr <= pci_start + size)
716 return 1; 722 return 1;
717 WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n",
718 current->comm, start, start+nr, pci_name(pdev), resno, size);
719 return 0; 723 return 0;
720} 724}
721 725
@@ -745,8 +749,15 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
745 if (i >= PCI_ROM_RESOURCE) 749 if (i >= PCI_ROM_RESOURCE)
746 return -ENODEV; 750 return -ENODEV;
747 751
748 if (!pci_mmap_fits(pdev, i, vma)) 752 if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
753 WARN(1, "process \"%s\" tried to map 0x%08lx bytes "
754 "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
755 current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
756 pci_name(pdev), i,
757 (u64)pci_resource_start(pdev, i),
758 (u64)pci_resource_len(pdev, i));
749 return -EINVAL; 759 return -EINVAL;
760 }
750 761
751 /* pci_mmap_page_range() expects the same kind of entry as coming 762 /* pci_mmap_page_range() expects the same kind of entry as coming
752 * from /proc/bus/pci/ which is a "user visible" value. If this is 763 * from /proc/bus/pci/ which is a "user visible" value. If this is
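
A quick way to see what the extra mmap_api argument buys is a user-space harness (hypothetical, for illustration only) that mirrors the new window check: as the patch reads, a sysfs mapping is validated against page offsets anchored at the BAR's own page frame number, while a procfs mapping is validated from page zero. The BAR address and size below are made up:

#include <stdbool.h>
#include <stdio.h>

enum mmap_api { MMAP_SYSFS, MMAP_PROCFS };

/* start/nr: requested page offset and page count (vm_pgoff and VMA size);
 * bar_pfn/bar_pages: the resource being mapped. */
static bool fits(unsigned long start, unsigned long nr,
                 unsigned long bar_pfn, unsigned long bar_pages,
                 enum mmap_api api)
{
        unsigned long pci_start = (api == MMAP_SYSFS) ? bar_pfn : 0;

        return start >= pci_start && start < pci_start + bar_pages &&
               start + nr <= pci_start + bar_pages;
}

int main(void)
{
        unsigned long bar_pfn = 0xfebe0;        /* BAR at 0xfebe0000 */
        unsigned long bar_pages = 16;           /* 64 KiB BAR        */

        printf("procfs, 16 pages at offset 0: %d\n",
               fits(0, 16, bar_pfn, bar_pages, MMAP_PROCFS));      /* 1 */
        printf("sysfs,  16 pages at BAR pfn:  %d\n",
               fits(bar_pfn, 16, bar_pfn, bar_pages, MMAP_SYSFS)); /* 1 */
        printf("sysfs,  16 pages at offset 0: %d\n",
               fits(0, 16, bar_pfn, bar_pages, MMAP_SYSFS));       /* 0 */
        return 0;
}
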
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e98c8104297..710c8a29be0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1007,6 +1007,18 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
1007 int err; 1007 int err;
1008 int i, bars = 0; 1008 int i, bars = 0;
1009 1009
1010 /*
1011 * Power state could be unknown at this point, either due to a fresh
1012 * boot or a device removal call. So get the current power state
1013 * so that things like MSI message writing will behave as expected
1014 * (e.g. if the device really is in D0 at enable time).
1015 */
1016 if (dev->pm_cap) {
1017 u16 pmcsr;
1018 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1019 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1020 }
1021
1010 if (atomic_add_return(1, &dev->enable_cnt) > 1) 1022 if (atomic_add_return(1, &dev->enable_cnt) > 1)
1011 return 0; /* already enabled */ 1023 return 0; /* already enabled */
1012 1024
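
The PMCSR read is what keeps dev->current_state honest across a remove/rescan cycle or a fresh boot: PMCSR bits 1:0 encode the device's D-state with the same numbering that pci_power_t uses (0 = D0 ... 3 = D3hot), which is why the masked value can be assigned directly. Condensed from the hunk above:

        if (dev->pm_cap) {
                u16 pmcsr;

                pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
                /* bits 1:0 of PMCSR = current power state */
                dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        }
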
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f5c7c382765..7d33f667386 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -22,8 +22,13 @@ extern void pci_remove_firmware_label_files(struct pci_dev *pdev);
22#endif 22#endif
23extern void pci_cleanup_rom(struct pci_dev *dev); 23extern void pci_cleanup_rom(struct pci_dev *dev);
24#ifdef HAVE_PCI_MMAP 24#ifdef HAVE_PCI_MMAP
25enum pci_mmap_api {
26 PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
27 PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
28};
25extern int pci_mmap_fits(struct pci_dev *pdev, int resno, 29extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
26 struct vm_area_struct *vma); 30 struct vm_area_struct *vmai,
31 enum pci_mmap_api mmap_api);
27#endif 32#endif
28int pci_probe_reset_function(struct pci_dev *dev); 33int pci_probe_reset_function(struct pci_dev *dev);
29 34
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index 297b72c880a..ea00647f473 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -257,7 +257,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
257 257
258 /* Make sure the caller is mapping a real resource for this device */ 258 /* Make sure the caller is mapping a real resource for this device */
259 for (i = 0; i < PCI_ROM_RESOURCE; i++) { 259 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
260 if (pci_mmap_fits(dev, i, vma)) 260 if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
261 break; 261 break;
262 } 262 }
263 263
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index a87c4985326..3a5a6fcc0ea 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -13,7 +13,6 @@
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/msi.h> 15#include <linux/msi.h>
16#include <xen/xenbus.h>
17#include <xen/interface/io/pciif.h> 16#include <xen/interface/io/pciif.h>
18#include <asm/xen/pci.h> 17#include <asm/xen/pci.h>
19#include <linux/interrupt.h> 18#include <linux/interrupt.h>
@@ -576,8 +575,9 @@ static pci_ers_result_t pcifront_common_process(int cmd,
576 575
577 pcidev = pci_get_bus_and_slot(bus, devfn); 576 pcidev = pci_get_bus_and_slot(bus, devfn);
578 if (!pcidev || !pcidev->driver) { 577 if (!pcidev || !pcidev->driver) {
579 dev_err(&pcidev->dev, 578 dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
580 "device or driver is NULL\n"); 579 if (pcidev)
580 pci_dev_put(pcidev);
581 return result; 581 return result;
582 } 582 }
583 pdrv = pcidev->driver; 583 pdrv = pcidev->driver;
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 8cbfa067171..96c72e90b79 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -725,17 +725,17 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
725 725
726 return 0; 726 return 0;
727 727
728 err_out_free_res2: 728err_out_free_res2:
729 if (irq_mode == 1) 729 if (irq_mode == 1)
730 free_irq(dev->irq, socket); 730 free_irq(dev->irq, socket);
731 else 731 else
732 del_timer_sync(&socket->poll_timer); 732 del_timer_sync(&socket->poll_timer);
733 err_out_free_res: 733err_out_free_res:
734 pci_release_regions(dev); 734 pci_release_regions(dev);
735 err_out_disable: 735err_out_disable:
736 pci_disable_device(dev); 736 pci_disable_device(dev);
737 737
738 err_out_free_mem: 738err_out_free_mem:
739 kfree(socket); 739 kfree(socket);
740 return ret; 740 return ret;
741} 741}
diff --git a/drivers/pcmcia/pd6729.h b/drivers/pcmcia/pd6729.h
index 41418d394c5..c8e84bdece3 100644
--- a/drivers/pcmcia/pd6729.h
+++ b/drivers/pcmcia/pd6729.h
@@ -15,7 +15,7 @@
15struct pd6729_socket { 15struct pd6729_socket {
16 int number; 16 int number;
17 int card_irq; 17 int card_irq;
18 unsigned long io_base; /* base io address of the socket */ 18 unsigned long io_base; /* base io address of the socket */
19 struct pcmcia_socket socket; 19 struct pcmcia_socket socket;
20 struct timer_list poll_timer; 20 struct timer_list poll_timer;
21}; 21};
diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c
index 0ea3b29440e..81af2b3bcc0 100644
--- a/drivers/pcmcia/pxa2xx_sharpsl.c
+++ b/drivers/pcmcia/pxa2xx_sharpsl.c
@@ -237,7 +237,7 @@ static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = {
237#ifdef CONFIG_SA1100_COLLIE 237#ifdef CONFIG_SA1100_COLLIE
238#include "sa11xx_base.h" 238#include "sa11xx_base.h"
239 239
240int __init pcmcia_collie_init(struct device *dev) 240int __devinit pcmcia_collie_init(struct device *dev)
241{ 241{
242 int ret = -ENODEV; 242 int ret = -ENODEV;
243 243
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index fd013a1ef47..f1e882272ab 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -130,7 +130,7 @@ static struct pcmcia_low_level assabet_pcmcia_ops = {
130 .socket_suspend = assabet_pcmcia_socket_suspend, 130 .socket_suspend = assabet_pcmcia_socket_suspend,
131}; 131};
132 132
133int pcmcia_assabet_init(struct device *dev) 133int __devinit pcmcia_assabet_init(struct device *dev)
134{ 134{
135 int ret = -ENODEV; 135 int ret = -ENODEV;
136 136
diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c
index 9bf088b1727..30560df8c76 100644
--- a/drivers/pcmcia/sa1100_cerf.c
+++ b/drivers/pcmcia/sa1100_cerf.c
@@ -97,7 +97,7 @@ static struct pcmcia_low_level cerf_pcmcia_ops = {
97 .socket_suspend = cerf_pcmcia_socket_suspend, 97 .socket_suspend = cerf_pcmcia_socket_suspend,
98}; 98};
99 99
100int __init pcmcia_cerf_init(struct device *dev) 100int __devinit pcmcia_cerf_init(struct device *dev)
101{ 101{
102 int ret = -ENODEV; 102 int ret = -ENODEV;
103 103
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 945857f8c28..6b228590b3f 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -64,7 +64,7 @@ static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) = {
64#endif 64#endif
65}; 65};
66 66
67static int sa11x0_drv_pcmcia_probe(struct platform_device *dev) 67static int __devinit sa11x0_drv_pcmcia_probe(struct platform_device *dev)
68{ 68{
69 int i, ret = -ENODEV; 69 int i, ret = -ENODEV;
70 70
diff --git a/drivers/pcmcia/sa1100_h3600.c b/drivers/pcmcia/sa1100_h3600.c
index 56329ad575a..edf8f002889 100644
--- a/drivers/pcmcia/sa1100_h3600.c
+++ b/drivers/pcmcia/sa1100_h3600.c
@@ -219,7 +219,7 @@ struct pcmcia_low_level h3600_pcmcia_ops = {
219 .socket_suspend = h3600_pcmcia_socket_suspend, 219 .socket_suspend = h3600_pcmcia_socket_suspend,
220}; 220};
221 221
222int __init pcmcia_h3600_init(struct device *dev) 222int __devinit pcmcia_h3600_init(struct device *dev)
223{ 223{
224 int ret = -ENODEV; 224 int ret = -ENODEV;
225 225
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c
index c4d51867a05..7ff1b43540b 100644
--- a/drivers/pcmcia/sa1100_shannon.c
+++ b/drivers/pcmcia/sa1100_shannon.c
@@ -113,7 +113,7 @@ static struct pcmcia_low_level shannon_pcmcia_ops = {
113 .socket_suspend = shannon_pcmcia_socket_suspend, 113 .socket_suspend = shannon_pcmcia_socket_suspend,
114}; 114};
115 115
116int __init pcmcia_shannon_init(struct device *dev) 116int __devinit pcmcia_shannon_init(struct device *dev)
117{ 117{
118 int ret = -ENODEV; 118 int ret = -ENODEV;
119 119
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index 05bd504e6f1..c998f7aaadb 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -123,7 +123,7 @@ static struct pcmcia_low_level simpad_pcmcia_ops = {
123 .socket_suspend = simpad_pcmcia_socket_suspend, 123 .socket_suspend = simpad_pcmcia_socket_suspend,
124}; 124};
125 125
126int __init pcmcia_simpad_init(struct device *dev) 126int __devinit pcmcia_simpad_init(struct device *dev)
127{ 127{
128 int ret = -ENODEV; 128 int ret = -ENODEV;
129 129
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 689e3c02edb..3753fd0722e 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -57,11 +57,16 @@ module_param(pc_debug, int, 0644);
57void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func, 57void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func,
58 int lvl, const char *fmt, ...) 58 int lvl, const char *fmt, ...)
59{ 59{
60 struct va_format vaf;
60 va_list args; 61 va_list args;
61 if (pc_debug > lvl) { 62 if (pc_debug > lvl) {
62 printk(KERN_DEBUG "skt%u: %s: ", skt->nr, func);
63 va_start(args, fmt); 63 va_start(args, fmt);
64 vprintk(fmt, args); 64
65 vaf.fmt = fmt;
66 vaf.va = &args;
67
68 printk(KERN_DEBUG "skt%u: %s: %pV", skt->nr, func, &vaf);
69
65 va_end(args); 70 va_end(args);
66 } 71 }
67} 72}
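
The rewritten helper uses the kernel's %pV extension so the "skt%u: %s: " prefix and the caller's format string are emitted by a single printk() instead of a printk()/vprintk() pair that other log traffic could split apart. The general shape of the idiom, with a hypothetical helper name, is:

#include <linux/kernel.h>

static void my_debug(unsigned int nr, const char *func, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        /* %pV expands vaf.fmt against vaf.va inside this one printk call */
        printk(KERN_DEBUG "skt%u: %s: %pV", nr, func, &vaf);
        va_end(args);
}
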
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 68cf0c99138..7b5080c4556 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1159,11 +1159,11 @@ int __devinit rio_init_mports(void)
1159 1159
1160 list_for_each_entry(port, &rio_mports, node) { 1160 list_for_each_entry(port, &rio_mports, node) {
1161 if (!request_mem_region(port->iores.start, 1161 if (!request_mem_region(port->iores.start,
1162 port->iores.end - port->iores.start, 1162 resource_size(&port->iores),
1163 port->name)) { 1163 port->name)) {
1164 printk(KERN_ERR 1164 printk(KERN_ERR
1165 "RIO: Error requesting master port region 0x%016llx-0x%016llx\n", 1165 "RIO: Error requesting master port region 0x%016llx-0x%016llx\n",
1166 (u64)port->iores.start, (u64)port->iores.end - 1); 1166 (u64)port->iores.start, (u64)port->iores.end);
1167 rc = -ENOMEM; 1167 rc = -ENOMEM;
1168 goto out; 1168 goto out;
1169 } 1169 }
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 359d1e04626..f0d63892264 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -35,7 +35,7 @@
35 35
36#ifdef CONFIG_SH_SECUREEDGE5410 36#ifdef CONFIG_SH_SECUREEDGE5410
37#include <asm/rtc.h> 37#include <asm/rtc.h>
38#include <mach/snapgear.h> 38#include <mach/secureedge5410.h>
39 39
40#define RTC_RESET 0x1000 40#define RTC_RESET 0x1000
41#define RTC_IODATA 0x0800 41#define RTC_IODATA 0x0800
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 5efbd5990ff..06e41ed9323 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -761,7 +761,7 @@ err_unmap:
761 clk_put(rtc->clk); 761 clk_put(rtc->clk);
762 iounmap(rtc->regbase); 762 iounmap(rtc->regbase);
763err_badmap: 763err_badmap:
764 release_resource(rtc->res); 764 release_mem_region(rtc->res->start, rtc->regsize);
765err_badres: 765err_badres:
766 kfree(rtc); 766 kfree(rtc);
767 767
@@ -786,7 +786,7 @@ static int __exit sh_rtc_remove(struct platform_device *pdev)
786 } 786 }
787 787
788 iounmap(rtc->regbase); 788 iounmap(rtc->regbase);
789 release_resource(rtc->res); 789 release_mem_region(rtc->res->start, rtc->regsize);
790 790
791 clk_disable(rtc->clk); 791 clk_disable(rtc->clk);
792 clk_put(rtc->clk); 792 clk_put(rtc->clk);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6be43eb126b..f47a714538d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -440,7 +440,6 @@ struct qeth_qdio_out_q {
440 * index of buffer to be filled by driver; state EMPTY or PACKING 440 * index of buffer to be filled by driver; state EMPTY or PACKING
441 */ 441 */
442 int next_buf_to_fill; 442 int next_buf_to_fill;
443 int sync_iqdio_error;
444 /* 443 /*
445 * number of buffers that are currently filled (PRIMED) 444 * number of buffers that are currently filled (PRIMED)
446 * -> these buffers are hardware-owned 445 * -> these buffers are hardware-owned
@@ -695,14 +694,6 @@ struct qeth_mc_mac {
695 int is_vmac; 694 int is_vmac;
696}; 695};
697 696
698struct qeth_skb_data {
699 __u32 magic;
700 int count;
701};
702
703#define QETH_SKB_MAGIC 0x71657468
704#define QETH_SIGA_CC2_RETRIES 3
705
706struct qeth_rx { 697struct qeth_rx {
707 int b_count; 698 int b_count;
708 int b_index; 699 int b_index;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 76426706260..e6b2df0e73f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -877,8 +877,8 @@ out:
877 return; 877 return;
878} 878}
879 879
880static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 880static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
881 struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb) 881 struct qeth_qdio_out_buffer *buf)
882{ 882{
883 int i; 883 int i;
884 struct sk_buff *skb; 884 struct sk_buff *skb;
@@ -887,13 +887,11 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
887 if (buf->buffer->element[0].flags & 0x40) 887 if (buf->buffer->element[0].flags & 0x40)
888 atomic_dec(&queue->set_pci_flags_count); 888 atomic_dec(&queue->set_pci_flags_count);
889 889
890 if (!qeth_skip_skb) { 890 skb = skb_dequeue(&buf->skb_list);
891 while (skb) {
892 atomic_dec(&skb->users);
893 dev_kfree_skb_any(skb);
891 skb = skb_dequeue(&buf->skb_list); 894 skb = skb_dequeue(&buf->skb_list);
892 while (skb) {
893 atomic_dec(&skb->users);
894 dev_kfree_skb_any(skb);
895 skb = skb_dequeue(&buf->skb_list);
896 }
897 } 895 }
898 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { 896 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
899 if (buf->buffer->element[i].addr && buf->is_header[i]) 897 if (buf->buffer->element[i].addr && buf->is_header[i])
@@ -909,12 +907,6 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
909 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 907 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
910} 908}
911 909
912static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
913 struct qeth_qdio_out_buffer *buf)
914{
915 __qeth_clear_output_buffer(queue, buf, 0);
916}
917
918void qeth_clear_qdio_buffers(struct qeth_card *card) 910void qeth_clear_qdio_buffers(struct qeth_card *card)
919{ 911{
920 int i, j; 912 int i, j;
@@ -2833,7 +2825,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2833 } 2825 }
2834 } 2826 }
2835 2827
2836 queue->sync_iqdio_error = 0;
2837 queue->card->dev->trans_start = jiffies; 2828 queue->card->dev->trans_start = jiffies;
2838 if (queue->card->options.performance_stats) { 2829 if (queue->card->options.performance_stats) {
2839 queue->card->perf_stats.outbound_do_qdio_cnt++; 2830 queue->card->perf_stats.outbound_do_qdio_cnt++;
@@ -2849,10 +2840,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2849 queue->card->perf_stats.outbound_do_qdio_time += 2840 queue->card->perf_stats.outbound_do_qdio_time +=
2850 qeth_get_micros() - 2841 qeth_get_micros() -
2851 queue->card->perf_stats.outbound_do_qdio_start_time; 2842 queue->card->perf_stats.outbound_do_qdio_start_time;
2852 if (rc > 0) {
2853 if (!(rc & QDIO_ERROR_SIGA_BUSY))
2854 queue->sync_iqdio_error = rc & 3;
2855 }
2856 if (rc) { 2843 if (rc) {
2857 queue->card->stats.tx_errors += count; 2844 queue->card->stats.tx_errors += count;
2858 /* ignore temporary SIGA errors without busy condition */ 2845 /* ignore temporary SIGA errors without busy condition */
@@ -2916,7 +2903,7 @@ void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
2916{ 2903{
2917 struct qeth_card *card = (struct qeth_card *)card_ptr; 2904 struct qeth_card *card = (struct qeth_card *)card_ptr;
2918 2905
2919 if (card->dev) 2906 if (card->dev && (card->dev->flags & IFF_UP))
2920 napi_schedule(&card->napi); 2907 napi_schedule(&card->napi);
2921} 2908}
2922EXPORT_SYMBOL_GPL(qeth_qdio_start_poll); 2909EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
@@ -2940,7 +2927,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2940 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; 2927 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
2941 struct qeth_qdio_out_buffer *buffer; 2928 struct qeth_qdio_out_buffer *buffer;
2942 int i; 2929 int i;
2943 unsigned qeth_send_err;
2944 2930
2945 QETH_CARD_TEXT(card, 6, "qdouhdl"); 2931 QETH_CARD_TEXT(card, 6, "qdouhdl");
2946 if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { 2932 if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
@@ -2956,9 +2942,8 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
2956 } 2942 }
2957 for (i = first_element; i < (first_element + count); ++i) { 2943 for (i = first_element; i < (first_element + count); ++i) {
2958 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2944 buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2959 qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error); 2945 qeth_handle_send_error(card, buffer, qdio_error);
2960 __qeth_clear_output_buffer(queue, buffer, 2946 qeth_clear_output_buffer(queue, buffer);
2961 (qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0);
2962 } 2947 }
2963 atomic_sub(count, &queue->used_buffers); 2948 atomic_sub(count, &queue->used_buffers);
2964 /* check if we need to do something on this outbound queue */ 2949 /* check if we need to do something on this outbound queue */
@@ -3183,10 +3168,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
3183 int offset, int hd_len) 3168 int offset, int hd_len)
3184{ 3169{
3185 struct qeth_qdio_out_buffer *buffer; 3170 struct qeth_qdio_out_buffer *buffer;
3186 struct sk_buff *skb1;
3187 struct qeth_skb_data *retry_ctrl;
3188 int index; 3171 int index;
3189 int rc;
3190 3172
3191 /* spin until we get the queue ... */ 3173 /* spin until we get the queue ... */
3192 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, 3174 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
@@ -3205,25 +3187,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
3205 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3187 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3206 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); 3188 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
3207 qeth_flush_buffers(queue, index, 1); 3189 qeth_flush_buffers(queue, index, 1);
3208 if (queue->sync_iqdio_error == 2) {
3209 skb1 = skb_dequeue(&buffer->skb_list);
3210 while (skb1) {
3211 atomic_dec(&skb1->users);
3212 skb1 = skb_dequeue(&buffer->skb_list);
3213 }
3214 retry_ctrl = (struct qeth_skb_data *) &skb->cb[16];
3215 if (retry_ctrl->magic != QETH_SKB_MAGIC) {
3216 retry_ctrl->magic = QETH_SKB_MAGIC;
3217 retry_ctrl->count = 0;
3218 }
3219 if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) {
3220 retry_ctrl->count++;
3221 rc = dev_queue_xmit(skb);
3222 } else {
3223 dev_kfree_skb_any(skb);
3224 QETH_CARD_TEXT(card, 2, "qrdrop");
3225 }
3226 }
3227 return 0; 3190 return 0;
3228out: 3191out:
3229 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); 3192 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1de30eb83bb..f3cf924a2cd 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -320,19 +320,11 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
320 "changed. The Linux SCSI layer does not " 320 "changed. The Linux SCSI layer does not "
321 "automatically adjust these parameters.\n"); 321 "automatically adjust these parameters.\n");
322 322
323 if (scmd->request->cmd_flags & REQ_HARDBARRIER) 323 /*
324 /* 324 * Pass the UA upwards for a determination in the completion
325 * barrier requests should always retry on UA 325 * functions.
326 * otherwise block will get a spurious error 326 */
327 */ 327 return SUCCESS;
328 return NEEDS_RETRY;
329 else
330 /*
331 * for normal (non barrier) commands, pass the
332 * UA upwards for a determination in the
333 * completion functions
334 */
335 return SUCCESS;
336 328
337 /* these three are not supported */ 329 /* these three are not supported */
338 case COPY_ABORTED: 330 case COPY_ABORTED:
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 53be4d35a0a..842e3b2a02b 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2285,6 +2285,8 @@ static struct pciserial_board pci_boards[] __devinitdata = {
2285 2285
2286static const struct pci_device_id softmodem_blacklist[] = { 2286static const struct pci_device_id softmodem_blacklist[] = {
2287 { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */ 2287 { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
2288 { PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */
2289 { PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
2288}; 2290};
2289 2291
2290/* 2292/*
@@ -2863,6 +2865,9 @@ static struct pci_device_id serial_pci_tbl[] = {
2863 PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, 2865 PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL,
2864 0, 0, 2866 0, 0,
2865 pbn_b0_4_1152000 }, 2867 pbn_b0_4_1152000 },
2868 { PCI_VENDOR_ID_OXSEMI, 0x9505,
2869 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2870 pbn_b0_bt_2_921600 },
2866 2871
2867 /* 2872 /*
2868 * The below card is a little controversial since it is the 2873 * The below card is a little controversial since it is the
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index a9eff2b18ea..19cac9f610f 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -23,6 +23,7 @@
23#include <linux/tty.h> 23#include <linux/tty.h>
24#include <linux/tty_flip.h> 24#include <linux/tty_flip.h>
25#include <linux/serial_core.h> 25#include <linux/serial_core.h>
26#include <linux/dma-mapping.h>
26 27
27#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ 28#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
28 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) 29 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -33,12 +34,10 @@
33#include <asm/gpio.h> 34#include <asm/gpio.h>
34#include <mach/bfin_serial_5xx.h> 35#include <mach/bfin_serial_5xx.h>
35 36
36#ifdef CONFIG_SERIAL_BFIN_DMA 37#include <asm/dma.h>
37#include <linux/dma-mapping.h>
38#include <asm/io.h> 38#include <asm/io.h>
39#include <asm/irq.h> 39#include <asm/irq.h>
40#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
41#endif
42 41
43#ifdef CONFIG_SERIAL_BFIN_MODULE 42#ifdef CONFIG_SERIAL_BFIN_MODULE
44# undef CONFIG_EARLY_PRINTK 43# undef CONFIG_EARLY_PRINTK
@@ -360,7 +359,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
360 UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); 359 UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
361 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 360 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
362 uart->port.icount.tx++; 361 uart->port.icount.tx++;
363 SSYNC();
364 } 362 }
365 363
366 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 364 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -688,6 +686,13 @@ static int bfin_serial_startup(struct uart_port *port)
688 686
689# ifdef CONFIG_BF54x 687# ifdef CONFIG_BF54x
690 { 688 {
689 /*
690 * UART2 and UART3 on BF548 share interrupt PINs and DMA
691 * controllers with SPORT2 and SPORT3. UART rx and tx
 692	 * interrupts are generated in PIO mode only when their peripheral
 693	 * mapping registers are configured properly, which means the
 694	 * corresponding DMA channels must be requested in PIO mode as well.
695 */
691 unsigned uart_dma_ch_rx, uart_dma_ch_tx; 696 unsigned uart_dma_ch_rx, uart_dma_ch_tx;
692 697
693 switch (uart->port.irq) { 698 switch (uart->port.irq) {
@@ -734,8 +739,7 @@ static int bfin_serial_startup(struct uart_port *port)
734 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | 739 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
735 IRQF_DISABLED, "BFIN_UART_CTS", uart)) { 740 IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
736 uart->cts_pin = -1; 741 uart->cts_pin = -1;
737 pr_info("Unable to attach BlackFin UART CTS interrupt.\ 742 pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
738 So, disable it.\n");
739 } 743 }
740 } 744 }
741 if (uart->rts_pin >= 0) { 745 if (uart->rts_pin >= 0) {
@@ -747,8 +751,7 @@ static int bfin_serial_startup(struct uart_port *port)
747 if (request_irq(uart->status_irq, 751 if (request_irq(uart->status_irq,
748 bfin_serial_mctrl_cts_int, 752 bfin_serial_mctrl_cts_int,
749 IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) { 753 IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
750 pr_info("Unable to attach BlackFin UART Modem \ 754 pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
751 Status interrupt.\n");
752 } 755 }
753 756
754 /* CTS RTS PINs are negative assertive. */ 757 /* CTS RTS PINs are negative assertive. */
@@ -846,6 +849,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
846 if (termios->c_cflag & CMSPAR) 849 if (termios->c_cflag & CMSPAR)
847 lcr |= STP; 850 lcr |= STP;
848 851
852 spin_lock_irqsave(&uart->port.lock, flags);
853
849 port->read_status_mask = OE; 854 port->read_status_mask = OE;
850 if (termios->c_iflag & INPCK) 855 if (termios->c_iflag & INPCK)
851 port->read_status_mask |= (FE | PE); 856 port->read_status_mask |= (FE | PE);
@@ -875,8 +880,6 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
875 if (termios->c_line != N_IRDA) 880 if (termios->c_line != N_IRDA)
876 quot -= ANOMALY_05000230; 881 quot -= ANOMALY_05000230;
877 882
878 spin_lock_irqsave(&uart->port.lock, flags);
879
880 UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); 883 UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
881 884
882 /* Disable UART */ 885 /* Disable UART */
@@ -1321,6 +1324,14 @@ struct console __init *bfin_earlyserial_init(unsigned int port,
1321 struct bfin_serial_port *uart; 1324 struct bfin_serial_port *uart;
1322 struct ktermios t; 1325 struct ktermios t;
1323 1326
1327#ifdef CONFIG_SERIAL_BFIN_CONSOLE
1328 /*
1329 * If we are using early serial, don't let the normal console rewind
1330	 * the log buffer, since that causes things to be printed multiple times
1331 */
1332 bfin_serial_console.flags &= ~CON_PRINTBUFFER;
1333#endif
1334
1324 if (port == -1 || port >= nr_active_ports) 1335 if (port == -1 || port >= nr_active_ports)
1325 port = 0; 1336 port = 0;
1326 bfin_serial_init_ports(); 1337 bfin_serial_init_ports();
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index d4b711c9a41..3374618300a 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -18,6 +18,7 @@
18#include <linux/tty.h> 18#include <linux/tty.h>
19#include <linux/console.h> 19#include <linux/console.h>
20#include <linux/vt_kern.h> 20#include <linux/vt_kern.h>
21#include <linux/input.h>
21 22
22#define MAX_CONFIG_LEN 40 23#define MAX_CONFIG_LEN 40
23 24
@@ -37,6 +38,61 @@ static struct tty_driver *kgdb_tty_driver;
37static int kgdb_tty_line; 38static int kgdb_tty_line;
38 39
39#ifdef CONFIG_KDB_KEYBOARD 40#ifdef CONFIG_KDB_KEYBOARD
41static int kgdboc_reset_connect(struct input_handler *handler,
42 struct input_dev *dev,
43 const struct input_device_id *id)
44{
45 input_reset_device(dev);
46
 47	/* Return an error - we do not want to bind, just to reset */
48 return -ENODEV;
49}
50
51static void kgdboc_reset_disconnect(struct input_handle *handle)
52{
53 /* We do not expect anyone to actually bind to us */
54 BUG();
55}
56
57static const struct input_device_id kgdboc_reset_ids[] = {
58 {
59 .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
60 .evbit = { BIT_MASK(EV_KEY) },
61 },
62 { }
63};
64
65static struct input_handler kgdboc_reset_handler = {
66 .connect = kgdboc_reset_connect,
67 .disconnect = kgdboc_reset_disconnect,
68 .name = "kgdboc_reset",
69 .id_table = kgdboc_reset_ids,
70};
71
72static DEFINE_MUTEX(kgdboc_reset_mutex);
73
74static void kgdboc_restore_input_helper(struct work_struct *dummy)
75{
76 /*
77 * We need to take a mutex to prevent several instances of
78 * this work running on different CPUs so they don't try
 79	 * to register an already registered handler again.
80 */
81 mutex_lock(&kgdboc_reset_mutex);
82
83 if (input_register_handler(&kgdboc_reset_handler) == 0)
84 input_unregister_handler(&kgdboc_reset_handler);
85
86 mutex_unlock(&kgdboc_reset_mutex);
87}
88
89static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
90
91static void kgdboc_restore_input(void)
92{
93 schedule_work(&kgdboc_restore_input_work);
94}
95
40static int kgdboc_register_kbd(char **cptr) 96static int kgdboc_register_kbd(char **cptr)
41{ 97{
42 if (strncmp(*cptr, "kbd", 3) == 0) { 98 if (strncmp(*cptr, "kbd", 3) == 0) {
@@ -64,10 +120,12 @@ static void kgdboc_unregister_kbd(void)
64 i--; 120 i--;
65 } 121 }
66 } 122 }
123 flush_work_sync(&kgdboc_restore_input_work);
67} 124}
68#else /* ! CONFIG_KDB_KEYBOARD */ 125#else /* ! CONFIG_KDB_KEYBOARD */
69#define kgdboc_register_kbd(x) 0 126#define kgdboc_register_kbd(x) 0
70#define kgdboc_unregister_kbd() 127#define kgdboc_unregister_kbd()
128#define kgdboc_restore_input()
71#endif /* ! CONFIG_KDB_KEYBOARD */ 129#endif /* ! CONFIG_KDB_KEYBOARD */
72 130
73static int kgdboc_option_setup(char *opt) 131static int kgdboc_option_setup(char *opt)
@@ -231,6 +289,7 @@ static void kgdboc_post_exp_handler(void)
231 dbg_restore_graphics = 0; 289 dbg_restore_graphics = 0;
232 con_debug_leave(); 290 con_debug_leave();
233 } 291 }
292 kgdboc_restore_input();
234} 293}
235 294
236static struct kgdb_io kgdboc_io_ops = { 295static struct kgdb_io kgdboc_io_ops = {
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index fd0d1b98901..cb12a8e1466 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -90,8 +90,8 @@ struct clk_rate_round_data {
90static long clk_rate_round_helper(struct clk_rate_round_data *rounder) 90static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
91{ 91{
92 unsigned long rate_error, rate_error_prev = ~0UL; 92 unsigned long rate_error, rate_error_prev = ~0UL;
93 unsigned long rate_best_fit = rounder->rate;
94 unsigned long highest, lowest, freq; 93 unsigned long highest, lowest, freq;
94 long rate_best_fit = -ENOENT;
95 int i; 95 int i;
96 96
97 highest = 0; 97 highest = 0;
@@ -146,7 +146,7 @@ long clk_rate_table_round(struct clk *clk,
146 }; 146 };
147 147
148 if (clk->nr_freqs < 1) 148 if (clk->nr_freqs < 1)
149 return 0; 149 return -ENOSYS;
150 150
151 return clk_rate_round_helper(&table_round); 151 return clk_rate_round_helper(&table_round);
152} 152}
@@ -541,6 +541,98 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
541} 541}
542EXPORT_SYMBOL_GPL(clk_round_rate); 542EXPORT_SYMBOL_GPL(clk_round_rate);
543 543
544long clk_round_parent(struct clk *clk, unsigned long target,
545 unsigned long *best_freq, unsigned long *parent_freq,
546 unsigned int div_min, unsigned int div_max)
547{
548 struct cpufreq_frequency_table *freq, *best = NULL;
549 unsigned long error = ULONG_MAX, freq_high, freq_low, div;
550 struct clk *parent = clk_get_parent(clk);
551
552 if (!parent) {
553 *parent_freq = 0;
554 *best_freq = clk_round_rate(clk, target);
555 return abs(target - *best_freq);
556 }
557
558 for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
559 freq++) {
560 if (freq->frequency == CPUFREQ_ENTRY_INVALID)
561 continue;
562
563 if (unlikely(freq->frequency / target <= div_min - 1)) {
564 unsigned long freq_max;
565
566 freq_max = (freq->frequency + div_min / 2) / div_min;
567 if (error > target - freq_max) {
568 error = target - freq_max;
569 best = freq;
570 if (best_freq)
571 *best_freq = freq_max;
572 }
573
574 pr_debug("too low freq %u, error %lu\n", freq->frequency,
575 target - freq_max);
576
577 if (!error)
578 break;
579
580 continue;
581 }
582
583 if (unlikely(freq->frequency / target >= div_max)) {
584 unsigned long freq_min;
585
586 freq_min = (freq->frequency + div_max / 2) / div_max;
587 if (error > freq_min - target) {
588 error = freq_min - target;
589 best = freq;
590 if (best_freq)
591 *best_freq = freq_min;
592 }
593
594 pr_debug("too high freq %u, error %lu\n", freq->frequency,
595 freq_min - target);
596
597 if (!error)
598 break;
599
600 continue;
601 }
602
603 div = freq->frequency / target;
604 freq_high = freq->frequency / div;
605 freq_low = freq->frequency / (div + 1);
606
607 if (freq_high - target < error) {
608 error = freq_high - target;
609 best = freq;
610 if (best_freq)
611 *best_freq = freq_high;
612 }
613
614 if (target - freq_low < error) {
615 error = target - freq_low;
616 best = freq;
617 if (best_freq)
618 *best_freq = freq_low;
619 }
620
621 pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
622 freq->frequency, div, freq_high, div + 1, freq_low,
623 *best_freq, best->frequency);
624
625 if (!error)
626 break;
627 }
628
629 if (parent_freq)
630 *parent_freq = best->frequency;
631
632 return error;
633}
634EXPORT_SYMBOL_GPL(clk_round_parent);
635
544#ifdef CONFIG_PM 636#ifdef CONFIG_PM
545static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) 637static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
546{ 638{
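The clk_round_parent() helper added above walks the parent clock's cpufreq frequency table and reports the parent rate and divider that come closest to a target rate. A minimal sketch of a caller (hypothetical driver code, not part of the patch; 'clk' is assumed to be a valid struct clk with a table-driven parent):

	/* Ask for a setting near 48 MHz, allowing dividers 1..16 on the parent. */
	unsigned long best_freq, parent_freq;
	long error;

	error = clk_round_parent(clk, 48000000, &best_freq, &parent_freq, 1, 16);
	pr_debug("closest rate %lu Hz from parent at %lu Hz, error %ld\n",
		 best_freq, parent_freq, error);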
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 873a99ff8f6..e5e9e6735f7 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -79,7 +79,7 @@ static void __init intc_register_irq(struct intc_desc *desc,
79 * Register the IRQ position with the global IRQ map, then insert 79 * Register the IRQ position with the global IRQ map, then insert
80 * it in to the radix tree. 80 * it in to the radix tree.
81 */ 81 */
82 irq_reserve_irqs(irq, 1); 82 irq_reserve_irq(irq);
83 83
84 raw_spin_lock_irqsave(&intc_big_lock, flags); 84 raw_spin_lock_irqsave(&intc_big_lock, flags);
85 radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq)); 85 radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq));
diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c
index 4187cce20ff..a3677c9dfe3 100644
--- a/drivers/sh/intc/dynamic.c
+++ b/drivers/sh/intc/dynamic.c
@@ -60,5 +60,5 @@ void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
60 int i; 60 int i;
61 61
62 for (i = 0; i < nr_vecs; i++) 62 for (i = 0; i < nr_vecs; i++)
63 irq_reserve_irqs(evt2irq(vectors[i].vect), 1); 63 irq_reserve_irq(evt2irq(vectors[i].vect));
64} 64}
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index e5bf5d3c698..4e0ff718116 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -215,7 +215,7 @@ restart:
215 entry = radix_tree_deref_slot((void **)entries[i]); 215 entry = radix_tree_deref_slot((void **)entries[i]);
216 if (unlikely(!entry)) 216 if (unlikely(!entry))
217 continue; 217 continue;
218 if (unlikely(entry == RADIX_TREE_RETRY)) 218 if (radix_tree_deref_retry(entry))
219 goto restart; 219 goto restart;
220 220
221 irq = create_irq(); 221 irq = create_irq();
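The virq.c change above switches from comparing against RADIX_TREE_RETRY to the radix_tree_deref_retry() accessor. A generic sketch of that lookup-retry idiom, with hypothetical names (the caller is assumed to hold rcu_read_lock() across the lookup):

	static void *lookup_stable(struct radix_tree_root *tree, unsigned long index)
	{
		void **slot;
		void *item;

	restart:
		slot = radix_tree_lookup_slot(tree, index);
		if (!slot)
			return NULL;
		item = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(item))
			goto restart;	/* slot held a moving internal node, look again */
		return item;
	}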
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
index ae2cdf48b74..8a5caa30b85 100644
--- a/drivers/staging/ath6kl/Kconfig
+++ b/drivers/staging/ath6kl/Kconfig
@@ -102,7 +102,7 @@ config AR600x_BT_RESET_PIN
102 102
103config ATH6KL_CFG80211 103config ATH6KL_CFG80211
104 bool "CFG80211 support" 104 bool "CFG80211 support"
105 depends on ATH6K_LEGACY 105 depends on ATH6K_LEGACY && CFG80211
106 help 106 help
107 Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space. 107 Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space.
108 108
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
index 22c6c6659f5..ee8b47746a1 100644
--- a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
+++ b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
@@ -285,9 +285,9 @@ A_STATUS SetupHIFScatterSupport(HIF_DEVICE *device, HIF_DEVICE_SCATTER_SUPPORT_I
285 do { 285 do {
286 286
287 /* check if host supports scatter requests and it meets our requirements */ 287 /* check if host supports scatter requests and it meets our requirements */
288 if (device->func->card->host->max_hw_segs < MAX_SCATTER_ENTRIES_PER_REQ) { 288 if (device->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
289 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : host only supports scatter of : %d entries, need: %d \n", 289 AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : host only supports scatter of : %d entries, need: %d \n",
290 device->func->card->host->max_hw_segs, MAX_SCATTER_ENTRIES_PER_REQ)); 290 device->func->card->host->max_segs, MAX_SCATTER_ENTRIES_PER_REQ));
291 status = A_ENOTSUP; 291 status = A_ENOTSUP;
292 break; 292 break;
293 } 293 }
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index c5a6d6c1673..a659f704737 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -1126,7 +1126,7 @@ ar6000_transfer_bin_file(AR_SOFTC_T *ar, AR6K_BIN_FILE file, A_UINT32 address, A
1126 if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) { 1126 if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) {
1127 A_UINT32 param; 1127 A_UINT32 param;
1128 1128
1129 status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(((A_UINT32)fw_entry->data) + board_data_size), board_ext_data_size); 1129 status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(fw_entry->data + board_data_size), board_ext_data_size);
1130 1130
1131 if (status != A_OK) { 1131 if (status != A_OK) {
1132 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); 1132 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
@@ -3030,7 +3030,8 @@ ar6000_data_tx(struct sk_buff *skb, struct net_device *dev)
3030 A_UINT8 csumDest=0; 3030 A_UINT8 csumDest=0;
3031 A_UINT8 csum=skb->ip_summed; 3031 A_UINT8 csum=skb->ip_summed;
3032 if(csumOffload && (csum==CHECKSUM_PARTIAL)){ 3032 if(csumOffload && (csum==CHECKSUM_PARTIAL)){
3033 csumStart=skb->csum_start-(skb->network_header-skb->head)+sizeof(ATH_LLC_SNAP_HDR); 3033 csumStart = (skb->head + skb->csum_start - skb_network_header(skb) +
3034 sizeof(ATH_LLC_SNAP_HDR));
3034 csumDest=skb->csum_offset+csumStart; 3035 csumDest=skb->csum_offset+csumStart;
3035 } 3036 }
3036#endif 3037#endif
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index c94ad29eeb4..7269d0a1d61 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -808,7 +808,7 @@ ar6k_cfg80211_scanComplete_event(AR_SOFTC_T *ar, A_STATUS status)
808 808
809static int 809static int
810ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, 810ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
811 A_UINT8 key_index, const A_UINT8 *mac_addr, 811 A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
812 struct key_params *params) 812 struct key_params *params)
813{ 813{
814 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); 814 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
@@ -901,7 +901,7 @@ ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
901 901
902static int 902static int
903ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, 903ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
904 A_UINT8 key_index, const A_UINT8 *mac_addr) 904 A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr)
905{ 905{
906 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); 906 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
907 907
@@ -936,7 +936,8 @@ ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
936 936
937static int 937static int
938ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, 938ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
939 A_UINT8 key_index, const A_UINT8 *mac_addr, void *cookie, 939 A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
940 void *cookie,
940 void (*callback)(void *cookie, struct key_params*)) 941 void (*callback)(void *cookie, struct key_params*))
941{ 942{
942 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); 943 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
diff --git a/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h
+++ /dev/null
diff --git a/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h
+++ /dev/null
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 80cfa866958..b68a7e5173b 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -165,7 +165,7 @@ static void update_mac_addresses(struct batman_if *batman_if)
165 batman_if->net_dev->dev_addr, ETH_ALEN); 165 batman_if->net_dev->dev_addr, ETH_ALEN);
166} 166}
167 167
168static void check_known_mac_addr(uint8_t *addr) 168static void check_known_mac_addr(struct net_device *net_dev)
169{ 169{
170 struct batman_if *batman_if; 170 struct batman_if *batman_if;
171 171
@@ -175,11 +175,16 @@ static void check_known_mac_addr(uint8_t *addr)
175 (batman_if->if_status != IF_TO_BE_ACTIVATED)) 175 (batman_if->if_status != IF_TO_BE_ACTIVATED))
176 continue; 176 continue;
177 177
178 if (!compare_orig(batman_if->net_dev->dev_addr, addr)) 178 if (batman_if->net_dev == net_dev)
179 continue;
180
181 if (!compare_orig(batman_if->net_dev->dev_addr,
182 net_dev->dev_addr))
179 continue; 183 continue;
180 184
181 pr_warning("The newly added mac address (%pM) already exists " 185 pr_warning("The newly added mac address (%pM) already exists "
182 "on: %s\n", addr, batman_if->net_dev->name); 186 "on: %s\n", net_dev->dev_addr,
187 batman_if->net_dev->name);
183 pr_warning("It is strongly recommended to keep mac addresses " 188 pr_warning("It is strongly recommended to keep mac addresses "
184 "unique to avoid problems!\n"); 189 "unique to avoid problems!\n");
185 } 190 }
@@ -430,7 +435,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
430 atomic_set(&batman_if->refcnt, 0); 435 atomic_set(&batman_if->refcnt, 0);
431 hardif_hold(batman_if); 436 hardif_hold(batman_if);
432 437
433 check_known_mac_addr(batman_if->net_dev->dev_addr); 438 check_known_mac_addr(batman_if->net_dev);
434 439
435 spin_lock(&if_list_lock); 440 spin_lock(&if_list_lock);
436 list_add_tail_rcu(&batman_if->list, &if_list); 441 list_add_tail_rcu(&batman_if->list, &if_list);
@@ -515,7 +520,7 @@ static int hard_if_event(struct notifier_block *this,
515 goto out; 520 goto out;
516 } 521 }
517 522
518 check_known_mac_addr(batman_if->net_dev->dev_addr); 523 check_known_mac_addr(batman_if->net_dev);
519 update_mac_addresses(batman_if); 524 update_mac_addresses(batman_if);
520 525
521 bat_priv = netdev_priv(batman_if->soft_iface); 526 bat_priv = netdev_priv(batman_if->soft_iface);
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 90102631330..657b69e6b95 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -1000,10 +1000,10 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
1000 1000
1001/* find a suitable router for this originator, and use 1001/* find a suitable router for this originator, and use
1002 * bonding if possible. */ 1002 * bonding if possible. */
1003struct neigh_node *find_router(struct orig_node *orig_node, 1003struct neigh_node *find_router(struct bat_priv *bat_priv,
1004 struct orig_node *orig_node,
1004 struct batman_if *recv_if) 1005 struct batman_if *recv_if)
1005{ 1006{
1006 struct bat_priv *bat_priv;
1007 struct orig_node *primary_orig_node; 1007 struct orig_node *primary_orig_node;
1008 struct orig_node *router_orig; 1008 struct orig_node *router_orig;
1009 struct neigh_node *router, *first_candidate, *best_router; 1009 struct neigh_node *router, *first_candidate, *best_router;
@@ -1019,13 +1019,9 @@ struct neigh_node *find_router(struct orig_node *orig_node,
1019 /* without bonding, the first node should 1019 /* without bonding, the first node should
1020 * always choose the default router. */ 1020 * always choose the default router. */
1021 1021
1022 if (!recv_if)
1023 return orig_node->router;
1024
1025 bat_priv = netdev_priv(recv_if->soft_iface);
1026 bonding_enabled = atomic_read(&bat_priv->bonding_enabled); 1022 bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
1027 1023
1028 if (!bonding_enabled) 1024 if ((!recv_if) && (!bonding_enabled))
1029 return orig_node->router; 1025 return orig_node->router;
1030 1026
1031 router_orig = orig_node->router->orig_node; 1027 router_orig = orig_node->router->orig_node;
@@ -1154,7 +1150,7 @@ static int route_unicast_packet(struct sk_buff *skb,
1154 orig_node = ((struct orig_node *) 1150 orig_node = ((struct orig_node *)
1155 hash_find(bat_priv->orig_hash, unicast_packet->dest)); 1151 hash_find(bat_priv->orig_hash, unicast_packet->dest));
1156 1152
1157 router = find_router(orig_node, recv_if); 1153 router = find_router(bat_priv, orig_node, recv_if);
1158 1154
1159 if (!router) { 1155 if (!router) {
1160 spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); 1156 spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 06ea99df370..92674c8d9c0 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -38,8 +38,8 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
38int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); 38int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
39int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); 39int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
40int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if); 40int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
41struct neigh_node *find_router(struct orig_node *orig_node, 41struct neigh_node *find_router(struct bat_priv *bat_priv,
42 struct batman_if *recv_if); 42 struct orig_node *orig_node, struct batman_if *recv_if);
43void update_bonding_candidates(struct bat_priv *bat_priv, 43void update_bonding_candidates(struct bat_priv *bat_priv,
44 struct orig_node *orig_node); 44 struct orig_node *orig_node);
45 45
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c
index 0dac50d69c0..0459413ff67 100644
--- a/drivers/staging/batman-adv/unicast.c
+++ b/drivers/staging/batman-adv/unicast.c
@@ -224,7 +224,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
224 if (!orig_node) 224 if (!orig_node)
225 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 225 orig_node = transtable_search(bat_priv, ethhdr->h_dest);
226 226
227 router = find_router(orig_node, NULL); 227 router = find_router(bat_priv, orig_node, NULL);
228 228
229 if (!router) 229 if (!router)
230 goto unlock; 230 goto unlock;
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 77fdfe24d99..fead9c56162 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1001,13 +1001,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1001 } 1001 }
1002#endif 1002#endif
1003 case IOCTL_BE_BUCKET_SIZE: 1003 case IOCTL_BE_BUCKET_SIZE:
1004 Adapter->BEBucketSize = *(PULONG)arg; 1004 Status = 0;
1005 Status = STATUS_SUCCESS; 1005 if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
1006 Status = -EFAULT;
1006 break; 1007 break;
1007 1008
1008 case IOCTL_RTPS_BUCKET_SIZE: 1009 case IOCTL_RTPS_BUCKET_SIZE:
1009 Adapter->rtPSBucketSize = *(PULONG)arg; 1010 Status = 0;
1010 Status = STATUS_SUCCESS; 1011 if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
1012 Status = -EFAULT;
1011 break; 1013 break;
1012 case IOCTL_CHIP_RESET: 1014 case IOCTL_CHIP_RESET:
1013 { 1015 {
@@ -1028,11 +1030,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1028 case IOCTL_QOS_THRESHOLD: 1030 case IOCTL_QOS_THRESHOLD:
1029 { 1031 {
1030 USHORT uiLoopIndex; 1032 USHORT uiLoopIndex;
1031 for(uiLoopIndex = 0 ; uiLoopIndex < NO_OF_QUEUES ; uiLoopIndex++) 1033
1032 { 1034 Status = 0;
1033 Adapter->PackInfo[uiLoopIndex].uiThreshold = *(PULONG)arg; 1035 for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
1036 if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
1037 (unsigned long __user *)arg)) {
1038 Status = -EFAULT;
1039 break;
1040 }
1034 } 1041 }
1035 Status = STATUS_SUCCESS;
1036 break; 1042 break;
1037 } 1043 }
1038 1044
@@ -1093,7 +1099,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1093 } 1099 }
1094 case IOCTL_BCM_GET_CURRENT_STATUS: 1100 case IOCTL_BCM_GET_CURRENT_STATUS:
1095 { 1101 {
1096 LINK_STATE *plink_state = NULL; 1102 LINK_STATE plink_state;
1103
1097 /* Copy Ioctl Buffer structure */ 1104 /* Copy Ioctl Buffer structure */
1098 if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) 1105 if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
1099 { 1106 {
@@ -1101,13 +1108,19 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1101 Status = -EFAULT; 1108 Status = -EFAULT;
1102 break; 1109 break;
1103 } 1110 }
1104 plink_state = (LINK_STATE*)arg; 1111 if (IoBuffer.OutputLength != sizeof(plink_state)) {
1105 plink_state->bIdleMode = (UCHAR)Adapter->IdleMode; 1112 Status = -EINVAL;
1106 plink_state->bShutdownMode = Adapter->bShutStatus; 1113 break;
1107 plink_state->ucLinkStatus = (UCHAR)Adapter->LinkStatus; 1114 }
1108 if(copy_to_user(IoBuffer.OutputBuffer, 1115
1109 (PUCHAR)plink_state, (UINT)IoBuffer.OutputLength)) 1116 if (copy_from_user(&plink_state, (void __user *)arg, sizeof(plink_state))) {
1110 { 1117 Status = -EFAULT;
1118 break;
1119 }
1120 plink_state.bIdleMode = (UCHAR)Adapter->IdleMode;
1121 plink_state.bShutdownMode = Adapter->bShutStatus;
1122 plink_state.ucLinkStatus = (UCHAR)Adapter->LinkStatus;
1123 if (copy_to_user(IoBuffer.OutputBuffer, &plink_state, IoBuffer.OutputLength)) {
1111 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n"); 1124 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
1112 Status = -EFAULT; 1125 Status = -EFAULT;
1113 break; 1126 break;
@@ -1331,7 +1344,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1331 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status); 1344 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status);
1332 return -EFAULT; 1345 return -EFAULT;
1333 } 1346 }
1334 uiSectorSize = *((PUINT)(IoBuffer.InputBuffer)); /* FIXME: unchecked __user access */ 1347 if (get_user(uiSectorSize, (unsigned int __user *)IoBuffer.InputBuffer))
1348 return -EFAULT;
1349
1335 if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) 1350 if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE))
1336 { 1351 {
1337 1352
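The Bcmchar.c hunks above replace direct casts of the ioctl argument, such as *(PULONG)arg, with get_user()/copy_from_user(). A minimal sketch of the idiom, using hypothetical names, is:

	/* Copy the value in from user space instead of dereferencing 'arg'. */
	unsigned long value;

	if (get_user(value, (unsigned long __user *)arg))
		return -EFAULT;		/* bad user pointer */
	/* ... use 'value' ... */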
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
index c3ba9bb9b11..c8f1cf1b440 100644
--- a/drivers/staging/brcm80211/README
+++ b/drivers/staging/brcm80211/README
@@ -90,5 +90,5 @@ Contact Info:
90============= 90=============
91Brett Rudley brudley@broadcom.com 91Brett Rudley brudley@broadcom.com
92Henry Ptasinski henryp@broadcom.com 92Henry Ptasinski henryp@broadcom.com
93Nohee Ko noheek@broadcom.com 93Dowan Kim dowan@broadcom.com
94 94
diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO
index 8803d300b53..dbf90418489 100644
--- a/drivers/staging/brcm80211/TODO
+++ b/drivers/staging/brcm80211/TODO
@@ -45,5 +45,5 @@ Contact
45===== 45=====
46Brett Rudley <brudley@broadcom.com> 46Brett Rudley <brudley@broadcom.com>
47Henry Ptasinski <henryp@broadcom.com> 47Henry Ptasinski <henryp@broadcom.com>
48Nohee Ko <noheek@broadcom.com> 48Dowan Kim <dowan@broadcom.com>
49 49
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index bbbe7c5f749..9335f02029a 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -2222,8 +2222,6 @@ int dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
2222 ASSERT(net); 2222 ASSERT(net);
2223 2223
2224 ASSERT(!net->netdev_ops); 2224 ASSERT(!net->netdev_ops);
2225 net->netdev_ops = &dhd_ops_virt;
2226
2227 net->netdev_ops = &dhd_ops_pri; 2225 net->netdev_ops = &dhd_ops_pri;
2228 2226
2229 /* 2227 /*
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index 3f29488d9c7..ea0825238d5 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -95,12 +95,12 @@ static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
95 struct net_device *dev, 95 struct net_device *dev,
96 u8 key_idx); 96 u8 key_idx);
97static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, 97static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
98 u8 key_idx, const u8 *mac_addr, 98 u8 key_idx, bool pairwise, const u8 *mac_addr,
99 struct key_params *params); 99 struct key_params *params);
100static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, 100static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
101 u8 key_idx, const u8 *mac_addr); 101 u8 key_idx, bool pairwise, const u8 *mac_addr);
102static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, 102static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
103 u8 key_idx, const u8 *mac_addr, 103 u8 key_idx, bool pairwise, const u8 *mac_addr,
104 void *cookie, void (*callback) (void *cookie, 104 void *cookie, void (*callback) (void *cookie,
105 struct 105 struct
106 key_params * 106 key_params *
@@ -1615,7 +1615,7 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
1615 1615
1616static s32 1616static s32
1617wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, 1617wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
1618 u8 key_idx, const u8 *mac_addr, 1618 u8 key_idx, bool pairwise, const u8 *mac_addr,
1619 struct key_params *params) 1619 struct key_params *params)
1620{ 1620{
1621 struct wl_wsec_key key; 1621 struct wl_wsec_key key;
@@ -1700,7 +1700,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
1700 1700
1701static s32 1701static s32
1702wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, 1702wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
1703 u8 key_idx, const u8 *mac_addr) 1703 u8 key_idx, bool pairwise, const u8 *mac_addr)
1704{ 1704{
1705 struct wl_wsec_key key; 1705 struct wl_wsec_key key;
1706 s32 err = 0; 1706 s32 err = 0;
@@ -1756,7 +1756,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
1756 1756
1757static s32 1757static s32
1758wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, 1758wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
1759 u8 key_idx, const u8 *mac_addr, void *cookie, 1759 u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
1760 void (*callback) (void *cookie, struct key_params * params)) 1760 void (*callback) (void *cookie, struct key_params * params))
1761{ 1761{
1762 struct key_params params; 1762 struct key_params params;
diff --git a/drivers/staging/cpia/cpia.c b/drivers/staging/cpia/cpia.c
index 933ae4c8cb9..0e740b8dafc 100644
--- a/drivers/staging/cpia/cpia.c
+++ b/drivers/staging/cpia/cpia.c
@@ -3184,13 +3184,9 @@ static int cpia_open(struct file *file)
3184 goto oops; 3184 goto oops;
3185 } 3185 }
3186 3186
3187 err = -EINTR;
3188 if(signal_pending(current))
3189 goto oops;
3190
3191 /* Set ownership of /proc/cpia/videoX to current user */ 3187 /* Set ownership of /proc/cpia/videoX to current user */
3192 if(cam->proc_entry) 3188 if(cam->proc_entry)
3193 cam->proc_entry->uid = current_uid(); 3189 cam->proc_entry->uid = current_euid();
3194 3190
3195 /* set mark for loading first frame uncompressed */ 3191 /* set mark for loading first frame uncompressed */
3196 cam->first_frame = 1; 3192 cam->first_frame = 1;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
index 87a6487531c..20d509836d9 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
@@ -286,7 +286,6 @@ int ft1000_CreateDevice(struct ft1000_device *dev)
286 pid = kernel_thread (exec_mknod, (void *)info, 0); 286 pid = kernel_thread (exec_mknod, (void *)info, 0);
287 287
288 // initialize application information 288 // initialize application information
289 info->appcnt = 0;
290 289
291// if (ft1000_flarion_cnt == 0) { 290// if (ft1000_flarion_cnt == 0) {
292// 291//
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c
index 702a478d554..a99e900ec4c 100644
--- a/drivers/staging/hv/hv_utils.c
+++ b/drivers/staging/hv/hv_utils.c
@@ -212,9 +212,6 @@ static void heartbeat_onchannelcallback(void *context)
212 recvlen, requestid); 212 recvlen, requestid);
213 213
214 icmsghdrp = (struct icmsg_hdr *)&buf[ 214 icmsghdrp = (struct icmsg_hdr *)&buf[
215 sizeof(struct vmbuspipe_hdr)];
216
217 icmsghdrp = (struct icmsg_hdr *)&buf[
218 sizeof(struct vmbuspipe_hdr)]; 215 sizeof(struct vmbuspipe_hdr)];
219 216
220 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 217 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
index 463e5cba830..9618c799746 100644
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_app_interface.c
@@ -244,12 +244,12 @@ static int intel_sst_mmap_play_capture(u32 str_id,
244 int retval, i; 244 int retval, i;
245 struct stream_info *stream; 245 struct stream_info *stream;
246 struct snd_sst_mmap_buff_entry *buf_entry; 246 struct snd_sst_mmap_buff_entry *buf_entry;
247 struct snd_sst_mmap_buff_entry *tmp_buf;
247 248
248 pr_debug("sst:called for str_id %d\n", str_id); 249 pr_debug("sst:called for str_id %d\n", str_id);
249 retval = sst_validate_strid(str_id); 250 retval = sst_validate_strid(str_id);
250 if (retval) 251 if (retval)
251 return -EINVAL; 252 return -EINVAL;
252 BUG_ON(!mmap_buf);
253 253
254 stream = &sst_drv_ctx->streams[str_id]; 254 stream = &sst_drv_ctx->streams[str_id];
255 if (stream->mmapped != true) 255 if (stream->mmapped != true)
@@ -262,14 +262,24 @@ static int intel_sst_mmap_play_capture(u32 str_id,
262 stream->curr_bytes = 0; 262 stream->curr_bytes = 0;
263 stream->cumm_bytes = 0; 263 stream->cumm_bytes = 0;
264 264
265 tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL);
266 if (!tmp_buf)
267 return -ENOMEM;
268 if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff,
269 mmap_buf->entries * sizeof(*tmp_buf))) {
270 retval = -EFAULT;
271 goto out_free;
272 }
273
265 pr_debug("sst:new buffers count %d status %d\n", 274 pr_debug("sst:new buffers count %d status %d\n",
266 mmap_buf->entries, stream->status); 275 mmap_buf->entries, stream->status);
267 buf_entry = mmap_buf->buff; 276 buf_entry = tmp_buf;
268 for (i = 0; i < mmap_buf->entries; i++) { 277 for (i = 0; i < mmap_buf->entries; i++) {
269 BUG_ON(!buf_entry);
270 bufs = kzalloc(sizeof(*bufs), GFP_KERNEL); 278 bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
271 if (!bufs) 279 if (!bufs) {
272 return -ENOMEM; 280 retval = -ENOMEM;
281 goto out_free;
282 }
273 bufs->size = buf_entry->size; 283 bufs->size = buf_entry->size;
274 bufs->offset = buf_entry->offset; 284 bufs->offset = buf_entry->offset;
275 bufs->addr = sst_drv_ctx->mmap_mem; 285 bufs->addr = sst_drv_ctx->mmap_mem;
@@ -293,13 +303,15 @@ static int intel_sst_mmap_play_capture(u32 str_id,
293 if (sst_play_frame(str_id) < 0) { 303 if (sst_play_frame(str_id) < 0) {
294 pr_warn("sst: play frames fail\n"); 304 pr_warn("sst: play frames fail\n");
295 mutex_unlock(&stream->lock); 305 mutex_unlock(&stream->lock);
296 return -EIO; 306 retval = -EIO;
307 goto out_free;
297 } 308 }
298 } else if (stream->ops == STREAM_OPS_CAPTURE) { 309 } else if (stream->ops == STREAM_OPS_CAPTURE) {
299 if (sst_capture_frame(str_id) < 0) { 310 if (sst_capture_frame(str_id) < 0) {
300 pr_warn("sst: capture frame fail\n"); 311 pr_warn("sst: capture frame fail\n");
301 mutex_unlock(&stream->lock); 312 mutex_unlock(&stream->lock);
302 return -EIO; 313 retval = -EIO;
314 goto out_free;
303 } 315 }
304 } 316 }
305 } 317 }
@@ -314,6 +326,9 @@ static int intel_sst_mmap_play_capture(u32 str_id,
314 if (retval >= 0) 326 if (retval >= 0)
315 retval = stream->cumm_bytes; 327 retval = stream->cumm_bytes;
316 pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval); 328 pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval);
329
330out_free:
331 kfree(tmp_buf);
317 return retval; 332 return retval;
318} 333}
319 334
@@ -377,7 +392,7 @@ static int snd_sst_fill_kernel_list(struct stream_info *stream,
377{ 392{
378 struct sst_stream_bufs *stream_bufs; 393 struct sst_stream_bufs *stream_bufs;
379 unsigned long index, mmap_len; 394 unsigned long index, mmap_len;
380 unsigned char *bufp; 395 unsigned char __user *bufp;
381 unsigned long size, copied_size; 396 unsigned long size, copied_size;
382 int retval = 0, add_to_list = 0; 397 int retval = 0, add_to_list = 0;
383 static int sent_offset; 398 static int sent_offset;
@@ -512,9 +527,7 @@ static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
512 /* copy to user */ 527 /* copy to user */
513 list_for_each_entry_safe(entry, _entry, 528 list_for_each_entry_safe(entry, _entry,
514 copy_to_list, node) { 529 copy_to_list, node) {
515 if (copy_to_user((void *) 530 if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset,
516 iovec[entry->iov_index].iov_base +
517 entry->iov_offset,
518 kbufs->addr + entry->offset, 531 kbufs->addr + entry->offset,
519 entry->size)) { 532 entry->size)) {
520 /* Clean up the list and return error */ 533 /* Clean up the list and return error */
@@ -590,7 +603,7 @@ static int intel_sst_read_write(unsigned int str_id, char __user *buf,
590 buf, (int) count, (int) stream->status); 603 buf, (int) count, (int) stream->status);
591 604
592 stream->buf_type = SST_BUF_USER_STATIC; 605 stream->buf_type = SST_BUF_USER_STATIC;
593 iovec.iov_base = (void *)buf; 606 iovec.iov_base = buf;
594 iovec.iov_len = count; 607 iovec.iov_len = count;
595 nr_segs = 1; 608 nr_segs = 1;
596 609
@@ -838,7 +851,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
838 break; 851 break;
839 852
840 case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): { 853 case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
841 struct snd_sst_params *str_param = (struct snd_sst_params *)arg; 854 struct snd_sst_params str_param;
842 855
843 pr_debug("sst: IOCTL_SET_PARAMS recieved!\n"); 856 pr_debug("sst: IOCTL_SET_PARAMS recieved!\n");
844 if (minor != STREAM_MODULE) { 857 if (minor != STREAM_MODULE) {
@@ -846,17 +859,25 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
846 break; 859 break;
847 } 860 }
848 861
862 if (copy_from_user(&str_param, (void __user *)arg,
863 sizeof(str_param))) {
864 retval = -EFAULT;
865 break;
866 }
867
849 if (!str_id) { 868 if (!str_id) {
850 869
851 retval = sst_get_stream(str_param); 870 retval = sst_get_stream(&str_param);
852 if (retval > 0) { 871 if (retval > 0) {
853 struct stream_info *str_info; 872 struct stream_info *str_info;
873 char __user *dest;
874
854 sst_drv_ctx->stream_cnt++; 875 sst_drv_ctx->stream_cnt++;
855 data->str_id = retval; 876 data->str_id = retval;
856 str_info = &sst_drv_ctx->streams[retval]; 877 str_info = &sst_drv_ctx->streams[retval];
857 str_info->src = SST_DRV; 878 str_info->src = SST_DRV;
858 retval = copy_to_user(&str_param->stream_id, 879 dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id);
859 &retval, sizeof(__u32)); 880 retval = copy_to_user(dest, &retval, sizeof(__u32));
860 if (retval) 881 if (retval)
861 retval = -EFAULT; 882 retval = -EFAULT;
862 } else { 883 } else {
@@ -866,16 +887,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
866 } else { 887 } else {
867 pr_debug("sst: SET_STREAM_PARAMS recieved!\n"); 888 pr_debug("sst: SET_STREAM_PARAMS recieved!\n");
868 /* allocated set params only */ 889 /* allocated set params only */
869 retval = sst_set_stream_param(str_id, str_param); 890 retval = sst_set_stream_param(str_id, &str_param);
870 /* Block the call for reply */ 891 /* Block the call for reply */
871 if (!retval) { 892 if (!retval) {
872 int sfreq = 0, word_size = 0, num_channel = 0; 893 int sfreq = 0, word_size = 0, num_channel = 0;
873 sfreq = str_param->sparams.uc.pcm_params.sfreq; 894 sfreq = str_param.sparams.uc.pcm_params.sfreq;
874 word_size = str_param->sparams. 895 word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz;
875 uc.pcm_params.pcm_wd_sz; 896 num_channel = str_param.sparams.uc.pcm_params.num_chan;
876 num_channel = str_param-> 897 if (str_param.ops == STREAM_OPS_CAPTURE) {
877 sparams.uc.pcm_params.num_chan;
878 if (str_param->ops == STREAM_OPS_CAPTURE) {
879 sst_drv_ctx->scard_ops->\ 898 sst_drv_ctx->scard_ops->\
880 set_pcm_audio_params(sfreq, 899 set_pcm_audio_params(sfreq,
881 word_size, num_channel); 900 word_size, num_channel);
@@ -885,41 +904,39 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
885 break; 904 break;
886 } 905 }
887 case _IOC_NR(SNDRV_SST_SET_VOL): { 906 case _IOC_NR(SNDRV_SST_SET_VOL): {
888 struct snd_sst_vol *set_vol; 907 struct snd_sst_vol set_vol;
889 struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg; 908
909 if (copy_from_user(&set_vol, (void __user *)arg,
910 sizeof(set_vol))) {
911 pr_debug("sst: copy failed\n");
912 retval = -EFAULT;
913 break;
914 }
890 pr_debug("sst: SET_VOLUME recieved for %d!\n", 915 pr_debug("sst: SET_VOLUME recieved for %d!\n",
891 rec_vol->stream_id); 916 set_vol.stream_id);
892 if (minor == STREAM_MODULE && rec_vol->stream_id == 0) { 917 if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
893 pr_debug("sst: invalid operation!\n"); 918 pr_debug("sst: invalid operation!\n");
894 retval = -EPERM; 919 retval = -EPERM;
895 break; 920 break;
896 } 921 }
897 set_vol = kzalloc(sizeof(*set_vol), GFP_ATOMIC); 922 retval = sst_set_vol(&set_vol);
898 if (!set_vol) {
899 pr_debug("sst: mem allocation failed\n");
900 retval = -ENOMEM;
901 break;
902 }
903 if (copy_from_user(set_vol, rec_vol, sizeof(*set_vol))) {
904 pr_debug("sst: copy failed\n");
905 retval = -EFAULT;
906 break;
907 }
908 retval = sst_set_vol(set_vol);
909 kfree(set_vol);
910 break; 923 break;
911 } 924 }
912 case _IOC_NR(SNDRV_SST_GET_VOL): { 925 case _IOC_NR(SNDRV_SST_GET_VOL): {
913 struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
914 struct snd_sst_vol get_vol; 926 struct snd_sst_vol get_vol;
927
928 if (copy_from_user(&get_vol, (void __user *)arg,
929 sizeof(get_vol))) {
930 retval = -EFAULT;
931 break;
932 }
915 pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n", 933 pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n",
916 rec_vol->stream_id); 934 get_vol.stream_id);
917 if (minor == STREAM_MODULE && rec_vol->stream_id == 0) { 935 if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
918 pr_debug("sst: invalid operation!\n"); 936 pr_debug("sst: invalid operation!\n");
919 retval = -EPERM; 937 retval = -EPERM;
920 break; 938 break;
921 } 939 }
922 get_vol.stream_id = rec_vol->stream_id;
923 retval = sst_get_vol(&get_vol); 940 retval = sst_get_vol(&get_vol);
924 if (retval) { 941 if (retval) {
925 retval = -EIO; 942 retval = -EIO;
@@ -928,7 +945,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
928 pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n", 945 pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
929 get_vol.stream_id, get_vol.volume, 946 get_vol.stream_id, get_vol.volume,
930 get_vol.ramp_duration, get_vol.ramp_type); 947 get_vol.ramp_duration, get_vol.ramp_type);
931 if (copy_to_user((struct snd_sst_vol *)arg, 948 if (copy_to_user((struct snd_sst_vol __user *)arg,
932 &get_vol, sizeof(get_vol))) { 949 &get_vol, sizeof(get_vol))) {
933 retval = -EFAULT; 950 retval = -EFAULT;
934 break; 951 break;
@@ -938,25 +955,20 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
938 } 955 }
939 956
940 case _IOC_NR(SNDRV_SST_MUTE): { 957 case _IOC_NR(SNDRV_SST_MUTE): {
941 struct snd_sst_mute *set_mute; 958 struct snd_sst_mute set_mute;
942 struct snd_sst_vol *rec_mute = (struct snd_sst_vol *)arg; 959
943 pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n", 960 if (copy_from_user(&set_mute, (void __user *)arg,
944 rec_mute->stream_id); 961 sizeof(set_mute))) {
945 if (minor == STREAM_MODULE && rec_mute->stream_id == 0) { 962 retval = -EFAULT;
946 retval = -EPERM;
947 break;
948 }
949 set_mute = kzalloc(sizeof(*set_mute), GFP_ATOMIC);
950 if (!set_mute) {
951 retval = -ENOMEM;
952 break; 963 break;
953 } 964 }
954 if (copy_from_user(set_mute, rec_mute, sizeof(*set_mute))) { 965 pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
955 retval = -EFAULT; 966 set_mute.stream_id);
967 if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
968 retval = -EPERM;
956 break; 969 break;
957 } 970 }
958 retval = sst_set_mute(set_mute); 971 retval = sst_set_mute(&set_mute);
959 kfree(set_mute);
960 break; 972 break;
961 } 973 }
962 case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): { 974 case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
@@ -973,7 +985,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
973 retval = -EIO; 985 retval = -EIO;
974 break; 986 break;
975 } 987 }
976 if (copy_to_user((struct snd_sst_get_stream_params *)arg, 988 if (copy_to_user((struct snd_sst_get_stream_params __user *)arg,
977 &get_params, sizeof(get_params))) { 989 &get_params, sizeof(get_params))) {
978 retval = -EFAULT; 990 retval = -EFAULT;
979 break; 991 break;
@@ -983,16 +995,22 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
983 } 995 }
984 996
985 case _IOC_NR(SNDRV_SST_MMAP_PLAY): 997 case _IOC_NR(SNDRV_SST_MMAP_PLAY):
986 case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): 998 case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
999 struct snd_sst_mmap_buffs mmap_buf;
1000
987 pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n"); 1001 pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
988 if (minor != STREAM_MODULE) { 1002 if (minor != STREAM_MODULE) {
989 retval = -EBADRQC; 1003 retval = -EBADRQC;
990 break; 1004 break;
991 } 1005 }
992 retval = intel_sst_mmap_play_capture(str_id, 1006 if (copy_from_user(&mmap_buf, (void __user *)arg,
993 (struct snd_sst_mmap_buffs *)arg); 1007 sizeof(mmap_buf))) {
1008 retval = -EFAULT;
1009 break;
1010 }
1011 retval = intel_sst_mmap_play_capture(str_id, &mmap_buf);
994 break; 1012 break;
995 1013 }
996 case _IOC_NR(SNDRV_SST_STREAM_DROP): 1014 case _IOC_NR(SNDRV_SST_STREAM_DROP):
997 pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n"); 1015 pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n");
998 if (minor != STREAM_MODULE) { 1016 if (minor != STREAM_MODULE) {
@@ -1003,7 +1021,6 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1003 break; 1021 break;
1004 1022
1005 case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): { 1023 case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
1006 unsigned long long *ms = (unsigned long long *)arg;
1007 struct snd_sst_tstamp tstamp = {0}; 1024 struct snd_sst_tstamp tstamp = {0};
1008 unsigned long long time, freq, mod; 1025 unsigned long long time, freq, mod;
1009 1026
@@ -1013,14 +1030,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1013 break; 1030 break;
1014 } 1031 }
1015 memcpy_fromio(&tstamp, 1032 memcpy_fromio(&tstamp,
1016 ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) 1033 sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
1017 +(str_id * sizeof(tstamp))),
1018 sizeof(tstamp)); 1034 sizeof(tstamp));
1019 time = tstamp.samples_rendered; 1035 time = tstamp.samples_rendered;
1020 freq = (unsigned long long) tstamp.sampling_frequency; 1036 freq = (unsigned long long) tstamp.sampling_frequency;
1021 time = time * 1000; /* converting it to ms */ 1037 time = time * 1000; /* converting it to ms */
1022 mod = do_div(time, freq); 1038 mod = do_div(time, freq);
1023 if (copy_to_user(ms, &time, sizeof(*ms))) 1039 if (copy_to_user((void __user *)arg, &time,
1040 sizeof(unsigned long long)))
1024 retval = -EFAULT; 1041 retval = -EFAULT;
1025 break; 1042 break;
1026 } 1043 }
@@ -1065,92 +1082,118 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1065 } 1082 }
1066 1083
1067 case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): { 1084 case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
1068 struct snd_sst_target_device *target_device; 1085 struct snd_sst_target_device target_device;
1069 1086
1070 pr_debug("sst: SET_TARGET_DEVICE recieved!\n"); 1087 pr_debug("sst: SET_TARGET_DEVICE recieved!\n");
1071 target_device = (struct snd_sst_target_device *)arg; 1088 if (copy_from_user(&target_device, (void __user *)arg,
1072 BUG_ON(!target_device); 1089 sizeof(target_device))) {
1090 retval = -EFAULT;
1091 break;
1092 }
1073 if (minor != AM_MODULE) { 1093 if (minor != AM_MODULE) {
1074 retval = -EBADRQC; 1094 retval = -EBADRQC;
1075 break; 1095 break;
1076 } 1096 }
1077 retval = sst_target_device_select(target_device); 1097 retval = sst_target_device_select(&target_device);
1078 break; 1098 break;
1079 } 1099 }
1080 1100
1081 case _IOC_NR(SNDRV_SST_DRIVER_INFO): { 1101 case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
1082 struct snd_sst_driver_info *info = 1102 struct snd_sst_driver_info info;
1083 (struct snd_sst_driver_info *)arg;
1084 1103
1085 pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n"); 1104 pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n");
1086 info->version = SST_VERSION_NUM; 1105 info.version = SST_VERSION_NUM;
1087		/* hard coded, should get this somehow later */	1106		/* hard coded, should get this somehow later */
1088 info->active_pcm_streams = sst_drv_ctx->stream_cnt - 1107 info.active_pcm_streams = sst_drv_ctx->stream_cnt -
1089 sst_drv_ctx->encoded_cnt; 1108 sst_drv_ctx->encoded_cnt;
1090 info->active_enc_streams = sst_drv_ctx->encoded_cnt; 1109 info.active_enc_streams = sst_drv_ctx->encoded_cnt;
1091 info->max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM; 1110 info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
1092 info->max_enc_streams = MAX_ENC_STREAM; 1111 info.max_enc_streams = MAX_ENC_STREAM;
1093 info->buf_per_stream = sst_drv_ctx->mmap_len; 1112 info.buf_per_stream = sst_drv_ctx->mmap_len;
1113 if (copy_to_user((void __user *)arg, &info,
1114 sizeof(info)))
1115 retval = -EFAULT;
1094 break; 1116 break;
1095 } 1117 }
1096 1118
1097 case _IOC_NR(SNDRV_SST_STREAM_DECODE): { 1119 case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
1098 struct snd_sst_dbufs *param = 1120 struct snd_sst_dbufs param;
1099 (struct snd_sst_dbufs *)arg, dbufs_local; 1121 struct snd_sst_dbufs dbufs_local;
1100 int i;
1101 struct snd_sst_buffs ibufs, obufs; 1122 struct snd_sst_buffs ibufs, obufs;
1102 struct snd_sst_buff_entry ibuf_temp[param->ibufs->entries], 1123 struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
1103 obuf_temp[param->obufs->entries]; 1124 char __user *dest;
1104 1125
1105 pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n"); 1126 pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n");
1106 if (minor != STREAM_MODULE) { 1127 if (minor != STREAM_MODULE) {
1107 retval = -EBADRQC; 1128 retval = -EBADRQC;
1108 break; 1129 break;
1109 } 1130 }
1110 if (!param) { 1131 if (copy_from_user(&param, (void __user *)arg,
1111 retval = -EINVAL; 1132 sizeof(param))) {
1133 retval = -EFAULT;
1112 break; 1134 break;
1113 } 1135 }
1114 1136
1115 dbufs_local.input_bytes_consumed = param->input_bytes_consumed; 1137 dbufs_local.input_bytes_consumed = param.input_bytes_consumed;
1116 dbufs_local.output_bytes_produced = 1138 dbufs_local.output_bytes_produced =
1117 param->output_bytes_produced; 1139 param.output_bytes_produced;
1118 dbufs_local.ibufs = &ibufs; 1140
1119 dbufs_local.obufs = &obufs; 1141 if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) {
1120 dbufs_local.ibufs->entries = param->ibufs->entries; 1142 retval = -EFAULT;
1121 dbufs_local.ibufs->type = param->ibufs->type; 1143 break;
1122 dbufs_local.obufs->entries = param->obufs->entries; 1144 }
1123 dbufs_local.obufs->type = param->obufs->type; 1145 if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) {
1124 1146 retval = -EFAULT;
1125 dbufs_local.ibufs->buff_entry = ibuf_temp; 1147 break;
1126 for (i = 0; i < dbufs_local.ibufs->entries; i++) {
1127 ibuf_temp[i].buffer =
1128 param->ibufs->buff_entry[i].buffer;
1129 ibuf_temp[i].size =
1130 param->ibufs->buff_entry[i].size;
1131 } 1148 }
1132 dbufs_local.obufs->buff_entry = obuf_temp; 1149
1133 for (i = 0; i < dbufs_local.obufs->entries; i++) { 1150 ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL);
1134 obuf_temp[i].buffer = 1151 obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL);
1135 param->obufs->buff_entry[i].buffer; 1152 if (!ibuf_tmp || !obuf_tmp) {
1136 obuf_temp[i].size = 1153 retval = -ENOMEM;
1137 param->obufs->buff_entry[i].size; 1154 goto free_iobufs;
1155 }
1156
1157 if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry,
1158 ibufs.entries * sizeof(*ibuf_tmp))) {
1159 retval = -EFAULT;
1160 goto free_iobufs;
1138 } 1161 }
1162 ibufs.buff_entry = ibuf_tmp;
1163 dbufs_local.ibufs = &ibufs;
1164
1165 if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry,
1166 obufs.entries * sizeof(*obuf_tmp))) {
1167 retval = -EFAULT;
1168 goto free_iobufs;
1169 }
1170 obufs.buff_entry = obuf_tmp;
1171 dbufs_local.obufs = &obufs;
1172
1139 retval = sst_decode(str_id, &dbufs_local); 1173 retval = sst_decode(str_id, &dbufs_local);
1140 if (retval) 1174 if (retval) {
1141 retval = -EAGAIN; 1175 retval = -EAGAIN;
1142 if (copy_to_user(&param->input_bytes_consumed, 1176 goto free_iobufs;
1177 }
1178
1179 dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed);
1180 if (copy_to_user(dest,
1143 &dbufs_local.input_bytes_consumed, 1181 &dbufs_local.input_bytes_consumed,
1144 sizeof(unsigned long long))) { 1182 sizeof(unsigned long long))) {
1145 retval = -EFAULT; 1183 retval = -EFAULT;
1146 break; 1184 goto free_iobufs;
1147 } 1185 }
1148 if (copy_to_user(&param->output_bytes_produced, 1186
1187 dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed);
1188 if (copy_to_user(dest,
1149 &dbufs_local.output_bytes_produced, 1189 &dbufs_local.output_bytes_produced,
1150 sizeof(unsigned long long))) { 1190 sizeof(unsigned long long))) {
1151 retval = -EFAULT; 1191 retval = -EFAULT;
1152 break; 1192 goto free_iobufs;
1153 } 1193 }
1194free_iobufs:
1195 kfree(ibuf_tmp);
1196 kfree(obuf_tmp);
1154 break; 1197 break;
1155 } 1198 }
1156 1199
@@ -1164,7 +1207,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1164 break; 1207 break;
1165 1208
1166 case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): { 1209 case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
1167 unsigned long long *bytes = (unsigned long long *)arg; 1210 unsigned long long __user *bytes = (unsigned long long __user *)arg;
1168 struct snd_sst_tstamp tstamp = {0}; 1211 struct snd_sst_tstamp tstamp = {0};
1169 1212
1170 pr_debug("sst: STREAM_BYTES_DECODED recieved!\n"); 1213 pr_debug("sst: STREAM_BYTES_DECODED recieved!\n");
@@ -1173,8 +1216,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1173 break; 1216 break;
1174 } 1217 }
1175 memcpy_fromio(&tstamp, 1218 memcpy_fromio(&tstamp,
1176 ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) 1219 sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
1177 +(str_id * sizeof(tstamp))),
1178 sizeof(tstamp)); 1220 sizeof(tstamp));
1179 if (copy_to_user(bytes, &tstamp.bytes_processed, 1221 if (copy_to_user(bytes, &tstamp.bytes_processed,
1180 sizeof(*bytes))) 1222 sizeof(*bytes)))
@@ -1197,7 +1239,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1197 kfree(fw_info); 1239 kfree(fw_info);
1198 break; 1240 break;
1199 } 1241 }
1200 if (copy_to_user((struct snd_sst_dbufs *)arg, 1242 if (copy_to_user((struct snd_sst_dbufs __user *)arg,
1201 fw_info, sizeof(*fw_info))) { 1243 fw_info, sizeof(*fw_info))) {
1202 kfree(fw_info); 1244 kfree(fw_info);
1203 retval = -EFAULT; 1245 retval = -EFAULT;
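
The hunks above convert the intel_sst ioctl handler from dereferencing the user-supplied arg pointer directly to bouncing every structure through a kernel-local copy with copy_from_user()/copy_to_user(). A minimal sketch of that pattern follows; the struct foo_params type, the foo_ioctl() name and the clamp step are hypothetical stand-ins for the driver's real definitions.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

struct foo_params {
	u32 rate;
	u32 channels;
};

static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct foo_params params;

	/* Never dereference 'arg' directly: it is a user-space pointer. */
	if (copy_from_user(&params, (void __user *)arg, sizeof(params)))
		return -EFAULT;

	/* Validate and act on the kernel-local copy. */
	params.rate = clamp_t(u32, params.rate, 8000, 48000);

	/* Results go back to user space the same way. */
	if (copy_to_user((void __user *)arg, &params, sizeof(params)))
		return -EFAULT;
	return 0;
}

The same shape covers the SNDRV_SST_STREAM_DECODE case above: nested user pointers are each copied in, variable-length arrays are kcalloc()'d instead of stacked, and every failure path frees what was allocated.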
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
index 73a98c851e4..bf0ead78bfa 100644
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ b/drivers/staging/intel_sst/intel_sst_common.h
@@ -231,8 +231,8 @@ struct stream_info {
231 spinlock_t pcm_lock; 231 spinlock_t pcm_lock;
232 bool mmapped; 232 bool mmapped;
233 unsigned int sg_index; /* current buf Index */ 233 unsigned int sg_index; /* current buf Index */
234 unsigned char *cur_ptr; /* Current static bufs */ 234 unsigned char __user *cur_ptr; /* Current static bufs */
235 struct snd_sst_buf_entry *buf_entry; 235 struct snd_sst_buf_entry __user *buf_entry;
236 struct sst_block data_blk; /* stream ops block */ 236 struct sst_block data_blk; /* stream ops block */
237 struct sst_block ctrl_blk; /* stream control cmd block */ 237 struct sst_block ctrl_blk; /* stream control cmd block */
238 enum snd_sst_buf_type buf_type; 238 enum snd_sst_buf_type buf_type;
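
Annotating the stored pointers with __user lets sparse flag any remaining direct dereference at build time. A small illustration, assuming a made-up structure rather than the driver's stream_info:

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct foo_stream {
	unsigned char __user *cur_ptr;	/* points into the caller's buffer */
};

static int foo_copy_in(struct foo_stream *s, void *dst, size_t len)
{
	/* Correct: the user pointer only ever goes through copy_from_user(). */
	if (copy_from_user(dst, s->cur_ptr, len))
		return -EFAULT;
	/* A plain '*s->cur_ptr' here would now draw a sparse warning. */
	return 0;
}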
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index 1934805844f..978bf87ff13 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -22,7 +22,7 @@ int ENE_InitMedia(struct us_data *us)
22 int result; 22 int result;
23 BYTE MiscReg03 = 0; 23 BYTE MiscReg03 = 0;
24 24
25 printk("--- Initial Nedia ---\n"); 25 printk("--- Init Media ---\n");
26 result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03); 26 result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
27 if (result != USB_STOR_XFER_GOOD) 27 if (result != USB_STOR_XFER_GOOD)
28 { 28 {
@@ -64,7 +64,7 @@ int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf)
64 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 64 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
65 int result; 65 int result;
66 66
67 memset(bcb, 0, sizeof(bcb)); 67 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
68 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 68 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
69 bcb->DataTransferLength = 0x01; 69 bcb->DataTransferLength = 0x01;
70 bcb->Flags = 0x80; 70 bcb->Flags = 0x80;
@@ -92,7 +92,7 @@ int ENE_SDInit(struct us_data *us)
92 return USB_STOR_TRANSPORT_ERROR; 92 return USB_STOR_TRANSPORT_ERROR;
93 } 93 }
94 94
95 memset(bcb, 0, sizeof(bcb)); 95 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
96 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 96 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
97 bcb->Flags = 0x80; 97 bcb->Flags = 0x80;
98 bcb->CDB[0] = 0xF2; 98 bcb->CDB[0] = 0xF2;
@@ -112,7 +112,7 @@ int ENE_SDInit(struct us_data *us)
112 return USB_STOR_TRANSPORT_ERROR; 112 return USB_STOR_TRANSPORT_ERROR;
113 } 113 }
114 114
115 memset(bcb, 0, sizeof(bcb)); 115 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
116 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 116 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
117 bcb->DataTransferLength = 0x200; 117 bcb->DataTransferLength = 0x200;
118 bcb->Flags = 0x80; 118 bcb->Flags = 0x80;
@@ -161,7 +161,7 @@ int ENE_MSInit(struct us_data *us)
161 return USB_STOR_TRANSPORT_ERROR; 161 return USB_STOR_TRANSPORT_ERROR;
162 } 162 }
163 163
164 memset(bcb, 0, sizeof(bcb)); 164 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
165 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 165 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
166 bcb->DataTransferLength = 0x200; 166 bcb->DataTransferLength = 0x200;
167 bcb->Flags = 0x80; 167 bcb->Flags = 0x80;
@@ -219,7 +219,7 @@ int ENE_SMInit(struct us_data *us)
219 return USB_STOR_TRANSPORT_ERROR; 219 return USB_STOR_TRANSPORT_ERROR;
220 } 220 }
221 221
222 memset(bcb, 0, sizeof(bcb)); 222 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
223 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 223 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
224 bcb->DataTransferLength = 0x200; 224 bcb->DataTransferLength = 0x200;
225 bcb->Flags = 0x80; 225 bcb->Flags = 0x80;
@@ -341,7 +341,7 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag)
341 break; 341 break;
342 } 342 }
343 343
344 memset(bcb, 0, sizeof(bcb)); 344 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
345 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 345 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
346 bcb->DataTransferLength = 0x800; 346 bcb->DataTransferLength = 0x800;
347 bcb->Flags =0x00; 347 bcb->Flags =0x00;
@@ -433,7 +433,7 @@ int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length)
433 433
434 //printk("transport --- ENE_Read_Data\n"); 434 //printk("transport --- ENE_Read_Data\n");
435 // set up the command wrapper 435 // set up the command wrapper
436 memset(bcb, 0, sizeof(bcb)); 436 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
437 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 437 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
438 bcb->DataTransferLength = length; 438 bcb->DataTransferLength = length;
439 bcb->Flags =0x80; 439 bcb->Flags =0x80;
@@ -470,7 +470,7 @@ int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length)
470 470
471 //printk("transport --- ENE_Write_Data\n"); 471 //printk("transport --- ENE_Write_Data\n");
472 // set up the command wrapper 472 // set up the command wrapper
473 memset(bcb, 0, sizeof(bcb)); 473 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
474 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 474 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
475 bcb->DataTransferLength = length; 475 bcb->DataTransferLength = length;
476 bcb->Flags =0x00; 476 bcb->Flags =0x00;
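
The repeated change from sizeof(bcb) to sizeof(struct bulk_cb_wrap) fixes a classic mistake: bcb is a pointer, so sizeof(bcb) clears only the 4 or 8 bytes of the pointer itself rather than the whole command wrapper. A stand-alone illustration with a hypothetical wrapper type:

#include <stdio.h>
#include <string.h>

struct wrapper {
	unsigned int signature;
	unsigned int length;
	unsigned char cdb[16];
};

int main(void)
{
	struct wrapper w;
	struct wrapper *p = &w;

	printf("sizeof(p)  = %zu\n", sizeof(p));	/* size of a pointer, e.g. 8 */
	printf("sizeof(*p) = %zu\n", sizeof(*p));	/* size of the struct, 24 */

	memset(p, 0, sizeof(*p));	/* correct: clears the whole wrapper */
	/* memset(p, 0, sizeof(p)) would leave most of 'w' uninitialised. */
	return 0;
}

sizeof(*bcb) would have worked equally well and keeps working if the pointed-to type ever changes.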
diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c
index d4340a9da87..9a3fdb4e4fe 100644
--- a/drivers/staging/keucr/ms.c
+++ b/drivers/staging/keucr/ms.c
@@ -15,7 +15,7 @@ int MS_ReaderCopyBlock(struct us_data *us, WORD oldphy, WORD newphy, WORD PhyBlo
15 if (result != USB_STOR_XFER_GOOD) 15 if (result != USB_STOR_XFER_GOOD)
16 return USB_STOR_TRANSPORT_ERROR; 16 return USB_STOR_TRANSPORT_ERROR;
17 17
18 memset(bcb, 0, sizeof(bcb)); 18 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
19 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 19 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
20 bcb->DataTransferLength = 0x200*len; 20 bcb->DataTransferLength = 0x200*len;
21 bcb->Flags = 0x00; 21 bcb->Flags = 0x00;
@@ -53,7 +53,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO
53 return USB_STOR_TRANSPORT_ERROR; 53 return USB_STOR_TRANSPORT_ERROR;
54 54
55 // Read Page Data 55 // Read Page Data
56 memset(bcb, 0, sizeof(bcb)); 56 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
57 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 57 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
58 bcb->DataTransferLength = 0x200; 58 bcb->DataTransferLength = 0x200;
59 bcb->Flags = 0x80; 59 bcb->Flags = 0x80;
@@ -69,7 +69,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO
69 return USB_STOR_TRANSPORT_ERROR; 69 return USB_STOR_TRANSPORT_ERROR;
70 70
71 // Read Extra Data 71 // Read Extra Data
72 memset(bcb, 0, sizeof(bcb)); 72 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
73 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 73 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
74 bcb->DataTransferLength = 0x4; 74 bcb->DataTransferLength = 0x4;
75 bcb->Flags = 0x80; 75 bcb->Flags = 0x80;
@@ -108,7 +108,7 @@ int MS_ReaderEraseBlock(struct us_data *us, DWORD PhyBlockAddr)
108 if (result != USB_STOR_XFER_GOOD) 108 if (result != USB_STOR_XFER_GOOD)
109 return USB_STOR_TRANSPORT_ERROR; 109 return USB_STOR_TRANSPORT_ERROR;
110 110
111 memset(bcb, 0, sizeof(bcb)); 111 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
112 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 112 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
113 bcb->DataTransferLength = 0x200; 113 bcb->DataTransferLength = 0x200;
114 bcb->Flags = 0x80; 114 bcb->Flags = 0x80;
@@ -673,7 +673,7 @@ int MS_LibReadExtraBlock(struct us_data *us, DWORD PhyBlock, BYTE PageNum, BYTE
673 //printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen); 673 //printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen);
674 674
675 // Read Extra Data 675 // Read Extra Data
676 memset(bcb, 0, sizeof(bcb)); 676 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
677 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 677 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
678 bcb->DataTransferLength = 0x4 * blen; 678 bcb->DataTransferLength = 0x4 * blen;
679 bcb->Flags = 0x80; 679 bcb->Flags = 0x80;
@@ -700,7 +700,7 @@ int MS_LibReadExtra(struct us_data *us, DWORD PhyBlock, BYTE PageNum, MS_LibType
700 BYTE ExtBuf[4]; 700 BYTE ExtBuf[4];
701 701
702 //printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); 702 //printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum);
703 memset(bcb, 0, sizeof(bcb)); 703 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
704 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 704 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
705 bcb->DataTransferLength = 0x4; 705 bcb->DataTransferLength = 0x4;
706 bcb->Flags = 0x80; 706 bcb->Flags = 0x80;
@@ -807,7 +807,7 @@ int MS_LibOverwriteExtra(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, B
807 if (result != USB_STOR_XFER_GOOD) 807 if (result != USB_STOR_XFER_GOOD)
808 return USB_STOR_TRANSPORT_ERROR; 808 return USB_STOR_TRANSPORT_ERROR;
809 809
810 memset(bcb, 0, sizeof(bcb)); 810 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
811 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 811 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
812 bcb->DataTransferLength = 0x4; 812 bcb->DataTransferLength = 0x4;
813 bcb->Flags = 0x80; 813 bcb->Flags = 0x80;
diff --git a/drivers/staging/keucr/msscsi.c b/drivers/staging/keucr/msscsi.c
index ad0c5c62993..cb92d25acee 100644
--- a/drivers/staging/keucr/msscsi.c
+++ b/drivers/staging/keucr/msscsi.c
@@ -145,7 +145,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
145 } 145 }
146 146
147 // set up the command wrapper 147 // set up the command wrapper
148 memset(bcb, 0, sizeof(bcb)); 148 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
149 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 149 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
150 bcb->DataTransferLength = blenByte; 150 bcb->DataTransferLength = blenByte;
151 bcb->Flags = 0x80; 151 bcb->Flags = 0x80;
@@ -193,7 +193,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
193 blkno = phyblk * 0x20 + PageNum; 193 blkno = phyblk * 0x20 + PageNum;
194 194
195 // set up the command wrapper 195 // set up the command wrapper
196 memset(bcb, 0, sizeof(bcb)); 196 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
197 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 197 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
198 bcb->DataTransferLength = 0x200 * len; 198 bcb->DataTransferLength = 0x200 * len;
199 bcb->Flags = 0x80; 199 bcb->Flags = 0x80;
@@ -250,7 +250,7 @@ int MS_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
250 } 250 }
251 251
252 // set up the command wrapper 252 // set up the command wrapper
253 memset(bcb, 0, sizeof(bcb)); 253 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
254 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 254 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
255 bcb->DataTransferLength = blenByte; 255 bcb->DataTransferLength = blenByte;
256 bcb->Flags = 0x00; 256 bcb->Flags = 0x00;
diff --git a/drivers/staging/keucr/sdscsi.c b/drivers/staging/keucr/sdscsi.c
index 6c332f850eb..d646507a361 100644
--- a/drivers/staging/keucr/sdscsi.c
+++ b/drivers/staging/keucr/sdscsi.c
@@ -152,7 +152,7 @@ int SD_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
152 bnByte = bn; 152 bnByte = bn;
153 153
154 // set up the command wrapper 154 // set up the command wrapper
155 memset(bcb, 0, sizeof(bcb)); 155 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
156 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 156 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
157 bcb->DataTransferLength = blenByte; 157 bcb->DataTransferLength = blenByte;
158 bcb->Flags = 0x80; 158 bcb->Flags = 0x80;
@@ -192,7 +192,7 @@ int SD_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
192 bnByte = bn; 192 bnByte = bn;
193 193
194 // set up the command wrapper 194 // set up the command wrapper
195 memset(bcb, 0, sizeof(bcb)); 195 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
196 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 196 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
197 bcb->DataTransferLength = blenByte; 197 bcb->DataTransferLength = blenByte;
198 bcb->Flags = 0x00; 198 bcb->Flags = 0x00;
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 844b6598863..1b52535a388 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -266,7 +266,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
266 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 266 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
267 267
268 // Read sect data 268 // Read sect data
269 memset(bcb, 0, sizeof(bcb)); 269 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
270 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 270 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
271 bcb->DataTransferLength = 0x200; 271 bcb->DataTransferLength = 0x200;
272 bcb->Flags = 0x80; 272 bcb->Flags = 0x80;
@@ -281,7 +281,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
281 return USB_STOR_TRANSPORT_ERROR; 281 return USB_STOR_TRANSPORT_ERROR;
282 282
283 // Read redundant 283 // Read redundant
284 memset(bcb, 0, sizeof(bcb)); 284 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
285 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 285 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
286 bcb->DataTransferLength = 0x10; 286 bcb->DataTransferLength = 0x10;
287 bcb->Flags = 0x80; 287 bcb->Flags = 0x80;
@@ -319,7 +319,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
319 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 319 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
320 320
321 // Read sect data 321 // Read sect data
322 memset(bcb, 0, sizeof(bcb)); 322 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
323 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 323 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
324 bcb->DataTransferLength = 0x200*count; 324 bcb->DataTransferLength = 0x200*count;
325 bcb->Flags = 0x80; 325 bcb->Flags = 0x80;
@@ -334,7 +334,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
334 return USB_STOR_TRANSPORT_ERROR; 334 return USB_STOR_TRANSPORT_ERROR;
335 335
336 // Read redundant 336 // Read redundant
337 memset(bcb, 0, sizeof(bcb)); 337 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
338 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 338 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
339 bcb->DataTransferLength = 0x10; 339 bcb->DataTransferLength = 0x10;
340 bcb->Flags = 0x80; 340 bcb->Flags = 0x80;
@@ -536,7 +536,7 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
536 WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors; 536 WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors;
537 537
538 // Write sect data 538 // Write sect data
539 memset(bcb, 0, sizeof(bcb)); 539 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
540 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 540 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
541 bcb->DataTransferLength = 0x200*count; 541 bcb->DataTransferLength = 0x200*count;
542 bcb->Flags = 0x00; 542 bcb->Flags = 0x00;
@@ -754,7 +754,7 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
754 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 754 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
755 755
756 // Write sect data 756 // Write sect data
757 memset(bcb, 0, sizeof(bcb)); 757 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
758 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 758 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
759 bcb->DataTransferLength = 0x200; 759 bcb->DataTransferLength = 0x200;
760 bcb->Flags = 0x00; 760 bcb->Flags = 0x00;
@@ -791,7 +791,7 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
791 addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; 791 addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
792 addr=addr*(WORD)Ssfdc.MaxSectors; 792 addr=addr*(WORD)Ssfdc.MaxSectors;
793 793
794 memset(bcb, 0, sizeof(bcb)); 794 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
795 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 795 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
796 bcb->DataTransferLength = 0x200; 796 bcb->DataTransferLength = 0x200;
797 bcb->Flags = 0x80; 797 bcb->Flags = 0x80;
@@ -827,7 +827,7 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
827 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; 827 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
828 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 828 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
829 829
830 memset(bcb, 0, sizeof(bcb)); 830 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
831 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 831 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
832 bcb->DataTransferLength = 0x10; 832 bcb->DataTransferLength = 0x10;
833 bcb->Flags = 0x80; 833 bcb->Flags = 0x80;
@@ -870,7 +870,7 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
870 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; 870 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
871 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 871 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
872 872
873 memset(bcb, 0, sizeof(bcb)); 873 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
874 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 874 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
875 bcb->DataTransferLength = 0x10; 875 bcb->DataTransferLength = 0x10;
876 bcb->Flags = 0x80; 876 bcb->Flags = 0x80;
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c
index fd98df643ab..111160cce44 100644
--- a/drivers/staging/keucr/transport.c
+++ b/drivers/staging/keucr/transport.c
@@ -40,7 +40,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
40 us->current_urb->error_count = 0; 40 us->current_urb->error_count = 0;
41 us->current_urb->status = 0; 41 us->current_urb->status = 0;
42 42
43// us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP; 43 us->current_urb->transfer_flags = 0;
44 if (us->current_urb->transfer_buffer == us->iobuf) 44 if (us->current_urb->transfer_buffer == us->iobuf)
45 us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 45 us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
46 us->current_urb->transfer_dma = us->iobuf_dma; 46 us->current_urb->transfer_dma = us->iobuf_dma;
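
With the commented-out URB_NO_SETUP_DMA_MAP line gone, the driver now clears transfer_flags explicitly before deciding whether the pre-mapped iobuf may skip the USB core's own DMA mapping. The sketch below shows that reset-then-set pattern for a reused URB; foo_submit() and its arguments are hypothetical, only the URB calls are real kernel API.

#include <linux/usb.h>
#include <linux/gfp.h>

/* Reusing one pre-allocated URB: clear stale flags before each submission. */
static int foo_submit(struct usb_device *udev, struct urb *urb,
		      unsigned int pipe, void *buf, dma_addr_t buf_dma,
		      int len, usb_complete_t done, void *ctx)
{
	usb_fill_bulk_urb(urb, udev, pipe, buf, len, done, ctx);

	urb->transfer_flags = 0;	/* drop whatever the last use set */
	if (buf_dma) {
		/* Buffer is already DMA-mapped: tell the core not to map it again. */
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		urb->transfer_dma = buf_dma;
	}
	return usb_submit_urb(urb, GFP_NOIO);
}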
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 75aa7a36307..4ca45ec7fd8 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -17,7 +17,6 @@
17#include <linux/console.h> 17#include <linux/console.h>
18#include <linux/i2c.h> 18#include <linux/i2c.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/i2c-id.h>
21#include <linux/pci.h> 20#include <linux/pci.h>
22#include <linux/pci_ids.h> 21#include <linux/pci_ids.h>
23#include <linux/interrupt.h> 22#include <linux/interrupt.h>
@@ -733,7 +732,6 @@ static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id)
733 edev: 732 edev:
734 platform_device_unregister(dcon_device); 733 platform_device_unregister(dcon_device);
735 dcon_device = NULL; 734 dcon_device = NULL;
736 i2c_set_clientdata(client, NULL);
737 eirq: 735 eirq:
738 free_irq(DCON_IRQ, &dcon_driver); 736 free_irq(DCON_IRQ, &dcon_driver);
739 einit: 737 einit:
@@ -757,8 +755,6 @@ static int dcon_remove(struct i2c_client *client)
757 platform_device_unregister(dcon_device); 755 platform_device_unregister(dcon_device);
758 cancel_work_sync(&dcon_work); 756 cancel_work_sync(&dcon_work);
759 757
760 i2c_set_clientdata(client, NULL);
761
762 return 0; 758 return 0;
763} 759}
764 760
diff --git a/drivers/staging/rt2860/common/cmm_aes.c b/drivers/staging/rt2860/common/cmm_aes.c
index 1d159ff82fd..a99879bada4 100644
--- a/drivers/staging/rt2860/common/cmm_aes.c
+++ b/drivers/staging/rt2860/common/cmm_aes.c
@@ -330,8 +330,6 @@ void construct_mic_iv(unsigned char *mic_iv,
330 for (i = 8; i < 14; i++) 330 for (i = 8; i < 14; i++)
331 mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */ 331 mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
332#endif 332#endif
333 i = (payload_length / 256);
334 i = (payload_length % 256);
335 mic_iv[14] = (unsigned char)(payload_length / 256); 333 mic_iv[14] = (unsigned char)(payload_length / 256);
336 mic_iv[15] = (unsigned char)(payload_length % 256); 334 mic_iv[15] = (unsigned char)(payload_length % 256);
337 335
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index ebf9074a908..ddacfc6c486 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -65,6 +65,7 @@ struct usb_device_id rtusb_usb_id[] = {
65 {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */ 65 {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */
66 {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */ 66 {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */
67 {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */ 67 {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */
68 {USB_DEVICE(0x050D, 0x935A)}, /* Belkin F6D4050 v1 */
68 {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */ 69 {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */
69 {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */ 70 {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */
70 {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */ 71 {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index a202194b5cb..b1786dcac24 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -5829,6 +5829,9 @@ static void rtl8192_rx(struct net_device *dev)
5829 } 5829 }
5830 } 5830 }
5831 5831
5832 pci_unmap_single(priv->pdev, *((dma_addr_t *) skb->cb),
5833 priv->rxbuffersize, PCI_DMA_FROMDEVICE);
5834
5832 skb = new_skb; 5835 skb = new_skb;
5833 priv->rx_buf[priv->rx_idx] = skb; 5836 priv->rx_buf[priv->rx_idx] = skb;
5834 *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE); 5837 *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE);
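
The added pci_unmap_single() hands the finished receive buffer back to the CPU before the driver installs new_skb; without it every processed packet leaks a streaming DMA mapping. A hedged sketch of the rotate-the-rx-buffer pattern, using the same legacy pci_* DMA API but hypothetical foo_* names:

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct pci_dev *pdev;
	struct sk_buff *rx_buf[64];
	unsigned int rx_idx;
	unsigned int rxbuffersize;
};

static void foo_rx_refill(struct foo_priv *priv)
{
	struct sk_buff *old_skb = priv->rx_buf[priv->rx_idx];
	struct sk_buff *new_skb = dev_alloc_skb(priv->rxbuffersize);

	if (!new_skb)
		return;	/* keep the old, still-mapped buffer and retry later */

	/* Return the finished buffer to the CPU before its data is consumed. */
	pci_unmap_single(priv->pdev, *((dma_addr_t *)old_skb->cb),
			 priv->rxbuffersize, PCI_DMA_FROMDEVICE);

	/* ... pass old_skb up the stack here ... */

	priv->rx_buf[priv->rx_idx] = new_skb;
	*((dma_addr_t *)new_skb->cb) =
		pci_map_single(priv->pdev, skb_tail_pointer(new_skb),
			       priv->rxbuffersize, PCI_DMA_FROMDEVICE);
}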
diff --git a/drivers/staging/solo6x10/solo6010-v4l2-enc.c b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
index bbf3d9c4abb..097e82bc7a6 100644
--- a/drivers/staging/solo6x10/solo6010-v4l2-enc.c
+++ b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
@@ -766,7 +766,7 @@ static int solo_enc_open(struct file *file)
766 &solo_enc->lock, 766 &solo_enc->lock,
767 V4L2_BUF_TYPE_VIDEO_CAPTURE, 767 V4L2_BUF_TYPE_VIDEO_CAPTURE,
768 V4L2_FIELD_INTERLACED, 768 V4L2_FIELD_INTERLACED,
769 sizeof(struct videobuf_buffer), fh); 769 sizeof(struct videobuf_buffer), fh, NULL);
770 770
771 spin_unlock(&solo_enc->lock); 771 spin_unlock(&solo_enc->lock);
772 772
diff --git a/drivers/staging/solo6x10/solo6010-v4l2.c b/drivers/staging/solo6x10/solo6010-v4l2.c
index 9731fa02b5e..6ffd21de837 100644
--- a/drivers/staging/solo6x10/solo6010-v4l2.c
+++ b/drivers/staging/solo6x10/solo6010-v4l2.c
@@ -437,7 +437,7 @@ static int solo_v4l2_open(struct file *file)
437 &solo_dev->pdev->dev, &fh->slock, 437 &solo_dev->pdev->dev, &fh->slock,
438 V4L2_BUF_TYPE_VIDEO_CAPTURE, 438 V4L2_BUF_TYPE_VIDEO_CAPTURE,
439 SOLO_DISP_PIX_FIELD, 439 SOLO_DISP_PIX_FIELD,
440 sizeof(struct videobuf_buffer), fh); 440 sizeof(struct videobuf_buffer), fh, NULL);
441 441
442 return 0; 442 return 0;
443} 443}
diff --git a/drivers/staging/stradis/stradis.c b/drivers/staging/stradis/stradis.c
index a057824e7eb..807dd7eb748 100644
--- a/drivers/staging/stradis/stradis.c
+++ b/drivers/staging/stradis/stradis.c
@@ -1286,6 +1286,7 @@ static long saa_ioctl(struct file *file,
1286 case VIDIOCGCAP: 1286 case VIDIOCGCAP:
1287 { 1287 {
1288 struct video_capability b; 1288 struct video_capability b;
1289 memset(&b, 0, sizeof(b));
1289 strcpy(b.name, saa->video_dev.name); 1290 strcpy(b.name, saa->video_dev.name);
1290 b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY | 1291 b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY |
1291 VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM | 1292 VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM |
@@ -1416,6 +1417,7 @@ static long saa_ioctl(struct file *file,
1416 case VIDIOCGWIN: 1417 case VIDIOCGWIN:
1417 { 1418 {
1418 struct video_window vw; 1419 struct video_window vw;
1420 memset(&vw, 0, sizeof(vw));
1419 vw.x = saa->win.x; 1421 vw.x = saa->win.x;
1420 vw.y = saa->win.y; 1422 vw.y = saa->win.y;
1421 vw.width = saa->win.width; 1423 vw.width = saa->win.width;
@@ -1448,6 +1450,7 @@ static long saa_ioctl(struct file *file,
1448 case VIDIOCGFBUF: 1450 case VIDIOCGFBUF:
1449 { 1451 {
1450 struct video_buffer v; 1452 struct video_buffer v;
1453 memset(&v, 0, sizeof(v));
1451 v.base = (void *)saa->win.vidadr; 1454 v.base = (void *)saa->win.vidadr;
1452 v.height = saa->win.sheight; 1455 v.height = saa->win.sheight;
1453 v.width = saa->win.swidth; 1456 v.width = saa->win.swidth;
@@ -1492,6 +1495,7 @@ static long saa_ioctl(struct file *file,
1492 case VIDIOCGAUDIO: 1495 case VIDIOCGAUDIO:
1493 { 1496 {
1494 struct video_audio v; 1497 struct video_audio v;
1498 memset(&v, 0, sizeof(v));
1495 v = saa->audio_dev; 1499 v = saa->audio_dev;
1496 v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE); 1500 v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE);
1497 v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME; 1501 v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME;
@@ -1534,6 +1538,7 @@ static long saa_ioctl(struct file *file,
1534 case VIDIOCGUNIT: 1538 case VIDIOCGUNIT:
1535 { 1539 {
1536 struct video_unit vu; 1540 struct video_unit vu;
1541 memset(&vu, 0, sizeof(vu));
1537 vu.video = saa->video_dev.minor; 1542 vu.video = saa->video_dev.minor;
1538 vu.vbi = VIDEO_NO_UNIT; 1543 vu.vbi = VIDEO_NO_UNIT;
1539 vu.radio = VIDEO_NO_UNIT; 1544 vu.radio = VIDEO_NO_UNIT;
@@ -1888,6 +1893,7 @@ static int saa_open(struct file *file)
1888 1893
1889 saa->user++; 1894 saa->user++;
1890 if (saa->user > 1) { 1895 if (saa->user > 1) {
1896 saa->user--;
1891 unlock_kernel(); 1897 unlock_kernel();
1892 return 0; /* device open already, don't reset */ 1898 return 0; /* device open already, don't reset */
1893 } 1899 }
@@ -2000,10 +2006,13 @@ static int __devinit configure_saa7146(struct pci_dev *pdev, int num)
2000 if (retval < 0) { 2006 if (retval < 0) {
2001 dev_err(&pdev->dev, "%d: error in registering video device!\n", 2007 dev_err(&pdev->dev, "%d: error in registering video device!\n",
2002 num); 2008 num);
2003 goto errio; 2009 goto errirq;
2004 } 2010 }
2005 2011
2006 return 0; 2012 return 0;
2013
2014errirq:
2015 free_irq(saa->irq, saa);
2007errio: 2016errio:
2008 iounmap(saa->saa7146_mem); 2017 iounmap(saa->saa7146_mem);
2009err: 2018err:
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index ff64d464143..93de4f2e8bf 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -6,7 +6,6 @@ menuconfig TIDSPBRIDGE
6 tristate "DSP Bridge driver" 6 tristate "DSP Bridge driver"
7 depends on ARCH_OMAP3 7 depends on ARCH_OMAP3
8 select OMAP_MBOX_FWK 8 select OMAP_MBOX_FWK
9 select OMAP_IOMMU
10 help 9 help
11 DSP/BIOS Bridge is designed for platforms that contain a GPP and 10 DSP/BIOS Bridge is designed for platforms that contain a GPP and
12 one or more attached DSPs. The GPP is considered the master or 11 one or more attached DSPs. The GPP is considered the master or
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index 50decc2935c..41c644c3318 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -2,18 +2,19 @@ obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
2 2
3libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o 3libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o
4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ 4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
5 core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \ 5 core/tiomap3430_pwr.o core/tiomap_io.o \
6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o 6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
7libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ 7libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \
8 pmgr/cmm.o pmgr/dbll.o 8 pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
9librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ 9librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \
10 rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ 10 rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \
11 rmgr/nldr.o rmgr/drv_interface.o 11 rmgr/nldr.o rmgr/drv_interface.o
12libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \ 12libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
13 dynload/tramp.o 13 dynload/tramp.o
14libhw = hw/hw_mmu.o
14 15
15bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ 16bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
16 $(libdload) 17 $(libdload) $(libhw)
17 18
18#Machine dependent 19#Machine dependent
19ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ 20ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
index 8ae263387a8..16723cd3483 100644
--- a/drivers/staging/tidspbridge/core/_deh.h
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -27,8 +27,9 @@
27struct deh_mgr { 27struct deh_mgr {
28 struct bridge_dev_context *hbridge_context; /* Bridge context. */ 28 struct bridge_dev_context *hbridge_context; /* Bridge context. */
29 struct ntfy_object *ntfy_obj; /* NTFY object */ 29 struct ntfy_object *ntfy_obj; /* NTFY object */
30};
31 30
32int mmu_fault_isr(struct iommu *mmu); 31 /* MMU Fault DPC */
32 struct tasklet_struct dpc_tasklet;
33};
33 34
34#endif /* _DEH_ */ 35#endif /* _DEH_ */
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index e0a801c1cb9..1c1f157e167 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -23,8 +23,8 @@
23#include <plat/clockdomain.h> 23#include <plat/clockdomain.h>
24#include <mach-omap2/prm-regbits-34xx.h> 24#include <mach-omap2/prm-regbits-34xx.h>
25#include <mach-omap2/cm-regbits-34xx.h> 25#include <mach-omap2/cm-regbits-34xx.h>
26#include <dspbridge/dsp-mmu.h>
27#include <dspbridge/devdefs.h> 26#include <dspbridge/devdefs.h>
27#include <hw_defs.h>
28#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ 28#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */
29#include <dspbridge/sync.h> 29#include <dspbridge/sync.h>
30#include <dspbridge/clk.h> 30#include <dspbridge/clk.h>
@@ -306,18 +306,6 @@ static const struct bpwr_clk_t bpwr_clks[] = {
306 306
307#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index))) 307#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index)))
308 308
309struct shm_segs {
310 u32 seg0_da;
311 u32 seg0_pa;
312 u32 seg0_va;
313 u32 seg0_size;
314 u32 seg1_da;
315 u32 seg1_pa;
316 u32 seg1_va;
317 u32 seg1_size;
318};
319
320
321/* This Bridge driver's device context: */ 309/* This Bridge driver's device context: */
322struct bridge_dev_context { 310struct bridge_dev_context {
323 struct dev_object *hdev_obj; /* Handle to Bridge device object. */ 311 struct dev_object *hdev_obj; /* Handle to Bridge device object. */
@@ -328,6 +316,7 @@ struct bridge_dev_context {
328 */ 316 */
329 u32 dw_dsp_ext_base_addr; /* See the comment above */ 317 u32 dw_dsp_ext_base_addr; /* See the comment above */
330 u32 dw_api_reg_base; /* API mem map'd registers */ 318 u32 dw_api_reg_base; /* API mem map'd registers */
319 void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */
331 u32 dw_api_clk_base; /* CLK Registers */ 320 u32 dw_api_clk_base; /* CLK Registers */
332 u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */ 321 u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */
333 u32 dw_public_rhea; /* Pub Rhea */ 322 u32 dw_public_rhea; /* Pub Rhea */
@@ -339,8 +328,7 @@ struct bridge_dev_context {
339 u32 dw_internal_size; /* Internal memory size */ 328 u32 dw_internal_size; /* Internal memory size */
340 329
341 struct omap_mbox *mbox; /* Mail box handle */ 330 struct omap_mbox *mbox; /* Mail box handle */
342 struct iommu *dsp_mmu; /* iommu for iva2 handler */ 331
343 struct shm_segs sh_s;
344 struct cfg_hostres *resources; /* Host Resources */ 332 struct cfg_hostres *resources; /* Host Resources */
345 333
346 /* 334 /*
@@ -353,6 +341,7 @@ struct bridge_dev_context {
353 341
354 /* TC Settings */ 342 /* TC Settings */
355 bool tc_word_swap_on; /* Traffic Controller Word Swap */ 343 bool tc_word_swap_on; /* Traffic Controller Word Swap */
344 struct pg_table_attrs *pt_attrs;
356 u32 dsp_per_clks; 345 u32 dsp_per_clks;
357}; 346};
358 347
diff --git a/drivers/staging/tidspbridge/core/dsp-mmu.c b/drivers/staging/tidspbridge/core/dsp-mmu.c
deleted file mode 100644
index 983c95adc8f..00000000000
--- a/drivers/staging/tidspbridge/core/dsp-mmu.c
+++ /dev/null
@@ -1,317 +0,0 @@
1/*
2 * dsp-mmu.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP iommu.
7 *
8 * Copyright (C) 2010 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <dspbridge/host_os.h>
20#include <plat/dmtimer.h>
21#include <dspbridge/dbdefs.h>
22#include <dspbridge/dev.h>
23#include <dspbridge/io_sm.h>
24#include <dspbridge/dspdeh.h>
25#include "_tiomap.h"
26
27#include <dspbridge/dsp-mmu.h>
28
29#define MMU_CNTL_TWL_EN (1 << 2)
30
31static struct tasklet_struct mmu_tasklet;
32
33#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
34static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
35{
36 void *dummy_addr;
37 u32 fa, tmp;
38 struct iotlb_entry e;
39 struct iommu *mmu = dev_context->dsp_mmu;
40 dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
41
42 /*
43 * Before acking the MMU fault, let's make sure MMU can only
44 * access entry #0. Then add a new entry so that the DSP OS
45 * can continue in order to dump the stack.
46 */
47 tmp = iommu_read_reg(mmu, MMU_CNTL);
48 tmp &= ~MMU_CNTL_TWL_EN;
49 iommu_write_reg(mmu, tmp, MMU_CNTL);
50 fa = iommu_read_reg(mmu, MMU_FAULT_AD);
51 e.da = fa & PAGE_MASK;
52 e.pa = virt_to_phys(dummy_addr);
53 e.valid = 1;
54 e.prsvd = 1;
55 e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
56 e.endian = MMU_RAM_ENDIAN_LITTLE;
57 e.elsz = MMU_RAM_ELSZ_32;
58 e.mixed = 0;
59
60 load_iotlb_entry(mmu, &e);
61
62 dsp_clk_enable(DSP_CLK_GPT8);
63
64 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
65
66 /* Clear MMU interrupt */
67 tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
68 iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
69
70 dump_dsp_stack(dev_context);
71 dsp_clk_disable(DSP_CLK_GPT8);
72
73 iopgtable_clear_entry(mmu, fa);
74 free_page((unsigned long)dummy_addr);
75}
76#endif
77
78
79static void fault_tasklet(unsigned long data)
80{
81 struct iommu *mmu = (struct iommu *)data;
82 struct bridge_dev_context *dev_ctx;
83 struct deh_mgr *dm;
84 u32 fa;
85 dev_get_deh_mgr(dev_get_first(), &dm);
86 dev_get_bridge_context(dev_get_first(), &dev_ctx);
87
88 if (!dm || !dev_ctx)
89 return;
90
91 fa = iommu_read_reg(mmu, MMU_FAULT_AD);
92
93#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
94 print_dsp_trace_buffer(dev_ctx);
95 dump_dl_modules(dev_ctx);
96 mmu_fault_print_stack(dev_ctx);
97#endif
98
99 bridge_deh_notify(dm, DSP_MMUFAULT, fa);
100}
101
102/*
103 * ======== mmu_fault_isr ========
104 * ISR to be triggered by a DSP MMU fault interrupt.
105 */
106static int mmu_fault_callback(struct iommu *mmu)
107{
108 if (!mmu)
109 return -EPERM;
110
111 iommu_write_reg(mmu, 0, MMU_IRQENABLE);
112 tasklet_schedule(&mmu_tasklet);
113 return 0;
114}
115
116/**
117 * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
118 *
119 * This function initialize dsp mmu module and returns a struct iommu
120 * handle to use it for dsp maps.
121 *
122 */
123struct iommu *dsp_mmu_init()
124{
125 struct iommu *mmu;
126
127 mmu = iommu_get("iva2");
128
129 if (!IS_ERR(mmu)) {
130 tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
131 mmu->isr = mmu_fault_callback;
132 }
133
134 return mmu;
135}
136
137/**
138 * dsp_mmu_exit() - destroy dsp mmu module
139 * @mmu: Pointer to iommu handle.
140 *
141 * This function destroys dsp mmu module.
142 *
143 */
144void dsp_mmu_exit(struct iommu *mmu)
145{
146 if (mmu)
147 iommu_put(mmu);
148 tasklet_kill(&mmu_tasklet);
149}
150
151/**
152 * user_va2_pa() - get physical address from userspace address.
153 * @mm: mm_struct Pointer of the process.
154 * @address: Virtual user space address.
155 *
156 */
157static u32 user_va2_pa(struct mm_struct *mm, u32 address)
158{
159 pgd_t *pgd;
160 pmd_t *pmd;
161 pte_t *ptep, pte;
162
163 pgd = pgd_offset(mm, address);
164 if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
165 pmd = pmd_offset(pgd, address);
166 if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
167 ptep = pte_offset_map(pmd, address);
168 if (ptep) {
169 pte = *ptep;
170 if (pte_present(pte))
171 return pte & PAGE_MASK;
172 }
173 }
174 }
175
176 return 0;
177}
178
179/**
180 * get_io_pages() - pin and get pages of io user's buffer.
181 * @mm: mm_struct Pointer of the process.
182 * @uva: Virtual user space address.
183 * @pages Pages to be pined.
184 * @usr_pgs struct page array pointer where the user pages will be stored
185 *
186 */
187static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
188 struct page **usr_pgs)
189{
190 u32 pa;
191 int i;
192 struct page *pg;
193
194 for (i = 0; i < pages; i++) {
195 pa = user_va2_pa(mm, uva);
196
197 if (!pfn_valid(__phys_to_pfn(pa)))
198 break;
199
200 pg = phys_to_page(pa);
201 usr_pgs[i] = pg;
202 get_page(pg);
203 }
204 return i;
205}
206
207/**
208 * user_to_dsp_map() - maps user to dsp virtual address
209 * @mmu: Pointer to iommu handle.
210 * @uva: Virtual user space address.
211 * @da DSP address
212 * @size Buffer size to map.
213 * @usr_pgs struct page array pointer where the user pages will be stored
214 *
215 * This function maps a user space buffer into DSP virtual address.
216 *
217 */
218u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
219 struct page **usr_pgs)
220{
221 int res, w;
222 unsigned pages;
223 int i;
224 struct vm_area_struct *vma;
225 struct mm_struct *mm = current->mm;
226 struct sg_table *sgt;
227 struct scatterlist *sg;
228
229 if (!size || !usr_pgs)
230 return -EINVAL;
231
232 pages = size / PG_SIZE4K;
233
234 down_read(&mm->mmap_sem);
235 vma = find_vma(mm, uva);
236 while (vma && (uva + size > vma->vm_end))
237 vma = find_vma(mm, vma->vm_end + 1);
238
239 if (!vma) {
240 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
241 __func__, uva, size);
242 up_read(&mm->mmap_sem);
243 return -EINVAL;
244 }
245 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
246 w = 1;
247
248 if (vma->vm_flags & VM_IO)
249 i = get_io_pages(mm, uva, pages, usr_pgs);
250 else
251 i = get_user_pages(current, mm, uva, pages, w, 1,
252 usr_pgs, NULL);
253 up_read(&mm->mmap_sem);
254
255 if (i < 0)
256 return i;
257
258 if (i < pages) {
259 res = -EFAULT;
260 goto err_pages;
261 }
262
263 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
264 if (!sgt) {
265 res = -ENOMEM;
266 goto err_pages;
267 }
268
269 res = sg_alloc_table(sgt, pages, GFP_KERNEL);
270
271 if (res < 0)
272 goto err_sg;
273
274 for_each_sg(sgt->sgl, sg, sgt->nents, i)
275 sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
276
277 da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
278
279 if (!IS_ERR_VALUE(da))
280 return da;
281 res = (int)da;
282
283 sg_free_table(sgt);
284err_sg:
285 kfree(sgt);
286 i = pages;
287err_pages:
288 while (i--)
289 put_page(usr_pgs[i]);
290 return res;
291}
292
293/**
294 * user_to_dsp_unmap() - unmaps DSP virtual buffer.
295 * @mmu: Pointer to iommu handle.
296 * @da DSP address
297 *
298 * This function unmaps a user space buffer into DSP virtual address.
299 *
300 */
301int user_to_dsp_unmap(struct iommu *mmu, u32 da)
302{
303 unsigned i;
304 struct sg_table *sgt;
305 struct scatterlist *sg;
306
307 sgt = iommu_vunmap(mmu, da);
308 if (!sgt)
309 return -EFAULT;
310
311 for_each_sg(sgt->sgl, sg, sgt->nents, i)
312 put_page(sg_page(sg));
313 sg_free_table(sgt);
314 kfree(sgt);
315
316 return 0;
317}
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 194badaba0e..571864555dd 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -39,6 +39,10 @@
39#include <dspbridge/ntfy.h> 39#include <dspbridge/ntfy.h>
40#include <dspbridge/sync.h> 40#include <dspbridge/sync.h>
41 41
42/* Hardware Abstraction Layer */
43#include <hw_defs.h>
44#include <hw_mmu.h>
45
42/* Bridge Driver */ 46/* Bridge Driver */
43#include <dspbridge/dspdeh.h> 47#include <dspbridge/dspdeh.h>
44#include <dspbridge/dspio.h> 48#include <dspbridge/dspio.h>
@@ -287,7 +291,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
287 struct cod_manager *cod_man; 291 struct cod_manager *cod_man;
288 struct chnl_mgr *hchnl_mgr; 292 struct chnl_mgr *hchnl_mgr;
289 struct msg_mgr *hmsg_mgr; 293 struct msg_mgr *hmsg_mgr;
290 struct shm_segs *sm_sg;
291 u32 ul_shm_base; 294 u32 ul_shm_base;
292 u32 ul_shm_base_offset; 295 u32 ul_shm_base_offset;
293 u32 ul_shm_limit; 296 u32 ul_shm_limit;
@@ -310,9 +313,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
310 struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB]; 313 struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
311 struct cfg_hostres *host_res; 314 struct cfg_hostres *host_res;
312 struct bridge_dev_context *pbridge_context; 315 struct bridge_dev_context *pbridge_context;
316 u32 map_attrs;
313 u32 shm0_end; 317 u32 shm0_end;
314 u32 ul_dyn_ext_base; 318 u32 ul_dyn_ext_base;
315 u32 ul_seg1_size = 0; 319 u32 ul_seg1_size = 0;
320 u32 pa_curr = 0;
321 u32 va_curr = 0;
322 u32 gpp_va_curr = 0;
323 u32 num_bytes = 0;
324 u32 all_bits = 0;
325 u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
326 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
327 };
316 328
317 status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context); 329 status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
318 if (!pbridge_context) { 330 if (!pbridge_context) {
@@ -325,8 +337,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
325 status = -EFAULT; 337 status = -EFAULT;
326 goto func_end; 338 goto func_end;
327 } 339 }
328 sm_sg = &pbridge_context->sh_s;
329
330 status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man); 340 status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
331 if (!cod_man) { 341 if (!cod_man) {
332 status = -EFAULT; 342 status = -EFAULT;
@@ -461,14 +471,129 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
461 if (status) 471 if (status)
462 goto func_end; 472 goto func_end;
463 473
464 sm_sg->seg1_pa = ul_gpp_pa; 474 pa_curr = ul_gpp_pa;
465 sm_sg->seg1_da = ul_dyn_ext_base; 475 va_curr = ul_dyn_ext_base * hio_mgr->word_size;
466 sm_sg->seg1_va = ul_gpp_va; 476 gpp_va_curr = ul_gpp_va;
467 sm_sg->seg1_size = ul_seg1_size; 477 num_bytes = ul_seg1_size;
468 sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size; 478
469 sm_sg->seg0_da = ul_dsp_va; 479 /*
470 sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size; 480 * Try to fit into TLB entries. If not possible, push them to page
471 sm_sg->seg0_size = ul_seg_size; 481 * tables. It is quite possible that if sections are not on
482 * bigger page boundary, we may end up making several small pages.
483 * So, push them onto page tables, if that is the case.
484 */
485 map_attrs = 0x00000000;
486 map_attrs = DSP_MAPLITTLEENDIAN;
487 map_attrs |= DSP_MAPPHYSICALADDR;
488 map_attrs |= DSP_MAPELEMSIZE32;
489 map_attrs |= DSP_MAPDONOTLOCK;
490
491 while (num_bytes) {
492 /*
493 * To find the max. page size with which both PA & VA are
494 * aligned.
495 */
496 all_bits = pa_curr | va_curr;
497 dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
498 "num_bytes %x\n", all_bits, pa_curr, va_curr,
499 num_bytes);
500 for (i = 0; i < 4; i++) {
501 if ((num_bytes >= page_size[i]) && ((all_bits &
502 (page_size[i] -
503 1)) == 0)) {
504 status =
505 hio_mgr->intf_fxns->
506 pfn_brd_mem_map(hio_mgr->hbridge_context,
507 pa_curr, va_curr,
508 page_size[i], map_attrs,
509 NULL);
510 if (status)
511 goto func_end;
512 pa_curr += page_size[i];
513 va_curr += page_size[i];
514 gpp_va_curr += page_size[i];
515 num_bytes -= page_size[i];
516 /*
517 * Don't try smaller sizes. Hopefully we have
518 * reached an address aligned to a bigger page
519 * size.
520 */
521 break;
522 }
523 }
524 }
525 pa_curr += ul_pad_size;
526 va_curr += ul_pad_size;
527 gpp_va_curr += ul_pad_size;
528
529 /* Configure the TLB entries for the next cacheable segment */
530 num_bytes = ul_seg_size;
531 va_curr = ul_dsp_va * hio_mgr->word_size;
532 while (num_bytes) {
533 /*
534 * To find the max. page size with which both PA & VA are
535 * aligned.
536 */
537 all_bits = pa_curr | va_curr;
538 dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
539 "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
540 va_curr, num_bytes);
541 for (i = 0; i < 4; i++) {
542 if (!(num_bytes >= page_size[i]) ||
543 !((all_bits & (page_size[i] - 1)) == 0))
544 continue;
545 if (ndx < MAX_LOCK_TLB_ENTRIES) {
546 /*
547 * This is the physical address written to
548 * DSP MMU.
549 */
550 ae_proc[ndx].ul_gpp_pa = pa_curr;
551 /*
552 * This is the virtual uncached ioremapped
553 * address!!!
554 */
555 ae_proc[ndx].ul_gpp_va = gpp_va_curr;
556 ae_proc[ndx].ul_dsp_va =
557 va_curr / hio_mgr->word_size;
558 ae_proc[ndx].ul_size = page_size[i];
559 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
560 ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
561 ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
562 dev_dbg(bridge, "shm MMU TLB entry PA %x"
563 " VA %x DSP_VA %x Size %x\n",
564 ae_proc[ndx].ul_gpp_pa,
565 ae_proc[ndx].ul_gpp_va,
566 ae_proc[ndx].ul_dsp_va *
567 hio_mgr->word_size, page_size[i]);
568 ndx++;
569 } else {
570 status =
571 hio_mgr->intf_fxns->
572 pfn_brd_mem_map(hio_mgr->hbridge_context,
573 pa_curr, va_curr,
574 page_size[i], map_attrs,
575 NULL);
576 dev_dbg(bridge,
577 "shm MMU PTE entry PA %x"
578 " VA %x DSP_VA %x Size %x\n",
579 ae_proc[ndx].ul_gpp_pa,
580 ae_proc[ndx].ul_gpp_va,
581 ae_proc[ndx].ul_dsp_va *
582 hio_mgr->word_size, page_size[i]);
583 if (status)
584 goto func_end;
585 }
586 pa_curr += page_size[i];
587 va_curr += page_size[i];
588 gpp_va_curr += page_size[i];
589 num_bytes -= page_size[i];
590 /*
591 * Don't try smaller sizes. Hopefully we have reached
592 * an address aligned to a bigger page size.
593 */
594 break;
595 }
596 }
472 597
473 /* 598 /*
474 * Copy remaining entries from CDB. All entries are 1 MB and 599 * Copy remaining entries from CDB. All entries are 1 MB and
@@ -509,12 +634,38 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
509 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, 634 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
510 ae_proc[ndx].ul_dsp_va); 635 ae_proc[ndx].ul_dsp_va);
511 ndx++; 636 ndx++;
637 } else {
638 status = hio_mgr->intf_fxns->pfn_brd_mem_map
639 (hio_mgr->hbridge_context,
640 hio_mgr->ext_proc_info.ty_tlb[i].
641 ul_gpp_phys,
642 hio_mgr->ext_proc_info.ty_tlb[i].
643 ul_dsp_virt, 0x100000, map_attrs,
644 NULL);
512 } 645 }
513 } 646 }
514 if (status) 647 if (status)
515 goto func_end; 648 goto func_end;
516 } 649 }
517 650
651 map_attrs = 0x00000000;
652 map_attrs = DSP_MAPLITTLEENDIAN;
653 map_attrs |= DSP_MAPPHYSICALADDR;
654 map_attrs |= DSP_MAPELEMSIZE32;
655 map_attrs |= DSP_MAPDONOTLOCK;
656
657 /* Map the L4 peripherals */
658 i = 0;
659 while (l4_peripheral_table[i].phys_addr) {
660 status = hio_mgr->intf_fxns->pfn_brd_mem_map
661 (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
662 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
663 map_attrs, NULL);
664 if (status)
665 goto func_end;
666 i++;
667 }
668
518 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { 669 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
519 ae_proc[i].ul_dsp_va = 0; 670 ae_proc[i].ul_dsp_va = 0;
520 ae_proc[i].ul_gpp_pa = 0; 671 ae_proc[i].ul_gpp_pa = 0;
@@ -537,12 +688,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
537 status = -EFAULT; 688 status = -EFAULT;
538 goto func_end; 689 goto func_end;
539 } else { 690 } else {
540 if (sm_sg->seg0_da > ul_shm_base) { 691 if (ae_proc[0].ul_dsp_va > ul_shm_base) {
541 status = -EPERM; 692 status = -EPERM;
542 goto func_end; 693 goto func_end;
543 } 694 }
544 /* ul_shm_base may not be at ul_dsp_va address */ 695 /* ul_shm_base may not be at ul_dsp_va address */
545 ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) * 696 ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
546 hio_mgr->word_size; 697 hio_mgr->word_size;
547 /* 698 /*
548 * bridge_dev_ctrl() will set dev context dsp-mmu info. In 699 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
@@ -566,7 +717,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
566 goto func_end; 717 goto func_end;
567 } 718 }
568 /* Register SM */ 719 /* Register SM */
569 status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa); 720 status =
721 register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
570 } 722 }
571 723
572 hio_mgr->shared_mem = (struct shm *)ul_shm_base; 724 hio_mgr->shared_mem = (struct shm *)ul_shm_base;
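
Both mapping loops added to bridge_io_on_loaded() repeatedly pick the largest MMU page size that the current physical and virtual addresses are jointly aligned to; OR-ing the two addresses reduces that to a single mask test, since a low bit set in either address disqualifies the page size. A self-contained sketch of the selection step, using the same 16 MB/1 MB/64 KB/4 KB candidates but otherwise hypothetical values:

#include <stdio.h>

/* Candidate MMU page sizes, largest first (16 MB, 1 MB, 64 KB, 4 KB). */
static const unsigned long page_size[] = {
	0x1000000, 0x100000, 0x10000, 0x1000
};

/* Return the largest page size usable for the next chunk, or 0 if none fits. */
static unsigned long pick_page_size(unsigned long pa, unsigned long va,
				    unsigned long bytes)
{
	unsigned long all_bits = pa | va;
	int i;

	for (i = 0; i < 4; i++) {
		if (bytes >= page_size[i] &&
		    (all_bits & (page_size[i] - 1)) == 0)
			return page_size[i];
	}
	return 0;
}

int main(void)
{
	unsigned long pa = 0x87000000, va = 0x20000000, bytes = 0x300000;

	while (bytes) {
		unsigned long sz = pick_page_size(pa, va, bytes);

		if (!sz)
			break;	/* caller would have to fall back or error out */
		printf("map 0x%lx -> 0x%lx, %lu bytes\n", pa, va, sz);
		pa += sz;
		va += sz;
		bytes -= sz;
	}
	return 0;
}

In the driver, each chosen chunk is then either programmed into a locked TLB entry or pushed to the page tables via pfn_brd_mem_map() before the loop advances.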
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index f22bc12bc0d..1be081f917a 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -23,7 +23,6 @@
23#include <dspbridge/host_os.h> 23#include <dspbridge/host_os.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/mmzone.h> 25#include <linux/mmzone.h>
26#include <plat/control.h>
27 26
28/* ----------------------------------- DSP/BIOS Bridge */ 27/* ----------------------------------- DSP/BIOS Bridge */
29#include <dspbridge/dbdefs.h> 28#include <dspbridge/dbdefs.h>
@@ -35,6 +34,10 @@
35#include <dspbridge/drv.h> 34#include <dspbridge/drv.h>
36#include <dspbridge/sync.h> 35#include <dspbridge/sync.h>
37 36
37/* ------------------------------------ Hardware Abstraction Layer */
38#include <hw_defs.h>
39#include <hw_mmu.h>
40
38/* ----------------------------------- Link Driver */ 41/* ----------------------------------- Link Driver */
39#include <dspbridge/dspdefs.h> 42#include <dspbridge/dspdefs.h>
40#include <dspbridge/dspchnl.h> 43#include <dspbridge/dspchnl.h>
@@ -47,6 +50,7 @@
47/* ----------------------------------- Platform Manager */ 50/* ----------------------------------- Platform Manager */
48#include <dspbridge/dev.h> 51#include <dspbridge/dev.h>
49#include <dspbridge/dspapi.h> 52#include <dspbridge/dspapi.h>
53#include <dspbridge/dmm.h>
50#include <dspbridge/wdt.h> 54#include <dspbridge/wdt.h>
51 55
52/* ----------------------------------- Local */ 56/* ----------------------------------- Local */
@@ -67,6 +71,20 @@
67#define MMU_SMALL_PAGE_MASK 0xFFFFF000 71#define MMU_SMALL_PAGE_MASK 0xFFFFF000
68#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 72#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
69#define PAGES_II_LVL_TABLE 512 73#define PAGES_II_LVL_TABLE 512
74#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
75
76/*
77 * This is a totally ugly layer violation, but needed until
78 * omap_ctrl_set_dsp_boot*() are provided.
79 */
80#define OMAP3_IVA2_BOOTMOD_IDLE 1
81#define OMAP2_CONTROL_GENERAL 0x270
82#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
83#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
84
85#define OMAP343X_CTRL_REGADDR(reg) \
86 OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
87
70 88
71/* Forward Declarations: */ 89/* Forward Declarations: */
72static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); 90static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
@@ -91,6 +109,12 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
91static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, 109static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
92 u8 *host_buff, u32 dsp_addr, 110 u8 *host_buff, u32 dsp_addr,
93 u32 ul_num_bytes, u32 mem_type); 111 u32 ul_num_bytes, u32 mem_type);
112static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
113 u32 ul_mpu_addr, u32 virt_addr,
114 u32 ul_num_bytes, u32 ul_map_attr,
115 struct page **mapped_pages);
116static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
117 u32 virt_addr, u32 ul_num_bytes);
94static int bridge_dev_create(struct bridge_dev_context 118static int bridge_dev_create(struct bridge_dev_context
95 **dev_cntxt, 119 **dev_cntxt,
96 struct dev_object *hdev_obj, 120 struct dev_object *hdev_obj,
@@ -98,8 +122,57 @@ static int bridge_dev_create(struct bridge_dev_context
98static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, 122static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
99 u32 dw_cmd, void *pargs); 123 u32 dw_cmd, void *pargs);
100static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); 124static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
125static u32 user_va2_pa(struct mm_struct *mm, u32 address);
126static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
127 u32 va, u32 size,
128 struct hw_mmu_map_attrs_t *map_attrs);
129static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
130 u32 size, struct hw_mmu_map_attrs_t *attrs);
131static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
132 u32 ul_mpu_addr, u32 virt_addr,
133 u32 ul_num_bytes,
134 struct hw_mmu_map_attrs_t *hw_attrs);
135
101bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); 136bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
102 137
138/* ----------------------------------- Globals */
139
140/* Attributes of L2 page tables for DSP MMU */
141struct page_info {
142 u32 num_entries; /* Number of valid PTEs in the L2 PT */
143};
144
145/* Attributes used to manage the DSP MMU page tables */
146struct pg_table_attrs {
147 spinlock_t pg_lock; /* Critical section object handle */
148
149 u32 l1_base_pa; /* Physical address of the L1 PT */
150 u32 l1_base_va; /* Virtual address of the L1 PT */
151 u32 l1_size; /* Size of the L1 PT */
152 u32 l1_tbl_alloc_pa;
153 /* Physical address of Allocated mem for L1 table. May not be aligned */
154 u32 l1_tbl_alloc_va;
155 /* Virtual address of Allocated mem for L1 table. May not be aligned */
156 u32 l1_tbl_alloc_sz;
157 /* Size of consistent memory allocated for L1 table.
158 * May not be aligned */
159
160 u32 l2_base_pa; /* Physical address of the L2 PT */
161 u32 l2_base_va; /* Virtual address of the L2 PT */
162 u32 l2_size; /* Size of the L2 PT */
163 u32 l2_tbl_alloc_pa;
164 /* Physical address of Allocated mem for L2 table. May not be aligned */
165 u32 l2_tbl_alloc_va;
166 /* Virtual address of Allocated mem for L2 table. May not be aligned */
167 u32 l2_tbl_alloc_sz;
168 /* Size of consistent memory allocated for L2 table.
169 * May not be aligned */
170
171 u32 l2_num_pages; /* Number of allocated L2 PT */
172 /* Array [l2_num_pages] of L2 PT info structs */
173 struct page_info *pg_info;
174};
175
103/* 176/*
104 * This Bridge driver's function interface table. 177 * This Bridge driver's function interface table.
105 */ 178 */
@@ -119,6 +192,8 @@ static struct bridge_drv_interface drv_interface_fxns = {
119 bridge_brd_set_state, 192 bridge_brd_set_state,
120 bridge_brd_mem_copy, 193 bridge_brd_mem_copy,
121 bridge_brd_mem_write, 194 bridge_brd_mem_write,
195 bridge_brd_mem_map,
196 bridge_brd_mem_un_map,
122 /* The following CHNL functions are provided by chnl_io.lib: */ 197 /* The following CHNL functions are provided by chnl_io.lib: */
123 bridge_chnl_create, 198 bridge_chnl_create,
124 bridge_chnl_destroy, 199 bridge_chnl_destroy,
@@ -148,6 +223,27 @@ static struct bridge_drv_interface drv_interface_fxns = {
148 bridge_msg_set_queue_id, 223 bridge_msg_set_queue_id,
149}; 224};
150 225
226static inline void flush_all(struct bridge_dev_context *dev_context)
227{
228 if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
229 dev_context->dw_brd_state == BRD_HIBERNATION)
230 wake_dsp(dev_context, NULL);
231
232 hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
233}
234
235static void bad_page_dump(u32 pa, struct page *pg)
236{
237 pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
238 pr_emerg("Bad page state in process '%s'\n"
239 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
240 "Backtrace:\n",
241 current->comm, pg, (int)(2 * sizeof(unsigned long)),
242 (unsigned long)pg->flags, pg->mapping,
243 page_mapcount(pg), page_count(pg));
244 dump_stack();
245}
246
151/* 247/*
152 * ======== bridge_drv_entry ======== 248 * ======== bridge_drv_entry ========
153 * purpose: 249 * purpose:
@@ -203,7 +299,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
203 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, 299 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
204 OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); 300 OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
205 } 301 }
206 302 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
303 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
207 dsp_clk_enable(DSP_CLK_IVA2); 304 dsp_clk_enable(DSP_CLK_IVA2);
208 305
209 /* set the device state to IDLE */ 306 /* set the device state to IDLE */
@@ -274,17 +371,14 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
274{ 371{
275 int status = 0; 372 int status = 0;
276 struct bridge_dev_context *dev_context = dev_ctxt; 373 struct bridge_dev_context *dev_context = dev_ctxt;
277 struct iommu *mmu = NULL;
278 struct shm_segs *sm_sg;
279 int l4_i = 0, tlb_i = 0;
280 u32 sg0_da = 0, sg1_da = 0;
281 struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
282 u32 dw_sync_addr = 0; 374 u32 dw_sync_addr = 0;
283 u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ 375 u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */
284 u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ 376 u32 ul_shm_base_virt; /* Dsp Virt SM base addr */
285 u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ 377 u32 ul_tlb_base_virt; /* Base of MMU TLB entry */
286 /* Offset of shm_base_virt from tlb_base_virt */ 378 /* Offset of shm_base_virt from tlb_base_virt */
287 u32 ul_shm_offset_virt; 379 u32 ul_shm_offset_virt;
380 s32 entry_ndx;
381 s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */
288 struct cfg_hostres *resources = NULL; 382 struct cfg_hostres *resources = NULL;
289 u32 temp; 383 u32 temp;
290 u32 ul_dsp_clk_rate; 384 u32 ul_dsp_clk_rate;
@@ -305,12 +399,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
305 ul_shm_base_virt *= DSPWORDSIZE; 399 ul_shm_base_virt *= DSPWORDSIZE;
306 DBC_ASSERT(ul_shm_base_virt != 0); 400 DBC_ASSERT(ul_shm_base_virt != 0);
307 /* DSP Virtual address */ 401 /* DSP Virtual address */
308 ul_tlb_base_virt = dev_context->sh_s.seg0_da; 402 ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
309 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 403 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
310 ul_shm_offset_virt = 404 ul_shm_offset_virt =
311 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); 405 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
312 /* Kernel logical address */ 406 /* Kernel logical address */
313 ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt; 407 ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
314 408
315 DBC_ASSERT(ul_shm_base != 0); 409 DBC_ASSERT(ul_shm_base != 0);
316 /* 2nd wd is used as sync field */ 410 /* 2nd wd is used as sync field */
@@ -345,83 +439,78 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
345 OMAP343X_CONTROL_IVA2_BOOTMOD)); 439 OMAP343X_CONTROL_IVA2_BOOTMOD));
346 } 440 }
347 } 441 }
348
349 if (!status) { 442 if (!status) {
443 /* Reset and Unreset the RST2, so that BOOTADDR is copied to
444 * IVA2 SYSC register */
445 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
446 OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
447 udelay(100);
350 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, 448 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
351 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); 449 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
352 mmu = dev_context->dsp_mmu; 450 udelay(100);
353 if (mmu) 451
354 mmu = dsp_mmu_exit(mmu); 452 /* Disable the DSP MMU */
355 mmu = dsp_mmu_init(); 453 hw_mmu_disable(resources->dw_dmmu_base);
356 if (IS_ERR(mmu)) { 454 /* Disable TWL */
357 dev_err(bridge, "dsp_mmu_init failed!\n"); 455 hw_mmu_twl_disable(resources->dw_dmmu_base);
358 dev_context->dsp_mmu = NULL; 456
359 status = (int)mmu; 457 /* Only make TLB entry if both addresses are non-zero */
360 } 458 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
361 } 459 entry_ndx++) {
362 if (!status) { 460 struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
363 dev_context->dsp_mmu = mmu; 461 struct hw_mmu_map_attrs_t map_attrs = {
364 sm_sg = &dev_context->sh_s; 462 .endianism = e->endianism,
365 sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa, 463 .element_size = e->elem_size,
366 sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); 464 .mixed_size = e->mixed_mode,
367 if (IS_ERR_VALUE(sg0_da)) { 465 };
368 status = (int)sg0_da; 466
369 sg0_da = 0; 467 if (!e->ul_gpp_pa || !e->ul_dsp_va)
370 }
371 }
372 if (!status) {
373 sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
374 sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
375 if (IS_ERR_VALUE(sg1_da)) {
376 status = (int)sg1_da;
377 sg1_da = 0;
378 }
379 }
380 if (!status) {
381 u32 da;
382 for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
383 if (!tlb[tlb_i].ul_gpp_pa)
384 continue; 468 continue;
385 469
386 dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size" 470 dev_dbg(bridge,
387 " 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa, 471 "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
388 tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size); 472 itmp_entry_ndx,
389 473 e->ul_gpp_pa,
390 da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va, 474 e->ul_dsp_va,
391 tlb[tlb_i].ul_gpp_pa, PAGE_SIZE, 475 e->ul_size);
392 IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); 476
393 if (IS_ERR_VALUE(da)) { 477 hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
394 status = (int)da; 478 e->ul_gpp_pa,
395 break; 479 e->ul_dsp_va,
396 } 480 e->ul_size,
397 } 481 itmp_entry_ndx,
398 } 482 &map_attrs, 1, 1);
399 if (!status) { 483
400 u32 da; 484 itmp_entry_ndx++;
401 l4_i = 0;
402 while (l4_peripheral_table[l4_i].phys_addr) {
403 da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
404 dsp_virt_addr, l4_peripheral_table[l4_i].
405 phys_addr, PAGE_SIZE,
406 IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
407 if (IS_ERR_VALUE(da)) {
408 status = (int)da;
409 break;
410 }
411 l4_i++;
412 } 485 }
413 } 486 }
414 487
415 /* Lock the above TLB entries and get the BIOS and load monitor timer 488 /* Lock the above TLB entries and get the BIOS and load monitor timer
416 * information */ 489 * information */
417 if (!status) { 490 if (!status) {
491 hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
492 hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
493 hw_mmu_ttb_set(resources->dw_dmmu_base,
494 dev_context->pt_attrs->l1_base_pa);
495 hw_mmu_twl_enable(resources->dw_dmmu_base);
496 /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
497
498 temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
499 temp = (temp & 0xFFFFFFEF) | 0x11;
500 __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
501
502 /* Let the DSP MMU run */
503 hw_mmu_enable(resources->dw_dmmu_base);
504
418 /* Enable the BIOS clock */ 505 /* Enable the BIOS clock */
419 (void)dev_get_symbol(dev_context->hdev_obj, 506 (void)dev_get_symbol(dev_context->hdev_obj,
420 BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); 507 BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
421 (void)dev_get_symbol(dev_context->hdev_obj, 508 (void)dev_get_symbol(dev_context->hdev_obj,
422 BRIDGEINIT_LOADMON_GPTIMER, 509 BRIDGEINIT_LOADMON_GPTIMER,
423 &ul_load_monitor_timer); 510 &ul_load_monitor_timer);
511 }
424 512
513 if (!status) {
425 if (ul_load_monitor_timer != 0xFFFF) { 514 if (ul_load_monitor_timer != 0xFFFF) {
426 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | 515 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
427 ul_load_monitor_timer; 516 ul_load_monitor_timer;
@@ -430,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
430 dev_dbg(bridge, "Not able to get the symbol for Load " 519 dev_dbg(bridge, "Not able to get the symbol for Load "
431 "Monitor Timer\n"); 520 "Monitor Timer\n");
432 } 521 }
522 }
433 523
524 if (!status) {
434 if (ul_bios_gp_timer != 0xFFFF) { 525 if (ul_bios_gp_timer != 0xFFFF) {
435 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | 526 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
436 ul_bios_gp_timer; 527 ul_bios_gp_timer;
@@ -439,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
439 dev_dbg(bridge, 530 dev_dbg(bridge,
440 "Not able to get the symbol for BIOS Timer\n"); 531 "Not able to get the symbol for BIOS Timer\n");
441 } 532 }
533 }
442 534
535 if (!status) {
443 /* Set the DSP clock rate */ 536 /* Set the DSP clock rate */
444 (void)dev_get_symbol(dev_context->hdev_obj, 537 (void)dev_get_symbol(dev_context->hdev_obj,
445 "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); 538 "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
@@ -492,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
492 585
493 /* Let DSP go */ 586 /* Let DSP go */
494 dev_dbg(bridge, "%s Unreset\n", __func__); 587 dev_dbg(bridge, "%s Unreset\n", __func__);
588 /* Enable DSP MMU Interrupts */
589 hw_mmu_event_enable(resources->dw_dmmu_base,
590 HW_MMU_ALL_INTERRUPTS);
495 /* release the RST1, DSP starts executing now .. */ 591 /* release the RST1, DSP starts executing now .. */
496 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, 592 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
497 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); 593 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -521,23 +617,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
521 617
522 /* update board state */ 618 /* update board state */
523 dev_context->dw_brd_state = BRD_RUNNING; 619 dev_context->dw_brd_state = BRD_RUNNING;
524 return 0; 620 /* (void)chnlsm_enable_interrupt(dev_context); */
525 } else { 621 } else {
526 dev_context->dw_brd_state = BRD_UNKNOWN; 622 dev_context->dw_brd_state = BRD_UNKNOWN;
527 } 623 }
528 } 624 }
529
530 while (tlb_i--) {
531 if (!tlb[tlb_i].ul_gpp_pa)
532 continue;
533 iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
534 }
535 while (l4_i--)
536 iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
537 if (sg0_da)
538 iommu_kunmap(mmu, sg0_da);
539 if (sg1_da)
540 iommu_kunmap(mmu, sg1_da);
541 return status; 625 return status;
542} 626}
543 627
@@ -553,9 +637,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
553{ 637{
554 int status = 0; 638 int status = 0;
555 struct bridge_dev_context *dev_context = dev_ctxt; 639 struct bridge_dev_context *dev_context = dev_ctxt;
640 struct pg_table_attrs *pt_attrs;
556 u32 dsp_pwr_state; 641 u32 dsp_pwr_state;
557 int i;
558 struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
559 struct omap_dsp_platform_data *pdata = 642 struct omap_dsp_platform_data *pdata =
560 omap_dspbridge_dev->dev.platform_data; 643 omap_dspbridge_dev->dev.platform_data;
561 644
@@ -591,37 +674,23 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
591 674
592 dsp_wdt_enable(false); 675 dsp_wdt_enable(false);
593 676
594 /* Reset DSP */ 677 /* This is a good place to clear the MMU page tables as well */
595 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 678 if (dev_context->pt_attrs) {
596 OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); 679 pt_attrs = dev_context->pt_attrs;
597 680 memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
681 memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
682 memset((u8 *) pt_attrs->pg_info, 0x00,
683 (pt_attrs->l2_num_pages * sizeof(struct page_info)));
684 }
598 /* Disable the mailbox interrupts */ 685 /* Disable the mailbox interrupts */
599 if (dev_context->mbox) { 686 if (dev_context->mbox) {
600 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); 687 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
601 omap_mbox_put(dev_context->mbox); 688 omap_mbox_put(dev_context->mbox);
602 dev_context->mbox = NULL; 689 dev_context->mbox = NULL;
603 } 690 }
604 if (dev_context->dsp_mmu) { 691 /* Reset IVA2 clocks*/
605 pr_err("Proc stop mmu if statement\n"); 692 (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
606 for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) { 693 OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
607 if (!tlb[i].ul_gpp_pa)
608 continue;
609 iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
610 }
611 i = 0;
612 while (l4_peripheral_table[i].phys_addr) {
613 iommu_kunmap(dev_context->dsp_mmu,
614 l4_peripheral_table[i].dsp_virt_addr);
615 i++;
616 }
617 iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
618 iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
619 dsp_mmu_exit(dev_context->dsp_mmu);
620 dev_context->dsp_mmu = NULL;
621 }
622 /* Reset IVA IOMMU*/
623 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
624 OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
625 694
626 dsp_clock_disable_all(dev_context->dsp_per_clks); 695 dsp_clock_disable_all(dev_context->dsp_per_clks);
627 dsp_clk_disable(DSP_CLK_IVA2); 696 dsp_clk_disable(DSP_CLK_IVA2);
@@ -681,6 +750,10 @@ static int bridge_dev_create(struct bridge_dev_context
681 struct bridge_dev_context *dev_context = NULL; 750 struct bridge_dev_context *dev_context = NULL;
682 s32 entry_ndx; 751 s32 entry_ndx;
683 struct cfg_hostres *resources = config_param; 752 struct cfg_hostres *resources = config_param;
753 struct pg_table_attrs *pt_attrs;
754 u32 pg_tbl_pa;
755 u32 pg_tbl_va;
756 u32 align_size;
684 struct drv_data *drv_datap = dev_get_drvdata(bridge); 757 struct drv_data *drv_datap = dev_get_drvdata(bridge);
685 758
686 /* Allocate and initialize a data structure to contain the bridge driver 759 /* Allocate and initialize a data structure to contain the bridge driver
@@ -711,8 +784,97 @@ static int bridge_dev_create(struct bridge_dev_context
711 if (!dev_context->dw_dsp_base_addr) 784 if (!dev_context->dw_dsp_base_addr)
712 status = -EPERM; 785 status = -EPERM;
713 786
787 pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
788 if (pt_attrs != NULL) {
789 /* Assuming that we use only DSP's memory map
790 * until 0x4000:0000, we would need only 1024
791 * L1 entries, i.e. L1 size = 4K */
792 pt_attrs->l1_size = 0x1000;
793 align_size = pt_attrs->l1_size;
794 /* Align sizes are expected to be power of 2 */
795 /* we like to get aligned on L1 table size */
796 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
797 align_size, &pg_tbl_pa);
798
799 /* Check if the PA is aligned for us */
800 if ((pg_tbl_pa) & (align_size - 1)) {
801 /* PA not aligned to page table size ,
802 * try with more allocation and align */
803 mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
804 pt_attrs->l1_size);
805 /* we like to get aligned on L1 table size */
806 pg_tbl_va =
807 (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
808 align_size, &pg_tbl_pa);
809 /* We should be able to get aligned table now */
810 pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
811 pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
812 pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
813 /* Align the PA to the next 'align' boundary */
814 pt_attrs->l1_base_pa =
815 ((pg_tbl_pa) +
816 (align_size - 1)) & (~(align_size - 1));
817 pt_attrs->l1_base_va =
818 pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
819 } else {
820 /* We got aligned PA, cool */
821 pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
822 pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
823 pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
824 pt_attrs->l1_base_pa = pg_tbl_pa;
825 pt_attrs->l1_base_va = pg_tbl_va;
826 }
827 if (pt_attrs->l1_base_va)
828 memset((u8 *) pt_attrs->l1_base_va, 0x00,
829 pt_attrs->l1_size);
830
831 /* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
832 * L4 pages */
833 pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
834 pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
835 pt_attrs->l2_num_pages;
836 align_size = 4; /* Make it u32 aligned */
837 /* we like to get aligned on L1 table size */
838 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
839 align_size, &pg_tbl_pa);
840 pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
841 pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
842 pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
843 pt_attrs->l2_base_pa = pg_tbl_pa;
844 pt_attrs->l2_base_va = pg_tbl_va;
845
846 if (pt_attrs->l2_base_va)
847 memset((u8 *) pt_attrs->l2_base_va, 0x00,
848 pt_attrs->l2_size);
849
850 pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
851 sizeof(struct page_info), GFP_KERNEL);
852 dev_dbg(bridge,
853 "L1 pa %x, va %x, size %x\n L2 pa %x, va "
854 "%x, size %x\n", pt_attrs->l1_base_pa,
855 pt_attrs->l1_base_va, pt_attrs->l1_size,
856 pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
857 pt_attrs->l2_size);
858 dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
859 pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
860 }
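/*
 * Editor's sketch (not part of the patch): the over-allocate-and-align
 * approach used above for the L1 table. Covering a 1 GB DSP address space
 * with 1 MB sections needs 1024 entries of 4 bytes each, i.e. a 4 KB L1
 * table, and the table base must be aligned to its own size. 'align' must
 * be a power of two; the helper name is illustrative only.
 */
#include <stdint.h>

static uint32_t align_up(uint32_t pa, uint32_t align)
{
	/* Round pa up to the next 'align' boundary. */
	return (pa + (align - 1)) & ~(align - 1);
}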
861 if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
862 (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
863 dev_context->pt_attrs = pt_attrs;
864 else
865 status = -ENOMEM;
866
714 if (!status) { 867 if (!status) {
868 spin_lock_init(&pt_attrs->pg_lock);
715 dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; 869 dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
870
871 /* Set the Clock Divisor for the DSP module */
872 udelay(5);
873 /* MMU address is obtained from the host
874 * resources struct */
875 dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
876 }
877 if (!status) {
716 dev_context->hdev_obj = hdev_obj; 878 dev_context->hdev_obj = hdev_obj;
717 /* Store current board state. */ 879 /* Store current board state. */
718 dev_context->dw_brd_state = BRD_UNKNOWN; 880 dev_context->dw_brd_state = BRD_UNKNOWN;
@@ -722,6 +884,23 @@ static int bridge_dev_create(struct bridge_dev_context
722 /* Return ptr to our device state to the DSP API for storage */ 884 /* Return ptr to our device state to the DSP API for storage */
723 *dev_cntxt = dev_context; 885 *dev_cntxt = dev_context;
724 } else { 886 } else {
887 if (pt_attrs != NULL) {
888 kfree(pt_attrs->pg_info);
889
890 if (pt_attrs->l2_tbl_alloc_va) {
891 mem_free_phys_mem((void *)
892 pt_attrs->l2_tbl_alloc_va,
893 pt_attrs->l2_tbl_alloc_pa,
894 pt_attrs->l2_tbl_alloc_sz);
895 }
896 if (pt_attrs->l1_tbl_alloc_va) {
897 mem_free_phys_mem((void *)
898 pt_attrs->l1_tbl_alloc_va,
899 pt_attrs->l1_tbl_alloc_pa,
900 pt_attrs->l1_tbl_alloc_sz);
901 }
902 }
903 kfree(pt_attrs);
725 kfree(dev_context); 904 kfree(dev_context);
726 } 905 }
727func_end: 906func_end:
@@ -789,6 +968,7 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
789 */ 968 */
790static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) 969static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
791{ 970{
971 struct pg_table_attrs *pt_attrs;
792 int status = 0; 972 int status = 0;
793 struct bridge_dev_context *dev_context = (struct bridge_dev_context *) 973 struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
794 dev_ctxt; 974 dev_ctxt;
@@ -802,6 +982,23 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
802 982
803 /* first put the device to stop state */ 983 /* first put the device to stop state */
804 bridge_brd_stop(dev_context); 984 bridge_brd_stop(dev_context);
985 if (dev_context->pt_attrs) {
986 pt_attrs = dev_context->pt_attrs;
987 kfree(pt_attrs->pg_info);
988
989 if (pt_attrs->l2_tbl_alloc_va) {
990 mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
991 pt_attrs->l2_tbl_alloc_pa,
992 pt_attrs->l2_tbl_alloc_sz);
993 }
994 if (pt_attrs->l1_tbl_alloc_va) {
995 mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
996 pt_attrs->l1_tbl_alloc_pa,
997 pt_attrs->l1_tbl_alloc_sz);
998 }
999 kfree(pt_attrs);
1000
1001 }
805 1002
806 if (dev_context->resources) { 1003 if (dev_context->resources) {
807 host_res = dev_context->resources; 1004 host_res = dev_context->resources;
@@ -832,6 +1029,8 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
832 iounmap((void *)host_res->dw_mem_base[3]); 1029 iounmap((void *)host_res->dw_mem_base[3]);
833 if (host_res->dw_mem_base[4]) 1030 if (host_res->dw_mem_base[4])
834 iounmap((void *)host_res->dw_mem_base[4]); 1031 iounmap((void *)host_res->dw_mem_base[4]);
1032 if (host_res->dw_dmmu_base)
1033 iounmap(host_res->dw_dmmu_base);
835 if (host_res->dw_per_base) 1034 if (host_res->dw_per_base)
836 iounmap(host_res->dw_per_base); 1035 iounmap(host_res->dw_per_base);
837 if (host_res->dw_per_pm_base) 1036 if (host_res->dw_per_pm_base)
@@ -845,6 +1044,7 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
845 host_res->dw_mem_base[2] = (u32) NULL; 1044 host_res->dw_mem_base[2] = (u32) NULL;
846 host_res->dw_mem_base[3] = (u32) NULL; 1045 host_res->dw_mem_base[3] = (u32) NULL;
847 host_res->dw_mem_base[4] = (u32) NULL; 1046 host_res->dw_mem_base[4] = (u32) NULL;
1047 host_res->dw_dmmu_base = NULL;
848 host_res->dw_sys_ctrl_base = NULL; 1048 host_res->dw_sys_ctrl_base = NULL;
849 1049
850 kfree(host_res); 1050 kfree(host_res);
@@ -928,6 +1128,673 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
928} 1128}
929 1129
930/* 1130/*
1131 * ======== bridge_brd_mem_map ========
1132 * This function maps MPU buffer to the DSP address space. It performs
1133 * linear to physical address translation if required. It translates each
1134 * page since linear addresses can be physically non-contiguous
1135 * All address & size arguments are assumed to be page aligned (in proc.c)
1136 *
1137 * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
1138 */
1139static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
1140 u32 ul_mpu_addr, u32 virt_addr,
1141 u32 ul_num_bytes, u32 ul_map_attr,
1142 struct page **mapped_pages)
1143{
1144 u32 attrs;
1145 int status = 0;
1146 struct bridge_dev_context *dev_context = dev_ctxt;
1147 struct hw_mmu_map_attrs_t hw_attrs;
1148 struct vm_area_struct *vma;
1149 struct mm_struct *mm = current->mm;
1150 u32 write = 0;
1151 u32 num_usr_pgs = 0;
1152 struct page *mapped_page, *pg;
1153 s32 pg_num;
1154 u32 va = virt_addr;
1155 struct task_struct *curr_task = current;
1156 u32 pg_i = 0;
1157 u32 mpu_addr, pa;
1158
1159 dev_dbg(bridge,
1160 "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
1161 __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
1162 ul_map_attr);
1163 if (ul_num_bytes == 0)
1164 return -EINVAL;
1165
1166 if (ul_map_attr & DSP_MAP_DIR_MASK) {
1167 attrs = ul_map_attr;
1168 } else {
1169 /* Assign default attributes */
1170 attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
1171 }
1172 /* Take mapping properties */
1173 if (attrs & DSP_MAPBIGENDIAN)
1174 hw_attrs.endianism = HW_BIG_ENDIAN;
1175 else
1176 hw_attrs.endianism = HW_LITTLE_ENDIAN;
1177
1178 hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
1179 ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1180 /* Ignore element_size if mixed_size is enabled */
1181 if (hw_attrs.mixed_size == 0) {
1182 if (attrs & DSP_MAPELEMSIZE8) {
1183 /* Size is 8 bit */
1184 hw_attrs.element_size = HW_ELEM_SIZE8BIT;
1185 } else if (attrs & DSP_MAPELEMSIZE16) {
1186 /* Size is 16 bit */
1187 hw_attrs.element_size = HW_ELEM_SIZE16BIT;
1188 } else if (attrs & DSP_MAPELEMSIZE32) {
1189 /* Size is 32 bit */
1190 hw_attrs.element_size = HW_ELEM_SIZE32BIT;
1191 } else if (attrs & DSP_MAPELEMSIZE64) {
1192 /* Size is 64 bit */
1193 hw_attrs.element_size = HW_ELEM_SIZE64BIT;
1194 } else {
1195 /*
1196 * Mixedsize isn't enabled, so size can't be
1197 * zero here
1198 */
1199 return -EINVAL;
1200 }
1201 }
1202 if (attrs & DSP_MAPDONOTLOCK)
1203 hw_attrs.donotlockmpupage = 1;
1204 else
1205 hw_attrs.donotlockmpupage = 0;
1206
1207 if (attrs & DSP_MAPVMALLOCADDR) {
1208 return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
1209 ul_num_bytes, &hw_attrs);
1210 }
1211 /*
1212 * Do OS-specific user-va to pa translation.
1213 * Combine physically contiguous regions to reduce TLBs.
1214 * Pass the translated pa to pte_update.
1215 */
1216 if ((attrs & DSP_MAPPHYSICALADDR)) {
1217 status = pte_update(dev_context, ul_mpu_addr, virt_addr,
1218 ul_num_bytes, &hw_attrs);
1219 goto func_cont;
1220 }
1221
1222 /*
1223 * Important Note: ul_mpu_addr is mapped from user application process
1224 * to current process - it must lie completely within the current
1225 * virtual memory address space in order to be of use to us here!
1226 */
1227 down_read(&mm->mmap_sem);
1228 vma = find_vma(mm, ul_mpu_addr);
1229 if (vma)
1230 dev_dbg(bridge,
1231 "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
1232 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1233 ul_num_bytes, vma->vm_start, vma->vm_end,
1234 vma->vm_flags);
1235
1236 /*
1237 * It is observed that under some circumstances, the user buffer is
1238 * spread across several VMAs. So loop through and check if the entire
1239 * user buffer is covered
1240 */
1241 while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
1242 /* jump to the next VMA region */
1243 vma = find_vma(mm, vma->vm_end + 1);
1244 dev_dbg(bridge,
1245 "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
1246 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1247 ul_num_bytes, vma->vm_start, vma->vm_end,
1248 vma->vm_flags);
1249 }
1250 if (!vma) {
1251 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
1252 __func__, ul_mpu_addr, ul_num_bytes);
1253 status = -EINVAL;
1254 up_read(&mm->mmap_sem);
1255 goto func_cont;
1256 }
1257
1258 if (vma->vm_flags & VM_IO) {
1259 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1260 mpu_addr = ul_mpu_addr;
1261
1262 /* Get the physical addresses for user buffer */
1263 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1264 pa = user_va2_pa(mm, mpu_addr);
1265 if (!pa) {
1266 status = -EPERM;
1267 pr_err("DSPBRIDGE: VM_IO mapping physical "
1268 "address is invalid\n");
1269 break;
1270 }
1271 if (pfn_valid(__phys_to_pfn(pa))) {
1272 pg = PHYS_TO_PAGE(pa);
1273 get_page(pg);
1274 if (page_count(pg) < 1) {
1275 pr_err("Bad page in VM_IO buffer\n");
1276 bad_page_dump(pa, pg);
1277 }
1278 }
1279 status = pte_set(dev_context->pt_attrs, pa,
1280 va, HW_PAGE_SIZE4KB, &hw_attrs);
1281 if (status)
1282 break;
1283
1284 va += HW_PAGE_SIZE4KB;
1285 mpu_addr += HW_PAGE_SIZE4KB;
1286 pa += HW_PAGE_SIZE4KB;
1287 }
1288 } else {
1289 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1290 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
1291 write = 1;
1292
1293 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1294 pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
1295 write, 1, &mapped_page, NULL);
1296 if (pg_num > 0) {
1297 if (page_count(mapped_page) < 1) {
1298 pr_err("Bad page count after doing "
1299 "get_user_pages on "
1300 "user buffer\n");
1301 bad_page_dump(page_to_phys(mapped_page),
1302 mapped_page);
1303 }
1304 status = pte_set(dev_context->pt_attrs,
1305 page_to_phys(mapped_page), va,
1306 HW_PAGE_SIZE4KB, &hw_attrs);
1307 if (status)
1308 break;
1309
1310 if (mapped_pages)
1311 mapped_pages[pg_i] = mapped_page;
1312
1313 va += HW_PAGE_SIZE4KB;
1314 ul_mpu_addr += HW_PAGE_SIZE4KB;
1315 } else {
1316 pr_err("DSPBRIDGE: get_user_pages FAILED, "
1317 "MPU addr = 0x%x, "
1318 "vma->vm_flags = 0x%lx, "
1319 "get_user_pages Err "
1320 "Value = %d, Buffer "
1321 "size=0x%x\n", ul_mpu_addr,
1322 vma->vm_flags, pg_num, ul_num_bytes);
1323 status = -EPERM;
1324 break;
1325 }
1326 }
1327 }
1328 up_read(&mm->mmap_sem);
1329func_cont:
1330 if (status) {
1331 /*
1332 * Roll back the pages already mapped in case the mapping
1333 * failed in the middle
1334 */
1335 if (pg_i) {
1336 bridge_brd_mem_un_map(dev_context, virt_addr,
1337 (pg_i * PG_SIZE4K));
1338 }
1339 status = -EPERM;
1340 }
1341 /*
1342 * In any case, flush the TLB
1343 * This is called from here instead of from pte_update to avoid unnecessary
1344 * repetition while mapping non-contiguous physical regions of a virtual
1345 * region
1346 */
1347 flush_all(dev_context);
1348 dev_dbg(bridge, "%s status %x\n", __func__, status);
1349 return status;
1350}
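/*
 * Editor's sketch (not part of the patch) of the map-then-roll-back shape
 * of bridge_brd_mem_map() above: map one 4 KB page at a time and, on
 * failure, unmap exactly the pages already mapped. map_one() and
 * unmap_range() are hypothetical stand-ins for pte_set() and
 * bridge_brd_mem_un_map().
 */
#include <stdint.h>

#define SKETCH_PG_SIZE 0x1000u

static int map_range_sketch(uint32_t dsp_va, uint32_t num_pages,
			    int (*map_one)(uint32_t va),
			    void (*unmap_range)(uint32_t va, uint32_t bytes))
{
	uint32_t i;

	for (i = 0; i < num_pages; i++) {
		if (map_one(dsp_va + i * SKETCH_PG_SIZE)) {
			/* Roll back only what was mapped so far. */
			unmap_range(dsp_va, i * SKETCH_PG_SIZE);
			return -1;
		}
	}
	return 0;
}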
1351
1352/*
1353 * ======== bridge_brd_mem_un_map ========
1354 * Invalidate the PTEs for the DSP VA block to be unmapped.
1355 *
1356 * PTEs of a mapped memory block are contiguous in any page table
1357 * So, instead of looking up the PTE address for every 4K block,
1358 * we clear consecutive PTEs until we unmap all the bytes
1359 */
1360static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
1361 u32 virt_addr, u32 ul_num_bytes)
1362{
1363 u32 l1_base_va;
1364 u32 l2_base_va;
1365 u32 l2_base_pa;
1366 u32 l2_page_num;
1367 u32 pte_val;
1368 u32 pte_size;
1369 u32 pte_count;
1370 u32 pte_addr_l1;
1371 u32 pte_addr_l2 = 0;
1372 u32 rem_bytes;
1373 u32 rem_bytes_l2;
1374 u32 va_curr;
1375 struct page *pg = NULL;
1376 int status = 0;
1377 struct bridge_dev_context *dev_context = dev_ctxt;
1378 struct pg_table_attrs *pt = dev_context->pt_attrs;
1379 u32 temp;
1380 u32 paddr;
1381 u32 numof4k_pages = 0;
1382
1383 va_curr = virt_addr;
1384 rem_bytes = ul_num_bytes;
1385 rem_bytes_l2 = 0;
1386 l1_base_va = pt->l1_base_va;
1387 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1388 dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
1389 "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
1390 ul_num_bytes, l1_base_va, pte_addr_l1);
1391
1392 while (rem_bytes && !status) {
1393 u32 va_curr_orig = va_curr;
1394 /* Find whether the L1 PTE points to a valid L2 PT */
1395 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1396 pte_val = *(u32 *) pte_addr_l1;
1397 pte_size = hw_mmu_pte_size_l1(pte_val);
1398
1399 if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
1400 goto skip_coarse_page;
1401
1402 /*
1403 * Get the L2 PA from the L1 PTE, and find
1404 * corresponding L2 VA
1405 */
1406 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1407 l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1408 l2_page_num =
1409 (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1410 /*
1411 * Find the L2 PTE address from which we will start
1412 * clearing, the number of PTEs to be cleared on this
1413 * page, and the size of VA space that needs to be
1414 * cleared on this L2 page
1415 */
1416 pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
1417 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1418 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
1419 if (rem_bytes < (pte_count * PG_SIZE4K))
1420 pte_count = rem_bytes / PG_SIZE4K;
1421 rem_bytes_l2 = pte_count * PG_SIZE4K;
1422
1423 /*
1424 * Unmap the VA space on this L2 PT. A quicker way
1425 * would be to clear pte_count entries starting from
1426 * pte_addr_l2. However, below code checks that we don't
1427 * clear invalid entries or less than 64KB for a 64KB
1428 * entry. Similar checking is done for L1 PTEs too
1429 * below
1430 */
1431 while (rem_bytes_l2 && !status) {
1432 pte_val = *(u32 *) pte_addr_l2;
1433 pte_size = hw_mmu_pte_size_l2(pte_val);
1434 /* va_curr aligned to pte_size? */
1435 if (pte_size == 0 || rem_bytes_l2 < pte_size ||
1436 va_curr & (pte_size - 1)) {
1437 status = -EPERM;
1438 break;
1439 }
1440
1441 /* Collect Physical addresses from VA */
1442 paddr = (pte_val & ~(pte_size - 1));
1443 if (pte_size == HW_PAGE_SIZE64KB)
1444 numof4k_pages = 16;
1445 else
1446 numof4k_pages = 1;
1447 temp = 0;
1448 while (temp++ < numof4k_pages) {
1449 if (!pfn_valid(__phys_to_pfn(paddr))) {
1450 paddr += HW_PAGE_SIZE4KB;
1451 continue;
1452 }
1453 pg = PHYS_TO_PAGE(paddr);
1454 if (page_count(pg) < 1) {
1455 pr_info("DSPBRIDGE: UNMAP function: "
1456 "COUNT 0 FOR PA 0x%x, size = "
1457 "0x%x\n", paddr, ul_num_bytes);
1458 bad_page_dump(paddr, pg);
1459 } else {
1460 set_page_dirty(pg);
1461 page_cache_release(pg);
1462 }
1463 paddr += HW_PAGE_SIZE4KB;
1464 }
1465 if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
1466 status = -EPERM;
1467 goto EXIT_LOOP;
1468 }
1469
1470 status = 0;
1471 rem_bytes_l2 -= pte_size;
1472 va_curr += pte_size;
1473 pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
1474 }
1475 spin_lock(&pt->pg_lock);
1476 if (rem_bytes_l2 == 0) {
1477 pt->pg_info[l2_page_num].num_entries -= pte_count;
1478 if (pt->pg_info[l2_page_num].num_entries == 0) {
1479 /*
1480 * Clear the L1 PTE pointing to the L2 PT
1481 */
1482 if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
1483 HW_MMU_COARSE_PAGE_SIZE))
1484 status = 0;
1485 else {
1486 status = -EPERM;
1487 spin_unlock(&pt->pg_lock);
1488 goto EXIT_LOOP;
1489 }
1490 }
1491 rem_bytes -= pte_count * PG_SIZE4K;
1492 } else
1493 status = -EPERM;
1494
1495 spin_unlock(&pt->pg_lock);
1496 continue;
1497skip_coarse_page:
1498 /* va_curr aligned to pte_size? */
1499 /* pte_size = 1 MB or 16 MB */
1500 if (pte_size == 0 || rem_bytes < pte_size ||
1501 va_curr & (pte_size - 1)) {
1502 status = -EPERM;
1503 break;
1504 }
1505
1506 if (pte_size == HW_PAGE_SIZE1MB)
1507 numof4k_pages = 256;
1508 else
1509 numof4k_pages = 4096;
1510 temp = 0;
1511 /* Collect Physical addresses from VA */
1512 paddr = (pte_val & ~(pte_size - 1));
1513 while (temp++ < numof4k_pages) {
1514 if (pfn_valid(__phys_to_pfn(paddr))) {
1515 pg = PHYS_TO_PAGE(paddr);
1516 if (page_count(pg) < 1) {
1517 pr_info("DSPBRIDGE: UNMAP function: "
1518 "COUNT 0 FOR PA 0x%x, size = "
1519 "0x%x\n", paddr, ul_num_bytes);
1520 bad_page_dump(paddr, pg);
1521 } else {
1522 set_page_dirty(pg);
1523 page_cache_release(pg);
1524 }
1525 }
1526 paddr += HW_PAGE_SIZE4KB;
1527 }
1528 if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
1529 status = 0;
1530 rem_bytes -= pte_size;
1531 va_curr += pte_size;
1532 } else {
1533 status = -EPERM;
1534 goto EXIT_LOOP;
1535 }
1536 }
1537 /*
1538 * It is better to flush the TLB here, so that any stale old entries
1539 * get flushed
1540 */
1541EXIT_LOOP:
1542 flush_all(dev_context);
1543 dev_dbg(bridge,
1544 "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
1545 " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
1546 pte_addr_l2, rem_bytes, rem_bytes_l2, status);
1547 return status;
1548}
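/*
 * Editor's sketch (not part of the patch): the per-PTE page accounting in
 * bridge_brd_mem_un_map() above. A PTE of 4 KB, 64 KB, 1 MB or 16 MB
 * releases 1, 16, 256 or 4096 4-KB pages respectively, which is simply
 * the PTE size divided by 4 KB.
 */
#include <stdint.h>

static uint32_t pte_to_4k_pages(uint32_t pte_size)
{
	return pte_size >> 12;	/* 0x10000 -> 16, 0x100000 -> 256, 0x1000000 -> 4096 */
}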
1549
1550/*
1551 * ======== user_va2_pa ========
1552 * Purpose:
1553 * This function walks through the page tables to convert a userland
1554 * virtual address to physical address
1555 */
1556static u32 user_va2_pa(struct mm_struct *mm, u32 address)
1557{
1558 pgd_t *pgd;
1559 pmd_t *pmd;
1560 pte_t *ptep, pte;
1561
1562 pgd = pgd_offset(mm, address);
1563 if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
1564 pmd = pmd_offset(pgd, address);
1565 if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
1566 ptep = pte_offset_map(pmd, address);
1567 if (ptep) {
1568 pte = *ptep;
1569 if (pte_present(pte))
1570 return pte & PAGE_MASK;
1571 }
1572 }
1573 }
1574
1575 return 0;
1576}
1577
1578/*
1579 * ======== pte_update ========
1580 * This function calculates the optimum page-aligned addresses and sizes
1581 * Caller must pass page-aligned values
1582 */
1583static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
1584 u32 va, u32 size,
1585 struct hw_mmu_map_attrs_t *map_attrs)
1586{
1587 u32 i;
1588 u32 all_bits;
1589 u32 pa_curr = pa;
1590 u32 va_curr = va;
1591 u32 num_bytes = size;
1592 struct bridge_dev_context *dev_context = dev_ctxt;
1593 int status = 0;
1594 u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
1595 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
1596 };
1597
1598 while (num_bytes && !status) {
1599 /* To find the max. page size with which both PA & VA are
1600 * aligned */
1601 all_bits = pa_curr | va_curr;
1602
1603 for (i = 0; i < 4; i++) {
1604 if ((num_bytes >= page_size[i]) && ((all_bits &
1605 (page_size[i] -
1606 1)) == 0)) {
1607 status =
1608 pte_set(dev_context->pt_attrs, pa_curr,
1609 va_curr, page_size[i], map_attrs);
1610 pa_curr += page_size[i];
1611 va_curr += page_size[i];
1612 num_bytes -= page_size[i];
1613 /* Don't try smaller sizes. Hopefully we have
1614 * reached an address aligned to a bigger page
1615 * size */
1616 break;
1617 }
1618 }
1619 }
1620
1621 return status;
1622}
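/*
 * Editor's sketch (not part of the patch) of the selection rule in
 * pte_update() above: pick the largest supported page size to which both
 * the PA and the VA are aligned and which still fits in the bytes left.
 * The sizes mirror HW_PAGE_SIZE16MB/1MB/64KB/4KB.
 */
#include <stdint.h>

static uint32_t pick_page_size(uint32_t pa, uint32_t va, uint32_t num_bytes)
{
	static const uint32_t sizes[] = {
		0x1000000, 0x100000, 0x10000, 0x1000
	};
	uint32_t all_bits = pa | va;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		if (num_bytes >= sizes[i] &&
		    (all_bits & (sizes[i] - 1)) == 0)
			return sizes[i];
	}
	return 0;	/* caller is expected to pass 4 KB aligned values */
}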
1623
1624/*
1625 * ======== pte_set ========
1626 * This function calculates PTE address (MPU virtual) to be updated
1627 * It also manages the L2 page tables
1628 */
1629static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
1630 u32 size, struct hw_mmu_map_attrs_t *attrs)
1631{
1632 u32 i;
1633 u32 pte_val;
1634 u32 pte_addr_l1;
1635 u32 pte_size;
1636 /* Base address of the PT that will be updated */
1637 u32 pg_tbl_va;
1638 u32 l1_base_va;
1639 /* Compiler warns that the next three variables might be used
1640 * uninitialized in this function. Doesn't seem so. Working around,
1641 * anyways. */
1642 u32 l2_base_va = 0;
1643 u32 l2_base_pa = 0;
1644 u32 l2_page_num = 0;
1645 int status = 0;
1646
1647 l1_base_va = pt->l1_base_va;
1648 pg_tbl_va = l1_base_va;
1649 if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
1650 /* Find whether the L1 PTE points to a valid L2 PT */
1651 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1652 if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1653 pte_val = *(u32 *) pte_addr_l1;
1654 pte_size = hw_mmu_pte_size_l1(pte_val);
1655 } else {
1656 return -EPERM;
1657 }
1658 spin_lock(&pt->pg_lock);
1659 if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1660 /* Get the L2 PA from the L1 PTE, and find
1661 * corresponding L2 VA */
1662 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1663 l2_base_va =
1664 l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1665 l2_page_num =
1666 (l2_base_pa -
1667 pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1668 } else if (pte_size == 0) {
1669 /* L1 PTE is invalid. Allocate a L2 PT and
1670 * point the L1 PTE to it */
1671 /* Find a free L2 PT. */
1672 for (i = 0; (i < pt->l2_num_pages) &&
1673 (pt->pg_info[i].num_entries != 0); i++)
1674 ;
1675 if (i < pt->l2_num_pages) {
1676 l2_page_num = i;
1677 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1678 HW_MMU_COARSE_PAGE_SIZE);
1679 l2_base_va = pt->l2_base_va + (l2_page_num *
1680 HW_MMU_COARSE_PAGE_SIZE);
1681 /* Endianness attributes are ignored for
1682 * HW_MMU_COARSE_PAGE_SIZE */
1683 status =
1684 hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
1685 HW_MMU_COARSE_PAGE_SIZE,
1686 attrs);
1687 } else {
1688 status = -ENOMEM;
1689 }
1690 } else {
1691 /* Found valid L1 PTE of another size.
1692 * Should not overwrite it. */
1693 status = -EPERM;
1694 }
1695 if (!status) {
1696 pg_tbl_va = l2_base_va;
1697 if (size == HW_PAGE_SIZE64KB)
1698 pt->pg_info[l2_page_num].num_entries += 16;
1699 else
1700 pt->pg_info[l2_page_num].num_entries++;
1701 dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
1702 "%x, num_entries %x\n", l2_base_va,
1703 l2_base_pa, l2_page_num,
1704 pt->pg_info[l2_page_num].num_entries);
1705 }
1706 spin_unlock(&pt->pg_lock);
1707 }
1708 if (!status) {
1709 dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
1710 pg_tbl_va, pa, va, size);
1711 dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
1712 "mixed_size %x\n", attrs->endianism,
1713 attrs->element_size, attrs->mixed_size);
1714 status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1715 }
1716
1717 return status;
1718}
1719
1720/* Memory map kernel VA -- memory allocated with vmalloc */
1721static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
1722 u32 ul_mpu_addr, u32 virt_addr,
1723 u32 ul_num_bytes,
1724 struct hw_mmu_map_attrs_t *hw_attrs)
1725{
1726 int status = 0;
1727 struct page *page[1];
1728 u32 i;
1729 u32 pa_curr;
1730 u32 pa_next;
1731 u32 va_curr;
1732 u32 size_curr;
1733 u32 num_pages;
1734 u32 pa;
1735 u32 num_of4k_pages;
1736 u32 temp = 0;
1737
1738 /*
1739 * Do Kernel va to pa translation.
1740 * Combine physically contiguous regions to reduce TLBs.
1741 * Pass the translated pa to pte_update.
1742 */
1743 num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */
1744 i = 0;
1745 va_curr = ul_mpu_addr;
1746 page[0] = vmalloc_to_page((void *)va_curr);
1747 pa_next = page_to_phys(page[0]);
1748 while (!status && (i < num_pages)) {
1749 /*
1750 * Reuse pa_next from the previous iteration to avoid
1751 * an extra va2pa call
1752 */
1753 pa_curr = pa_next;
1754 size_curr = PAGE_SIZE;
1755 /*
1756 * If the next page is physically contiguous,
1757 * map it with the current one by increasing
1758 * the size of the region to be mapped
1759 */
1760 while (++i < num_pages) {
1761 page[0] =
1762 vmalloc_to_page((void *)(va_curr + size_curr));
1763 pa_next = page_to_phys(page[0]);
1764
1765 if (pa_next == (pa_curr + size_curr))
1766 size_curr += PAGE_SIZE;
1767 else
1768 break;
1769
1770 }
1771 if (pa_next == 0) {
1772 status = -ENOMEM;
1773 break;
1774 }
1775 pa = pa_curr;
1776 num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
1777 while (temp++ < num_of4k_pages) {
1778 get_page(PHYS_TO_PAGE(pa));
1779 pa += HW_PAGE_SIZE4KB;
1780 }
1781 status = pte_update(dev_context, pa_curr, virt_addr +
1782 (va_curr - ul_mpu_addr), size_curr,
1783 hw_attrs);
1784 va_curr += size_curr;
1785 }
1786 /*
1787 * In any case, flush the TLB
1788 * This is called from here instead of from pte_update to avoid unnecessary
1789 * repetition while mapping non-contiguous physical regions of a virtual
1790 * region
1791 */
1792 flush_all(dev_context);
1793 dev_dbg(bridge, "%s status %x\n", __func__, status);
1794 return status;
1795}
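/*
 * Editor's sketch (not part of the patch) of the coalescing loop in
 * mem_map_vmalloc() above: keep growing the region while the next vmalloc
 * page is physically contiguous, so a single pte_update() call can cover
 * it. va_to_pa() is a hypothetical stand-in for vmalloc_to_page() plus
 * page_to_phys(); num_pages is assumed to be at least 1.
 */
#include <stdint.h>

#define SKETCH_PAGE_SIZE 0x1000u

static uint32_t coalesce_bytes(uint32_t (*va_to_pa)(uint32_t va),
			       uint32_t va, uint32_t num_pages)
{
	uint32_t pa_start = va_to_pa(va);
	uint32_t size = SKETCH_PAGE_SIZE;

	/* Merge pages whose physical addresses follow on directly. */
	while (--num_pages &&
	       va_to_pa(va + size) == pa_start + size)
		size += SKETCH_PAGE_SIZE;

	return size;	/* bytes coverable by one mapping call */
}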
1796
1797/*
931 * ======== wait_for_start ======== 1798 * ======== wait_for_start ========
932 * Wait for the signal from DSP that it has started, or time out. 1799
933 */ 1800 */
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index b57a9fd5e75..fb9026e1403 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -31,6 +31,10 @@
31#include <dspbridge/dev.h> 31#include <dspbridge/dev.h>
32#include <dspbridge/iodefs.h> 32#include <dspbridge/iodefs.h>
33 33
34/* ------------------------------------ Hardware Abstraction Layer */
35#include <hw_defs.h>
36#include <hw_mmu.h>
37
34#include <dspbridge/pwr_sh.h> 38#include <dspbridge/pwr_sh.h>
35 39
36/* ----------------------------------- Bridge Driver */ 40/* ----------------------------------- Bridge Driver */
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index 66dbf02549e..ba2961049da 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -134,16 +134,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
134 134
135 if (!status) { 135 if (!status) {
136 ul_tlb_base_virt = 136 ul_tlb_base_virt =
137 dev_context->sh_s.seg0_da * DSPWORDSIZE; 137 dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
139 dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va; 139 dw_ext_prog_virt_mem =
140 dev_context->atlb_entry[0].ul_gpp_va;
140 141
141 if (!trace_read) { 142 if (!trace_read) {
142 ul_shm_offset_virt = 143 ul_shm_offset_virt =
143 ul_shm_base_virt - ul_tlb_base_virt; 144 ul_shm_base_virt - ul_tlb_base_virt;
144 ul_shm_offset_virt += 145 ul_shm_offset_virt +=
145 PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + 146 PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
146 1, PAGE_SIZE * 16); 147 1, HW_PAGE_SIZE64KB);
147 dw_ext_prog_virt_mem -= ul_shm_offset_virt; 148 dw_ext_prog_virt_mem -= ul_shm_offset_virt;
148 dw_ext_prog_virt_mem += 149 dw_ext_prog_virt_mem +=
149 (ul_ext_base - ul_dyn_ext_base); 150 (ul_ext_base - ul_dyn_ext_base);
@@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
317 ret = -EPERM; 318 ret = -EPERM;
318 319
319 if (!ret) { 320 if (!ret) {
320 ul_tlb_base_virt = dev_context->sh_s.seg0_da * 321 ul_tlb_base_virt =
321 DSPWORDSIZE; 322 dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
322
323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
324 324
325 if (symbols_reloaded) { 325 if (symbols_reloaded) {
@@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
337 ul_shm_base_virt - ul_tlb_base_virt; 337 ul_shm_base_virt - ul_tlb_base_virt;
338 if (trace_load) { 338 if (trace_load) {
339 dw_ext_prog_virt_mem = 339 dw_ext_prog_virt_mem =
340 dev_context->sh_s.seg0_va; 340 dev_context->atlb_entry[0].ul_gpp_va;
341 } else { 341 } else {
342 dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; 342 dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
343 dw_ext_prog_virt_mem += 343 dw_ext_prog_virt_mem +=
@@ -393,6 +393,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
393 omap_dspbridge_dev->dev.platform_data; 393 omap_dspbridge_dev->dev.platform_data;
394 struct cfg_hostres *resources = dev_context->resources; 394 struct cfg_hostres *resources = dev_context->resources;
395 int status = 0; 395 int status = 0;
396 u32 temp;
396 397
397 if (!dev_context->mbox) 398 if (!dev_context->mbox)
398 return 0; 399 return 0;
@@ -436,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
436 omap_mbox_restore_ctx(dev_context->mbox); 437 omap_mbox_restore_ctx(dev_context->mbox);
437 438
438 /* Access MMU SYS CONFIG register to generate a short wakeup */ 439 /* Access MMU SYS CONFIG register to generate a short wakeup */
439 iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG); 440 temp = readl(resources->dw_dmmu_base + 0x10);
440 441
441 dev_context->dw_brd_state = BRD_RUNNING; 442 dev_context->dw_brd_state = BRD_RUNNING;
442 } else if (dev_context->dw_brd_state == BRD_RETENTION) { 443 } else if (dev_context->dw_brd_state == BRD_RETENTION) {
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index e24ea0c7391..3430418190d 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -31,6 +31,57 @@
31#include <dspbridge/drv.h> 31#include <dspbridge/drv.h>
32#include <dspbridge/wdt.h> 32#include <dspbridge/wdt.h>
33 33
34static u32 fault_addr;
35
36static void mmu_fault_dpc(unsigned long data)
37{
38 struct deh_mgr *deh = (void *)data;
39
40 if (!deh)
41 return;
42
43 bridge_deh_notify(deh, DSP_MMUFAULT, 0);
44}
45
46static irqreturn_t mmu_fault_isr(int irq, void *data)
47{
48 struct deh_mgr *deh = data;
49 struct cfg_hostres *resources;
50 u32 event;
51
52 if (!deh)
53 return IRQ_HANDLED;
54
55 resources = deh->hbridge_context->resources;
56 if (!resources) {
57 dev_dbg(bridge, "%s: Failed to get Host Resources\n",
58 __func__);
59 return IRQ_HANDLED;
60 }
61
62 hw_mmu_event_status(resources->dw_dmmu_base, &event);
63 if (event == HW_MMU_TRANSLATION_FAULT) {
64 hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
65 dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
66 event, fault_addr);
67 /*
68 * Schedule a DPC directly. In the future, it may be
69 * necessary to check if DSP MMU fault is intended for
70 * Bridge.
71 */
72 tasklet_schedule(&deh->dpc_tasklet);
73
74 /* Disable the MMU events, else once we clear it will
75 * start to raise INTs again */
76 hw_mmu_event_disable(resources->dw_dmmu_base,
77 HW_MMU_TRANSLATION_FAULT);
78 } else {
79 hw_mmu_event_disable(resources->dw_dmmu_base,
80 HW_MMU_ALL_INTERRUPTS);
81 }
82 return IRQ_HANDLED;
83}
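/*
 * Editor's sketch (not part of the patch) of the top/bottom-half split in
 * mmu_fault_isr() above: the interrupt handler only latches the fault
 * address and masks further MMU events, while the notification work is
 * deferred to the tasklet. All three callbacks are hypothetical stand-ins.
 */
static void fault_isr_sketch(unsigned int fault_address,
			     void (*latch_fault)(unsigned int),
			     void (*mask_mmu_events)(void),
			     void (*schedule_deferred)(void))
{
	latch_fault(fault_address);	/* keep the hard-IRQ path short */
	mask_mmu_events();		/* avoid an interrupt storm until handled */
	schedule_deferred();		/* bridge_deh_notify() runs later */
}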
84
34int bridge_deh_create(struct deh_mgr **ret_deh, 85int bridge_deh_create(struct deh_mgr **ret_deh,
35 struct dev_object *hdev_obj) 86 struct dev_object *hdev_obj)
36{ 87{
@@ -58,9 +109,18 @@ int bridge_deh_create(struct deh_mgr **ret_deh,
58 } 109 }
59 ntfy_init(deh->ntfy_obj); 110 ntfy_init(deh->ntfy_obj);
60 111
112 /* Create a MMUfault DPC */
113 tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
114
61 /* Fill in context structure */ 115 /* Fill in context structure */
62 deh->hbridge_context = hbridge_context; 116 deh->hbridge_context = hbridge_context;
63 117
118 /* Install ISR function for DSP MMU fault */
119 status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
120 "DspBridge\tiommu fault", deh);
121 if (status < 0)
122 goto err;
123
64 *ret_deh = deh; 124 *ret_deh = deh;
65 return 0; 125 return 0;
66 126
@@ -80,6 +140,11 @@ int bridge_deh_destroy(struct deh_mgr *deh)
80 ntfy_delete(deh->ntfy_obj); 140 ntfy_delete(deh->ntfy_obj);
81 kfree(deh->ntfy_obj); 141 kfree(deh->ntfy_obj);
82 } 142 }
143 /* Disable DSP MMU fault */
144 free_irq(INT_DSP_MMU_IRQ, deh);
145
146 /* Free DPC object */
147 tasklet_kill(&deh->dpc_tasklet);
83 148
84 /* Deallocate the DEH manager object */ 149 /* Deallocate the DEH manager object */
85 kfree(deh); 150 kfree(deh);
@@ -101,6 +166,48 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask,
101 return ntfy_unregister(deh->ntfy_obj, hnotification); 166 return ntfy_unregister(deh->ntfy_obj, hnotification);
102} 167}
103 168
169#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
170static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
171{
172 struct cfg_hostres *resources;
173 struct hw_mmu_map_attrs_t map_attrs = {
174 .endianism = HW_LITTLE_ENDIAN,
175 .element_size = HW_ELEM_SIZE16BIT,
176 .mixed_size = HW_MMU_CPUES,
177 };
178 void *dummy_va_addr;
179
180 resources = dev_context->resources;
181 dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC);
182
183 /*
184 * Before acking the MMU fault, let's make sure MMU can only
185 * access entry #0. Then add a new entry so that the DSP OS
186 * can continue in order to dump the stack.
187 */
188 hw_mmu_twl_disable(resources->dw_dmmu_base);
189 hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
190
191 hw_mmu_tlb_add(resources->dw_dmmu_base,
192 virt_to_phys(dummy_va_addr), fault_addr,
193 HW_PAGE_SIZE4KB, 1,
194 &map_attrs, HW_SET, HW_SET);
195
196 dsp_clk_enable(DSP_CLK_GPT8);
197
198 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
199
200 /* Clear MMU interrupt */
201 hw_mmu_event_ack(resources->dw_dmmu_base,
202 HW_MMU_TRANSLATION_FAULT);
203 dump_dsp_stack(dev_context);
204 dsp_clk_disable(DSP_CLK_GPT8);
205
206 hw_mmu_disable(resources->dw_dmmu_base);
207 free_page((unsigned long)dummy_va_addr);
208}
209#endif
210
104static inline const char *event_to_string(int event) 211static inline const char *event_to_string(int event)
105{ 212{
106 switch (event) { 213 switch (event) {
@@ -133,7 +240,13 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
133#endif 240#endif
134 break; 241 break;
135 case DSP_MMUFAULT: 242 case DSP_MMUFAULT:
136 dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info); 243 dev_err(bridge, "%s: %s, addr=0x%x", __func__,
244 str, fault_addr);
245#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
246 print_dsp_trace_buffer(dev_context);
247 dump_dl_modules(dev_context);
248 mmu_fault_print_stack(dev_context);
249#endif
137 break; 250 break;
138 default: 251 default:
139 dev_err(bridge, "%s: %s", __func__, str); 252 dev_err(bridge, "%s: %s", __func__, str);
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h
new file mode 100644
index 00000000000..e48d7f67c60
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h
@@ -0,0 +1,41 @@
1/*
2 * EasiGlobal.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _EASIGLOBAL_H
18#define _EASIGLOBAL_H
19#include <linux/types.h>
20
21/*
22 * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE
23 *
24 * DESCRIPTION: Defines used to describe register types for EASI-checker tests.
25 */
26
27#define READ_ONLY 1
28#define WRITE_ONLY 2
29#define READ_WRITE 3
30
31/*
32 * MACRO: _DEBUG_LEVEL1_EASI
33 *
 34 * DESCRIPTION: A MACRO which can be used to indicate that a particular
 35 * register access function was called.
36 *
 37 * NOTE: We currently don't use this functionality.
38 */
39#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0)
40
41#endif /* _EASIGLOBAL_H */
diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h
new file mode 100644
index 00000000000..1cefca321d7
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMUAccInt.h
@@ -0,0 +1,76 @@
1/*
2 * MMUAccInt.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _MMU_ACC_INT_H
18#define _MMU_ACC_INT_H
19
20/* Mappings of level 1 EASI function numbers to function names */
21
22#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3)
23#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17)
24#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39)
25#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51)
26#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102)
27#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103)
28#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156)
29#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174)
30#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180)
31#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190)
32#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194)
33#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198)
34#define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203)
35#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204)
36#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205)
37#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209)
38#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211)
39#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212)
40#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213)
41#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214)
42#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226)
43#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268)
44#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322)
45
46/* Register offset address definitions */
47#define MMU_MMU_SYSCONFIG_OFFSET 0x10
48#define MMU_MMU_IRQSTATUS_OFFSET 0x18
49#define MMU_MMU_IRQENABLE_OFFSET 0x1c
50#define MMU_MMU_WALKING_ST_OFFSET 0x40
51#define MMU_MMU_CNTL_OFFSET 0x44
52#define MMU_MMU_FAULT_AD_OFFSET 0x48
53#define MMU_MMU_TTB_OFFSET 0x4c
54#define MMU_MMU_LOCK_OFFSET 0x50
55#define MMU_MMU_LD_TLB_OFFSET 0x54
56#define MMU_MMU_CAM_OFFSET 0x58
57#define MMU_MMU_RAM_OFFSET 0x5c
58#define MMU_MMU_GFLUSH_OFFSET 0x60
59#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64
60/* Bitfield mask and offset declarations */
61#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18
62#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3
63#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1
64#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0
65#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1
66#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0
67#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4
68#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2
69#define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2
70#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1
71#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
72#define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10
73#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0
74#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4
75
76#endif /* _MMU_ACC_INT_H */
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h
new file mode 100644
index 00000000000..ab1a16da731
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h
@@ -0,0 +1,225 @@
1/*
2 * MMURegAcM.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _MMU_REG_ACM_H
18#define _MMU_REG_ACM_H
19
20#include <linux/io.h>
21#include <EasiGlobal.h>
22
23#include "MMUAccInt.h"
24
25#if defined(USE_LEVEL_1_MACROS)
26
27#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
28 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
29 __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
30
31#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
32{\
33 const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
34 register u32 data = __raw_readl((base_address)+offset);\
35 register u32 new_value = (value);\
36 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
37 data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
38 new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
39 new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
40 new_value |= data;\
41 __raw_writel(new_value, base_address+offset);\
42}
43
44#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
45{\
46 const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
47 register u32 data = __raw_readl((base_address)+offset);\
48 register u32 new_value = (value);\
49 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
50 data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
51 new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
52 new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
53 new_value |= data;\
54 __raw_writel(new_value, base_address+offset);\
55}
56
57#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
 58 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_READ_REGISTER32),\
59 __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
60
61#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
62{\
63 const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
64 register u32 new_value = (value);\
65 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
66 __raw_writel(new_value, (base_address)+offset);\
67}
68
69#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
70 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
71 __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
72
73#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
74{\
75 const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
76 register u32 new_value = (value);\
77 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
78 __raw_writel(new_value, (base_address)+offset);\
79}
80
81#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
82 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
83 (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
84 & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
85 MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
86
87#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
88 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
89 (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
90 MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
91 MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
92
93#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
94{\
95 const u32 offset = MMU_MMU_CNTL_OFFSET;\
96 register u32 data = __raw_readl((base_address)+offset);\
97 register u32 new_value = (value);\
98 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
99 data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
100 new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
101 new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
102 new_value |= data;\
103 __raw_writel(new_value, base_address+offset);\
104}
105
106#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
107{\
108 const u32 offset = MMU_MMU_CNTL_OFFSET;\
109 register u32 data = __raw_readl((base_address)+offset);\
110 register u32 new_value = (value);\
111 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
112 data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
113 new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
114 new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
115 new_value |= data;\
116 __raw_writel(new_value, base_address+offset);\
117}
118
119#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
120 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
121 __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
122
123#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
124{\
125 const u32 offset = MMU_MMU_TTB_OFFSET;\
126 register u32 new_value = (value);\
127 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
128 __raw_writel(new_value, (base_address)+offset);\
129}
130
131#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
132 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
133 __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
134
135#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
136{\
137 const u32 offset = MMU_MMU_LOCK_OFFSET;\
138 register u32 new_value = (value);\
139 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
140 __raw_writel(new_value, (base_address)+offset);\
141}
142
143#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
144 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
145 (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
146 MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
147 MMU_MMU_LOCK_BASE_VALUE_OFFSET))
148
149#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
150{\
151 const u32 offset = MMU_MMU_LOCK_OFFSET;\
152 register u32 data = __raw_readl((base_address)+offset);\
153 register u32 new_value = (value);\
 154 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_WRITE32);\
155 data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
156 new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
157 new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
158 new_value |= data;\
159 __raw_writel(new_value, base_address+offset);\
160}
161
162#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
163 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
164 (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
165 MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
166 MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
167
168#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
169{\
170 const u32 offset = MMU_MMU_LOCK_OFFSET;\
171 register u32 data = __raw_readl((base_address)+offset);\
172 register u32 new_value = (value);\
173 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
174 data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
175 new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
176 new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
177 new_value |= data;\
178 __raw_writel(new_value, base_address+offset);\
179}
180
181#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
182 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\
183 (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
184 (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
185 MMU_MMU_LOCK_CURRENT_VICTIM_MASK)))
186
187#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
188 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
189 __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
190
191#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
192{\
193 const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
194 register u32 new_value = (value);\
195 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
196 __raw_writel(new_value, (base_address)+offset);\
197}
198
199#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
200{\
201 const u32 offset = MMU_MMU_CAM_OFFSET;\
202 register u32 new_value = (value);\
203 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
204 __raw_writel(new_value, (base_address)+offset);\
205}
206
207#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
208{\
209 const u32 offset = MMU_MMU_RAM_OFFSET;\
210 register u32 new_value = (value);\
211 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
212 __raw_writel(new_value, (base_address)+offset);\
213}
214
215#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
216{\
217 const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
218 register u32 new_value = (value);\
219 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
220 __raw_writel(new_value, (base_address)+offset);\
221}
222
223#endif /* USE_LEVEL_1_MACROS */
224
225#endif /* _MMU_REG_ACM_H */
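Every *_WRITE32 macro above follows the same read-modify-write shape: read the register, clear the target field with its _MASK, shift the new value into place by its _OFFSET, and write the merged word back. As an illustration only, MMUMMU_CNTLMMU_ENABLE_WRITE32(base, HW_SET) expands to roughly the following (base stands for a hypothetical ioremapped MMU base address):

	u32 data = __raw_readl(base + MMU_MMU_CNTL_OFFSET);	/* current CNTL */

	data &= ~MMU_MMU_CNTL_MMU_ENABLE_MASK;			/* clear bit 1 */
	data |= (HW_SET << MMU_MMU_CNTL_MMU_ENABLE_OFFSET)	/* 1 << 1 = 0x2 */
		& MMU_MMU_CNTL_MMU_ENABLE_MASK;
	__raw_writel(data, base + MMU_MMU_CNTL_OFFSET);		/* MMU enabled */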
diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h
new file mode 100644
index 00000000000..d5266d4c163
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_defs.h
@@ -0,0 +1,58 @@
1/*
2 * hw_defs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Global HW definitions
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _HW_DEFS_H
20#define _HW_DEFS_H
21
22/* Page size */
23#define HW_PAGE_SIZE4KB 0x1000
24#define HW_PAGE_SIZE64KB 0x10000
25#define HW_PAGE_SIZE1MB 0x100000
26#define HW_PAGE_SIZE16MB 0x1000000
27
28/* hw_status: return type for HW API */
29typedef long hw_status;
30
31/* Macro used to set and clear any bit */
32#define HW_CLEAR 0
33#define HW_SET 1
34
35/* hw_endianism_t: Enumerated Type used to specify the endianism
36 * Do NOT change these values. They are used as bit fields. */
37enum hw_endianism_t {
38 HW_LITTLE_ENDIAN,
39 HW_BIG_ENDIAN
40};
41
42/* hw_element_size_t: Enumerated Type used to specify the element size
43 * Do NOT change these values. They are used as bit fields. */
44enum hw_element_size_t {
45 HW_ELEM_SIZE8BIT,
46 HW_ELEM_SIZE16BIT,
47 HW_ELEM_SIZE32BIT,
48 HW_ELEM_SIZE64BIT
49};
50
51/* hw_idle_mode_t: Enumerated Type used to specify Idle modes */
52enum hw_idle_mode_t {
53 HW_FORCE_IDLE,
54 HW_NO_IDLE,
55 HW_SMART_IDLE
56};
57
58#endif /* _HW_DEFS_H */
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c
new file mode 100644
index 00000000000..014f5d5293a
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.c
@@ -0,0 +1,562 @@
1/*
2 * hw_mmu.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * API definitions to setup MMU TLB and PTE
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <linux/io.h>
20#include "MMURegAcM.h"
21#include <hw_defs.h>
22#include <hw_mmu.h>
23#include <linux/types.h>
24#include <linux/err.h>
25
26#define MMU_BASE_VAL_MASK 0xFC00
27#define MMU_PAGE_MAX 3
28#define MMU_ELEMENTSIZE_MAX 3
29#define MMU_ADDR_MASK 0xFFFFF000
30#define MMU_TTB_MASK 0xFFFFC000
31#define MMU_SECTION_ADDR_MASK 0xFFF00000
32#define MMU_SSECTION_ADDR_MASK 0xFF000000
33#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
34#define MMU_LARGE_PAGE_MASK 0xFFFF0000
35#define MMU_SMALL_PAGE_MASK 0xFFFFF000
36
37#define MMU_LOAD_TLB 0x00000001
38#define MMU_GFLUSH 0x60
39
40/*
41 * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size(SLSS)
42 */
43enum hw_mmu_page_size_t {
44 HW_MMU_SECTION,
45 HW_MMU_LARGE_PAGE,
46 HW_MMU_SMALL_PAGE,
47 HW_MMU_SUPERSECTION
48};
49
50/*
51 * FUNCTION : mmu_flush_entry
52 *
53 * INPUTS:
54 *
55 * Identifier : base_address
56 * Type : const u32
57 * Description : Base Address of instance of MMU module
58 *
59 * RETURNS:
60 *
61 * Type : hw_status
 62 * Description : 0 -- No errors occurred
 63 * RET_BAD_NULL_PARAM -- A Pointer
 64 * Parameter was set to NULL
65 *
66 * PURPOSE: : Flush the TLB entry pointed by the
67 * lock counter register
68 * even if this entry is set protected
69 *
70 * METHOD: : Check the Input parameter and Flush a
71 * single entry in the TLB.
72 */
73static hw_status mmu_flush_entry(const void __iomem *base_address);
74
75/*
76 * FUNCTION : mmu_set_cam_entry
77 *
78 * INPUTS:
79 *
80 * Identifier : base_address
 81 * Type : const u32
82 * Description : Base Address of instance of MMU module
83 *
84 * Identifier : page_sz
 85 * Type : const u32
86 * Description : It indicates the page size
87 *
88 * Identifier : preserved_bit
89 * Type : const u32
90 * Description : It indicates the TLB entry is preserved entry
91 * or not
92 *
93 * Identifier : valid_bit
94 * Type : const u32
95 * Description : It indicates the TLB entry is valid entry or not
96 *
97 *
98 * Identifier : virtual_addr_tag
99 * Type : const u32
100 * Description : virtual Address
101 *
102 * RETURNS:
103 *
104 * Type : hw_status
 105 * Description : 0 -- No errors occurred
 106 * RET_BAD_NULL_PARAM -- A Pointer Parameter
107 * was set to NULL
108 * RET_PARAM_OUT_OF_RANGE -- Input Parameter out
109 * of Range
110 *
111 * PURPOSE: : Set MMU_CAM reg
112 *
113 * METHOD: : Check the Input parameters and set the CAM entry.
114 */
115static hw_status mmu_set_cam_entry(const void __iomem *base_address,
116 const u32 page_sz,
117 const u32 preserved_bit,
118 const u32 valid_bit,
119 const u32 virtual_addr_tag);
120
121/*
122 * FUNCTION : mmu_set_ram_entry
123 *
124 * INPUTS:
125 *
126 * Identifier : base_address
127 * Type : const u32
128 * Description : Base Address of instance of MMU module
129 *
130 * Identifier : physical_addr
131 * Type : const u32
132 * Description : Physical Address to which the corresponding
 133 * virtual Address should point
134 *
135 * Identifier : endianism
136 * Type : hw_endianism_t
137 * Description : endianism for the given page
138 *
139 * Identifier : element_size
140 * Type : hw_element_size_t
141 * Description : The element size ( 8,16, 32 or 64 bit)
142 *
143 * Identifier : mixed_size
144 * Type : hw_mmu_mixed_size_t
145 * Description : Element Size to follow CPU or TLB
146 *
147 * RETURNS:
148 *
149 * Type : hw_status
 150 * Description : 0 -- No errors occurred
 151 * RET_BAD_NULL_PARAM -- A Pointer Parameter
152 * was set to NULL
153 * RET_PARAM_OUT_OF_RANGE -- Input Parameter
154 * out of Range
155 *
156 * PURPOSE: : Set MMU_CAM reg
157 *
158 * METHOD: : Check the Input parameters and set the RAM entry.
159 */
160static hw_status mmu_set_ram_entry(const void __iomem *base_address,
161 const u32 physical_addr,
162 enum hw_endianism_t endianism,
163 enum hw_element_size_t element_size,
164 enum hw_mmu_mixed_size_t mixed_size);
165
166/* HW FUNCTIONS */
167
168hw_status hw_mmu_enable(const void __iomem *base_address)
169{
170 hw_status status = 0;
171
172 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
173
174 return status;
175}
176
177hw_status hw_mmu_disable(const void __iomem *base_address)
178{
179 hw_status status = 0;
180
181 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
182
183 return status;
184}
185
186hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
187 u32 num_locked_entries)
188{
189 hw_status status = 0;
190
191 MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
192
193 return status;
194}
195
196hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
197 u32 victim_entry_num)
198{
199 hw_status status = 0;
200
201 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
202
203 return status;
204}
205
206hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
207{
208 hw_status status = 0;
209
210 MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
211
212 return status;
213}
214
215hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
216{
217 hw_status status = 0;
218 u32 irq_reg;
219
220 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
221
222 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
223
224 return status;
225}
226
227hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
228{
229 hw_status status = 0;
230 u32 irq_reg;
231
232 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
233
234 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
235
236 return status;
237}
238
239hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
240{
241 hw_status status = 0;
242
243 *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
244
245 return status;
246}
247
248hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
249{
250 hw_status status = 0;
251
252 /* read values from register */
253 *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
254
255 return status;
256}
257
258hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
259{
260 hw_status status = 0;
261 u32 load_ttb;
262
263 load_ttb = ttb_phys_addr & ~0x7FUL;
264 /* write values to register */
265 MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
266
267 return status;
268}
269
270hw_status hw_mmu_twl_enable(const void __iomem *base_address)
271{
272 hw_status status = 0;
273
274 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
275
276 return status;
277}
278
279hw_status hw_mmu_twl_disable(const void __iomem *base_address)
280{
281 hw_status status = 0;
282
283 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
284
285 return status;
286}
287
288hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
289 u32 page_sz)
290{
291 hw_status status = 0;
292 u32 virtual_addr_tag;
293 enum hw_mmu_page_size_t pg_size_bits;
294
295 switch (page_sz) {
296 case HW_PAGE_SIZE4KB:
297 pg_size_bits = HW_MMU_SMALL_PAGE;
298 break;
299
300 case HW_PAGE_SIZE64KB:
301 pg_size_bits = HW_MMU_LARGE_PAGE;
302 break;
303
304 case HW_PAGE_SIZE1MB:
305 pg_size_bits = HW_MMU_SECTION;
306 break;
307
308 case HW_PAGE_SIZE16MB:
309 pg_size_bits = HW_MMU_SUPERSECTION;
310 break;
311
312 default:
313 return -EINVAL;
314 }
315
316 /* Generate the 20-bit tag from virtual address */
317 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
318
319 mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
320
321 mmu_flush_entry(base_address);
322
323 return status;
324}
325
326hw_status hw_mmu_tlb_add(const void __iomem *base_address,
327 u32 physical_addr,
328 u32 virtual_addr,
329 u32 page_sz,
330 u32 entry_num,
331 struct hw_mmu_map_attrs_t *map_attrs,
332 s8 preserved_bit, s8 valid_bit)
333{
334 hw_status status = 0;
335 u32 lock_reg;
336 u32 virtual_addr_tag;
337 enum hw_mmu_page_size_t mmu_pg_size;
338
339 /*Check the input Parameters */
340 switch (page_sz) {
341 case HW_PAGE_SIZE4KB:
342 mmu_pg_size = HW_MMU_SMALL_PAGE;
343 break;
344
345 case HW_PAGE_SIZE64KB:
346 mmu_pg_size = HW_MMU_LARGE_PAGE;
347 break;
348
349 case HW_PAGE_SIZE1MB:
350 mmu_pg_size = HW_MMU_SECTION;
351 break;
352
353 case HW_PAGE_SIZE16MB:
354 mmu_pg_size = HW_MMU_SUPERSECTION;
355 break;
356
357 default:
358 return -EINVAL;
359 }
360
361 lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
362
363 /* Generate the 20-bit tag from virtual address */
364 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
365
366 /* Write the fields in the CAM Entry Register */
367 mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
368 virtual_addr_tag);
369
370 /* Write the different fields of the RAM Entry Register */
371 /* endianism of the page,Element Size of the page (8, 16, 32, 64 bit) */
372 mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
373 map_attrs->element_size, map_attrs->mixed_size);
374
375 /* Update the MMU Lock Register */
376 /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
377 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
378
379 /* Enable loading of an entry in TLB by writing 1
380 into LD_TLB_REG register */
381 MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
382
383 MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
384
385 return status;
386}
387
388hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
389 u32 physical_addr,
390 u32 virtual_addr,
391 u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
392{
393 hw_status status = 0;
394 u32 pte_addr, pte_val;
395 s32 num_entries = 1;
396
397 switch (page_sz) {
398 case HW_PAGE_SIZE4KB:
399 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
400 virtual_addr &
401 MMU_SMALL_PAGE_MASK);
402 pte_val =
403 ((physical_addr & MMU_SMALL_PAGE_MASK) |
404 (map_attrs->endianism << 9) | (map_attrs->
405 element_size << 4) |
406 (map_attrs->mixed_size << 11) | 2);
407 break;
408
409 case HW_PAGE_SIZE64KB:
410 num_entries = 16;
411 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
412 virtual_addr &
413 MMU_LARGE_PAGE_MASK);
414 pte_val =
415 ((physical_addr & MMU_LARGE_PAGE_MASK) |
416 (map_attrs->endianism << 9) | (map_attrs->
417 element_size << 4) |
418 (map_attrs->mixed_size << 11) | 1);
419 break;
420
421 case HW_PAGE_SIZE1MB:
422 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
423 virtual_addr &
424 MMU_SECTION_ADDR_MASK);
425 pte_val =
426 ((((physical_addr & MMU_SECTION_ADDR_MASK) |
427 (map_attrs->endianism << 15) | (map_attrs->
428 element_size << 10) |
429 (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
430 break;
431
432 case HW_PAGE_SIZE16MB:
433 num_entries = 16;
434 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
435 virtual_addr &
436 MMU_SSECTION_ADDR_MASK);
437 pte_val =
438 (((physical_addr & MMU_SSECTION_ADDR_MASK) |
439 (map_attrs->endianism << 15) | (map_attrs->
440 element_size << 10) |
441 (map_attrs->mixed_size << 17)
442 ) | 0x40000 | 0x2);
443 break;
444
445 case HW_MMU_COARSE_PAGE_SIZE:
446 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
447 virtual_addr &
448 MMU_SECTION_ADDR_MASK);
449 pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
450 break;
451
452 default:
453 return -EINVAL;
454 }
455
456 while (--num_entries >= 0)
457 ((u32 *) pte_addr)[num_entries] = pte_val;
458
459 return status;
460}
461
462hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
463{
464 hw_status status = 0;
465 u32 pte_addr;
466 s32 num_entries = 1;
467
468 switch (page_size) {
469 case HW_PAGE_SIZE4KB:
470 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
471 virtual_addr &
472 MMU_SMALL_PAGE_MASK);
473 break;
474
475 case HW_PAGE_SIZE64KB:
476 num_entries = 16;
477 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
478 virtual_addr &
479 MMU_LARGE_PAGE_MASK);
480 break;
481
482 case HW_PAGE_SIZE1MB:
483 case HW_MMU_COARSE_PAGE_SIZE:
484 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
485 virtual_addr &
486 MMU_SECTION_ADDR_MASK);
487 break;
488
489 case HW_PAGE_SIZE16MB:
490 num_entries = 16;
491 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
492 virtual_addr &
493 MMU_SSECTION_ADDR_MASK);
494 break;
495
496 default:
497 return -EINVAL;
498 }
499
500 while (--num_entries >= 0)
501 ((u32 *) pte_addr)[num_entries] = 0;
502
503 return status;
504}
505
506/* mmu_flush_entry */
507static hw_status mmu_flush_entry(const void __iomem *base_address)
508{
509 hw_status status = 0;
510 u32 flush_entry_data = 0x1;
511
512 /* write values to register */
513 MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
514
515 return status;
516}
517
518/* mmu_set_cam_entry */
519static hw_status mmu_set_cam_entry(const void __iomem *base_address,
520 const u32 page_sz,
521 const u32 preserved_bit,
522 const u32 valid_bit,
523 const u32 virtual_addr_tag)
524{
525 hw_status status = 0;
526 u32 mmu_cam_reg;
527
528 mmu_cam_reg = (virtual_addr_tag << 12);
529 mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
530 (preserved_bit << 3);
531
532 /* write values to register */
533 MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
534
535 return status;
536}
537
538/* mmu_set_ram_entry */
539static hw_status mmu_set_ram_entry(const void __iomem *base_address,
540 const u32 physical_addr,
541 enum hw_endianism_t endianism,
542 enum hw_element_size_t element_size,
543 enum hw_mmu_mixed_size_t mixed_size)
544{
545 hw_status status = 0;
546 u32 mmu_ram_reg;
547
548 mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
549 mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
550 (mixed_size << 6));
551
552 /* write values to register */
553 MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
554
555 return status;
556
557}
558
559void hw_mmu_tlb_flush_all(const void __iomem *base)
560{
561 __raw_writeb(1, base + MMU_GFLUSH);
562}
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h
new file mode 100644
index 00000000000..1458a2c6027
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.h
@@ -0,0 +1,163 @@
1/*
2 * hw_mmu.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * MMU types and API declarations
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _HW_MMU_H
20#define _HW_MMU_H
21
22#include <linux/types.h>
23
24/* Bitmasks for interrupt sources */
25#define HW_MMU_TRANSLATION_FAULT 0x2
26#define HW_MMU_ALL_INTERRUPTS 0x1F
27
28#define HW_MMU_COARSE_PAGE_SIZE 0x400
29
30/* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow
31 CPU/TLB Element size */
32enum hw_mmu_mixed_size_t {
33 HW_MMU_TLBES,
34 HW_MMU_CPUES
35};
36
37/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */
38struct hw_mmu_map_attrs_t {
39 enum hw_endianism_t endianism;
40 enum hw_element_size_t element_size;
41 enum hw_mmu_mixed_size_t mixed_size;
42 bool donotlockmpupage;
43};
44
45extern hw_status hw_mmu_enable(const void __iomem *base_address);
46
47extern hw_status hw_mmu_disable(const void __iomem *base_address);
48
49extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
50 u32 num_locked_entries);
51
52extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
53 u32 victim_entry_num);
54
55/* For MMU faults */
56extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
57 u32 irq_mask);
58
59extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
60 u32 irq_mask);
61
62extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
63 u32 irq_mask);
64
65extern hw_status hw_mmu_event_status(const void __iomem *base_address,
66 u32 *irq_mask);
67
68extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
69 u32 *addr);
70
71/* Set the TT base address */
72extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
73 u32 ttb_phys_addr);
74
75extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
76
77extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
78
79extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
80 u32 virtual_addr, u32 page_sz);
81
82extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
83 u32 physical_addr,
84 u32 virtual_addr,
85 u32 page_sz,
86 u32 entry_num,
87 struct hw_mmu_map_attrs_t *map_attrs,
88 s8 preserved_bit, s8 valid_bit);
89
90/* For PTEs */
91extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
92 u32 physical_addr,
93 u32 virtual_addr,
94 u32 page_sz,
95 struct hw_mmu_map_attrs_t *map_attrs);
96
97extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
98 u32 virtual_addr, u32 page_size);
99
100void hw_mmu_tlb_flush_all(const void __iomem *base);
101
102static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
103{
104 u32 pte_addr;
105 u32 va31_to20;
106
107 va31_to20 = va >> (20 - 2); /* Left-shift by 2 here itself */
108 va31_to20 &= 0xFFFFFFFCUL;
109 pte_addr = l1_base + va31_to20;
110
111 return pte_addr;
112}
113
114static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
115{
116 u32 pte_addr;
117
118 pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
119
120 return pte_addr;
121}
122
123static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
124{
125 u32 pte_coarse;
126
127 pte_coarse = pte_val & 0xFFFFFC00;
128
129 return pte_coarse;
130}
131
132static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
133{
134 u32 pte_size = 0;
135
136 if ((pte_val & 0x3) == 0x1) {
137 /* Points to L2 PT */
138 pte_size = HW_MMU_COARSE_PAGE_SIZE;
139 }
140
141 if ((pte_val & 0x3) == 0x2) {
142 if (pte_val & (1 << 18))
143 pte_size = HW_PAGE_SIZE16MB;
144 else
145 pte_size = HW_PAGE_SIZE1MB;
146 }
147
148 return pte_size;
149}
150
151static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
152{
153 u32 pte_size = 0;
154
155 if (pte_val & 0x2)
156 pte_size = HW_PAGE_SIZE4KB;
157 else if (pte_val & 0x1)
158 pte_size = HW_PAGE_SIZE64KB;
159
160 return pte_size;
161}
162
163#endif /* _HW_MMU_H */
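Taken together with hw_mmu.c above, this header exposes the sequence the DEH fault handler relies on: read the faulting address, program a TLB entry that covers it, and acknowledge the interrupt. A condensed sketch of that flow (dmmu_base and page_pa are placeholders; in this patch the base comes from cfg_hostres->dw_dmmu_base):

	u32 fault_addr;
	struct hw_mmu_map_attrs_t attrs = {
		.endianism	= HW_LITTLE_ENDIAN,
		.element_size	= HW_ELEM_SIZE16BIT,
		.mixed_size	= HW_MMU_CPUES,
	};

	hw_mmu_fault_addr_read(dmmu_base, &fault_addr);
	hw_mmu_tlb_add(dmmu_base, page_pa, fault_addr, HW_PAGE_SIZE4KB,
		       0 /* entry_num */, &attrs, HW_SET, HW_SET);
	hw_mmu_event_ack(dmmu_base, HW_MMU_TRANSLATION_FAULT);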
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
index dfb55cca34c..38122dbf877 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
@@ -68,6 +68,7 @@ struct cfg_hostres {
68 void __iomem *dw_per_base; 68 void __iomem *dw_per_base;
69 u32 dw_per_pm_base; 69 u32 dw_per_pm_base;
70 u32 dw_core_pm_base; 70 u32 dw_core_pm_base;
71 void __iomem *dw_dmmu_base;
71 void __iomem *dw_sys_ctrl_base; 72 void __iomem *dw_sys_ctrl_base;
72}; 73};
73 74
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
index 9bdd48f5742..357458fadd2 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dev.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -27,6 +27,7 @@
27#include <dspbridge/nodedefs.h> 27#include <dspbridge/nodedefs.h>
28#include <dspbridge/dispdefs.h> 28#include <dspbridge/dispdefs.h>
29#include <dspbridge/dspdefs.h> 29#include <dspbridge/dspdefs.h>
30#include <dspbridge/dmm.h>
30#include <dspbridge/host_os.h> 31#include <dspbridge/host_os.h>
31 32
32/* ----------------------------------- This */ 33/* ----------------------------------- This */
@@ -233,6 +234,29 @@ extern int dev_get_cmm_mgr(struct dev_object *hdev_obj,
233 struct cmm_object **mgr); 234 struct cmm_object **mgr);
234 235
235/* 236/*
237 * ======== dev_get_dmm_mgr ========
238 * Purpose:
239 * Retrieve the handle to the dynamic memory manager created for this
240 * device.
241 * Parameters:
242 * hdev_obj: Handle to device object created with
243 * dev_create_device().
244 * *mgr: Ptr to location to store handle.
245 * Returns:
246 * 0: Success.
247 * -EFAULT: Invalid hdev_obj.
248 * Requires:
249 * mgr != NULL.
250 * DEV Initialized.
251 * Ensures:
 252 * 0: *mgr contains a handle to a dynamic memory manager object,
253 * or NULL.
254 * else: *mgr is NULL.
255 */
256extern int dev_get_dmm_mgr(struct dev_object *hdev_obj,
257 struct dmm_object **mgr);
258
259/*
236 * ======== dev_get_cod_mgr ======== 260 * ======== dev_get_cod_mgr ========
237 * Purpose: 261 * Purpose:
238 * Retrieve the COD manager create for this device. 262 * Retrieve the COD manager create for this device.
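dev_get_dmm_mgr() follows the same accessor convention as dev_get_cmm_mgr(): the caller passes the device object and a location for the handle, and must check both the return code and the returned pointer, which may legitimately be NULL. A minimal caller sketch (hdev_obj is assumed to be a valid device handle):

	struct dmm_object *dmm_mgr;

	if (!dev_get_dmm_mgr(hdev_obj, &dmm_mgr) && dmm_mgr) {
		/* e.g. dmm_reserve_memory(dmm_mgr, size, &rsv_addr); */
	}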
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
new file mode 100644
index 00000000000..6c58335c5f6
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
@@ -0,0 +1,75 @@
1/*
2 * dmm.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
 6 * The Dynamic Memory Mapping (DMM) module manages the DSP Virtual address
7 * space that can be directly mapped to any MPU buffer or memory region.
8 *
9 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 *
11 * This package is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
17 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 */
19
20#ifndef DMM_
21#define DMM_
22
23#include <dspbridge/dbdefs.h>
24
25struct dmm_object;
26
27/* DMM attributes used in dmm_create() */
28struct dmm_mgrattrs {
29 u32 reserved;
30};
31
32#define DMMPOOLSIZE 0x4000000
33
34/*
35 * ======== dmm_get_handle ========
36 * Purpose:
37 * Return the dynamic memory manager object for this device.
38 * This is typically called from the client process.
39 */
40
41extern int dmm_get_handle(void *hprocessor,
42 struct dmm_object **dmm_manager);
43
44extern int dmm_reserve_memory(struct dmm_object *dmm_mgr,
45 u32 size, u32 *prsv_addr);
46
47extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr,
48 u32 rsv_addr);
49
50extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr,
51 u32 size);
52
53extern int dmm_un_map_memory(struct dmm_object *dmm_mgr,
54 u32 addr, u32 *psize);
55
56extern int dmm_destroy(struct dmm_object *dmm_mgr);
57
58extern int dmm_delete_tables(struct dmm_object *dmm_mgr);
59
60extern int dmm_create(struct dmm_object **dmm_manager,
61 struct dev_object *hdev_obj,
62 const struct dmm_mgrattrs *mgr_attrts);
63
64extern bool dmm_init(void);
65
66extern void dmm_exit(void);
67
68extern int dmm_create_tables(struct dmm_object *dmm_mgr,
69 u32 addr, u32 size);
70
71#ifdef DSP_DMM_DEBUG
72u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr);
73#endif
74
75#endif /* DMM_ */
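The declarations above define the lifetime of a DSP-side mapping region: reserve a virtually contiguous range, mark it mapped, and later unmap and unreserve in the reverse order. A hedged sketch of that sequence (error handling trimmed, the 64 KB size is arbitrary):

	struct dmm_object *dmm_mgr;
	u32 rsv_addr, unmapped_size;

	dev_get_dmm_mgr(hdev_obj, &dmm_mgr);
	dmm_reserve_memory(dmm_mgr, 0x10000, &rsv_addr);	/* reserve DSP VA */
	dmm_map_memory(dmm_mgr, rsv_addr, 0x10000);		/* mark it mapped */
	/* ... the bridge's brd_mem_map/hw_mmu_* code programs the MMU ... */
	dmm_un_map_memory(dmm_mgr, rsv_addr, &unmapped_size);
	dmm_un_reserve_memory(dmm_mgr, rsv_addr);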
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
index 75a2c9b5c6f..c1f363ec9af 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/drv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -108,6 +108,12 @@ struct dmm_map_object {
108 struct bridge_dma_map_info dma_info; 108 struct bridge_dma_map_info dma_info;
109}; 109};
110 110
111/* Used for DMM reserved memory accounting */
112struct dmm_rsv_object {
113 struct list_head link;
114 u32 dsp_reserved_addr;
115};
116
111/* New structure (member of process context) abstracts DMM resource info */ 117/* New structure (member of process context) abstracts DMM resource info */
112struct dspheap_res_object { 118struct dspheap_res_object {
113 s32 heap_allocated; /* DMM status */ 119 s32 heap_allocated; /* DMM status */
@@ -159,6 +165,10 @@ struct process_context {
159 struct list_head dmm_map_list; 165 struct list_head dmm_map_list;
160 spinlock_t dmm_map_lock; 166 spinlock_t dmm_map_lock;
161 167
168 /* DMM reserved memory resources */
169 struct list_head dmm_rsv_list;
170 spinlock_t dmm_rsv_lock;
171
162 /* DSP Heap resources */ 172 /* DSP Heap resources */
163 struct dspheap_res_object *pdspheap_list; 173 struct dspheap_res_object *pdspheap_list;
164 174
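The new dmm_rsv_list/dmm_rsv_lock pair mirrors the existing dmm_map_list/dmm_map_lock: every reservation made on behalf of a process is recorded so it can be released when the process context is cleaned up. A sketch of the expected accounting (illustrative only; the real bookkeeping belongs in proc_reserve_memory()/proc_un_reserve_memory()):

	struct dmm_rsv_object *rsv;

	rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);
	if (rsv) {
		rsv->dsp_reserved_addr = rsv_addr;
		spin_lock(&pr_ctxt->dmm_rsv_lock);
		list_add(&rsv->link, &pr_ctxt->dmm_rsv_list);
		spin_unlock(&pr_ctxt->dmm_rsv_lock);
	}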
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h b/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
deleted file mode 100644
index cb38d4cc073..00000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * dsp-mmu.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP iommu.
7 *
8 * Copyright (C) 2005-2010 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _DSP_MMU_
20#define _DSP_MMU_
21
22#include <plat/iommu.h>
23#include <plat/iovmm.h>
24
25/**
26 * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
27 *
28 * This function initialize dsp mmu module and returns a struct iommu
29 * handle to use it for dsp maps.
30 *
31 */
32struct iommu *dsp_mmu_init(void);
33
34/**
35 * dsp_mmu_exit() - destroy dsp mmu module
36 * @mmu: Pointer to iommu handle.
37 *
38 * This function destroys dsp mmu module.
39 *
40 */
41void dsp_mmu_exit(struct iommu *mmu);
42
43/**
44 * user_to_dsp_map() - maps user to dsp virtual address
45 * @mmu: Pointer to iommu handle.
46 * @uva: Virtual user space address.
47 * @da DSP address
48 * @size Buffer size to map.
49 * @usr_pgs struct page array pointer where the user pages will be stored
50 *
51 * This function maps a user space buffer into DSP virtual address.
52 *
53 */
54u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
55 struct page **usr_pgs);
56
57/**
58 * user_to_dsp_unmap() - unmaps DSP virtual buffer.
59 * @mmu: Pointer to iommu handle.
60 * @da DSP address
61 *
62 * This function unmaps a user space buffer into DSP virtual address.
63 *
64 */
65int user_to_dsp_unmap(struct iommu *mmu, u32 da);
66
67#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
index 61536347481..0ae7d1646a1 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
@@ -162,6 +162,48 @@ typedef int(*fxn_brd_memwrite) (struct bridge_dev_context
162 u32 mem_type); 162 u32 mem_type);
163 163
164/* 164/*
165 * ======== bridge_brd_mem_map ========
166 * Purpose:
167 * Map a MPU memory region to a DSP/IVA memory space
168 * Parameters:
169 * dev_ctxt: Handle to Bridge driver defined device info.
170 * ul_mpu_addr: MPU memory region start address.
171 * virt_addr: DSP/IVA memory region u8 address.
172 * ul_num_bytes: Number of bytes to map.
173 * map_attrs: Mapping attributes (e.g. endianness).
174 * Returns:
175 * 0: Success.
176 * -EPERM: Other, unspecified error.
177 * Requires:
178 * dev_ctxt != NULL;
179 * Ensures:
180 */
181typedef int(*fxn_brd_memmap) (struct bridge_dev_context
182 * dev_ctxt, u32 ul_mpu_addr,
183 u32 virt_addr, u32 ul_num_bytes,
184 u32 map_attr,
185 struct page **mapped_pages);
186
187/*
188 * ======== bridge_brd_mem_un_map ========
189 * Purpose:
190 * UnMap an MPU memory region from DSP/IVA memory space
191 * Parameters:
192 * dev_ctxt: Handle to Bridge driver defined device info.
193 * virt_addr: DSP/IVA memory region u8 address.
194 * ul_num_bytes: Number of bytes to unmap.
195 * Returns:
196 * 0: Success.
197 * -EPERM: Other, unspecified error.
198 * Requires:
199 * dev_ctxt != NULL;
200 * Ensures:
201 */
202typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
203 * dev_ctxt,
204 u32 virt_addr, u32 ul_num_bytes);
205
206/*
165 * ======== bridge_brd_stop ======== 207 * ======== bridge_brd_stop ========
166 * Purpose: 208 * Purpose:
167 * Bring board to the BRD_STOPPED state. 209 * Bring board to the BRD_STOPPED state.
@@ -951,6 +993,8 @@ struct bridge_drv_interface {
951 fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ 993 fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */
952 fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ 994 fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */
953 fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ 995 fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */
996 fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */
997 fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem to DSP mem */
954 fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ 998 fxn_chnl_create pfn_chnl_create; /* Create channel manager. */
955 fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ 999 fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */
956 fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ 1000 fxn_chnl_open pfn_chnl_open; /* Create a new channel. */
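With the two new function-pointer types, a Bridge driver advertises its map and unmap entry points through the same interface table as its other operations, and store_interface_fxns() in dev.c (further down) copies them with STORE_FXN like any other slot. A hedged sketch of how a driver could populate the new members (the handler names are hypothetical):

	static struct bridge_drv_interface drv_interface_fxns = {
		/* ... existing entries ... */
		.pfn_brd_mem_map	= bridge_brd_mem_map,
		.pfn_brd_mem_un_map	= bridge_brd_mem_un_map,
		/* ... */
	};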
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
index bad180108ad..41e0594dff3 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
@@ -19,6 +19,10 @@
19#ifndef DSPIOCTL_ 19#ifndef DSPIOCTL_
20#define DSPIOCTL_ 20#define DSPIOCTL_
21 21
22/* ------------------------------------ Hardware Abstraction Layer */
23#include <hw_defs.h>
24#include <hw_mmu.h>
25
22/* 26/*
23 * Any IOCTLS at or above this value are reserved for standard Bridge driver 27 * Any IOCTLS at or above this value are reserved for standard Bridge driver
24 * interfaces. 28 * interfaces.
@@ -61,6 +65,9 @@ struct bridge_ioctl_extproc {
61 /* GPP virtual address. __va does not work for ioremapped addresses */ 65 /* GPP virtual address. __va does not work for ioremapped addresses */
62 u32 ul_gpp_va; 66 u32 ul_gpp_va;
63 u32 ul_size; /* Size of the mapped memory in bytes */ 67 u32 ul_size; /* Size of the mapped memory in bytes */
68 enum hw_endianism_t endianism;
69 enum hw_mmu_mixed_size_t mixed_mode;
70 enum hw_element_size_t elem_size;
64}; 71};
65 72
66#endif /* DSPIOCTL_ */ 73#endif /* DSPIOCTL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index 2d12aab6b5b..5e09fd165d9 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -551,6 +551,29 @@ extern int proc_map(void *hprocessor,
551 struct process_context *pr_ctxt); 551 struct process_context *pr_ctxt);
552 552
553/* 553/*
554 * ======== proc_reserve_memory ========
555 * Purpose:
556 * Reserve a virtually contiguous region of DSP address space.
557 * Parameters:
558 * hprocessor : The processor handle.
559 * ul_size : Size of the address space to reserve.
560 * pp_rsv_addr : Ptr to DSP side reserved u8 address.
561 * Returns:
562 * 0 : Success.
563 * -EFAULT : Invalid processor handle.
564 * -EPERM : General failure.
565 * -ENOMEM : Cannot reserve chunk of this size.
566 * Requires:
567 * pp_rsv_addr is not NULL
568 * PROC Initialized.
569 * Ensures:
570 * Details:
571 */
572extern int proc_reserve_memory(void *hprocessor,
573 u32 ul_size, void **pp_rsv_addr,
574 struct process_context *pr_ctxt);
575
576/*
554 * ======== proc_un_map ======== 577 * ======== proc_un_map ========
555 * Purpose: 578 * Purpose:
556 * Removes a MPU buffer mapping from the DSP address space. 579 * Removes a MPU buffer mapping from the DSP address space.
@@ -572,4 +595,27 @@ extern int proc_map(void *hprocessor,
572extern int proc_un_map(void *hprocessor, void *map_addr, 595extern int proc_un_map(void *hprocessor, void *map_addr,
573 struct process_context *pr_ctxt); 596 struct process_context *pr_ctxt);
574 597
598/*
599 * ======== proc_un_reserve_memory ========
600 * Purpose:
601 * Frees a previously reserved region of DSP address space.
602 * Parameters:
603 * hprocessor : The processor handle.
 604 * prsv_addr : Ptr to DSP side reserved u8 address.
605 * Returns:
606 * 0 : Success.
607 * -EFAULT : Invalid processor handle.
608 * -EPERM : General failure.
609 * -ENOENT : Cannot find a reserved region starting with this
610 * : address.
611 * Requires:
612 * prsv_addr is not NULL
613 * PROC Initialized.
614 * Ensures:
615 * Details:
616 */
617extern int proc_un_reserve_memory(void *hprocessor,
618 void *prsv_addr,
619 struct process_context *pr_ctxt);
620
575#endif /* PROC_ */ 621#endif /* PROC_ */
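proc_reserve_memory() and proc_un_reserve_memory() are the user-facing halves of the DMM flow: both take the process context so the reservation can be tracked per process (see dmm_rsv_list in drv.h above). A minimal caller sketch, assuming hprocessor and pr_ctxt are already valid:

	void *rsv_addr;
	int status;

	status = proc_reserve_memory(hprocessor, 0x10000, &rsv_addr, pr_ctxt);
	if (!status) {
		/* ... proc_map() MPU buffers into the reserved range ... */
		proc_un_reserve_memory(hprocessor, rsv_addr, pr_ctxt);
	}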
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 7b30267ef0e..132e960967b 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -34,6 +34,7 @@
34#include <dspbridge/cod.h> 34#include <dspbridge/cod.h>
35#include <dspbridge/drv.h> 35#include <dspbridge/drv.h>
36#include <dspbridge/proc.h> 36#include <dspbridge/proc.h>
37#include <dspbridge/dmm.h>
37 38
38/* ----------------------------------- Resource Manager */ 39/* ----------------------------------- Resource Manager */
39#include <dspbridge/mgr.h> 40#include <dspbridge/mgr.h>
@@ -74,6 +75,7 @@ struct dev_object {
74 struct msg_mgr *hmsg_mgr; /* Message manager. */ 75 struct msg_mgr *hmsg_mgr; /* Message manager. */
75 struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ 76 struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */
76 struct cmm_object *hcmm_mgr; /* SM memory manager. */ 77 struct cmm_object *hcmm_mgr; /* SM memory manager. */
78 struct dmm_object *dmm_mgr; /* Dynamic memory manager. */
77 struct ldr_module *module_obj; /* Bridge Module handle. */ 79 struct ldr_module *module_obj; /* Bridge Module handle. */
78 u32 word_size; /* DSP word size: quick access. */ 80 u32 word_size; /* DSP word size: quick access. */
79 struct drv_object *hdrv_obj; /* Driver Object */ 81 struct drv_object *hdrv_obj; /* Driver Object */
@@ -248,6 +250,9 @@ int dev_create_device(struct dev_object **device_obj,
248 /* Instantiate the DEH module */ 250 /* Instantiate the DEH module */
249 status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); 251 status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
250 } 252 }
 253 /* Create DMM mgr. */
254 status = dmm_create(&dev_obj->dmm_mgr,
255 (struct dev_object *)dev_obj, NULL);
251 } 256 }
252 /* Add the new DEV_Object to the global list: */ 257 /* Add the new DEV_Object to the global list: */
253 if (!status) { 258 if (!status) {
@@ -273,6 +278,8 @@ leave:
273 kfree(dev_obj->proc_list); 278 kfree(dev_obj->proc_list);
274 if (dev_obj->cod_mgr) 279 if (dev_obj->cod_mgr)
275 cod_delete(dev_obj->cod_mgr); 280 cod_delete(dev_obj->cod_mgr);
281 if (dev_obj->dmm_mgr)
282 dmm_destroy(dev_obj->dmm_mgr);
276 kfree(dev_obj); 283 kfree(dev_obj);
277 } 284 }
278 285
@@ -382,6 +389,11 @@ int dev_destroy_device(struct dev_object *hdev_obj)
382 dev_obj->hcmm_mgr = NULL; 389 dev_obj->hcmm_mgr = NULL;
383 } 390 }
384 391
392 if (dev_obj->dmm_mgr) {
393 dmm_destroy(dev_obj->dmm_mgr);
394 dev_obj->dmm_mgr = NULL;
395 }
396
385 /* Call the driver's bridge_dev_destroy() function: */ 397 /* Call the driver's bridge_dev_destroy() function: */
386 /* Require of DevDestroy */ 398 /* Require of DevDestroy */
387 if (dev_obj->hbridge_context) { 399 if (dev_obj->hbridge_context) {
@@ -462,6 +474,32 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
462} 474}
463 475
464/* 476/*
477 * ======== dev_get_dmm_mgr ========
478 * Purpose:
479 * Retrieve the handle to the dynamic memory manager created for this
480 * device.
481 */
482int dev_get_dmm_mgr(struct dev_object *hdev_obj,
483 struct dmm_object **mgr)
484{
485 int status = 0;
486 struct dev_object *dev_obj = hdev_obj;
487
488 DBC_REQUIRE(refs > 0);
489 DBC_REQUIRE(mgr != NULL);
490
491 if (hdev_obj) {
492 *mgr = dev_obj->dmm_mgr;
493 } else {
494 *mgr = NULL;
495 status = -EFAULT;
496 }
497
498 DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
499 return status;
500}
501
502/*
465 * ======== dev_get_cod_mgr ======== 503 * ======== dev_get_cod_mgr ========
466 * Purpose: 504 * Purpose:
467 * Retrieve the COD manager create for this device. 505 * Retrieve the COD manager create for this device.
@@ -713,8 +751,10 @@ void dev_exit(void)
713 751
714 refs--; 752 refs--;
715 753
716 if (refs == 0) 754 if (refs == 0) {
717 cmm_exit(); 755 cmm_exit();
756 dmm_exit();
757 }
718 758
719 DBC_ENSURE(refs >= 0); 759 DBC_ENSURE(refs >= 0);
720} 760}
@@ -726,12 +766,25 @@ void dev_exit(void)
726 */ 766 */
727bool dev_init(void) 767bool dev_init(void)
728{ 768{
729 bool ret = true; 769 bool cmm_ret, dmm_ret, ret = true;
730 770
731 DBC_REQUIRE(refs >= 0); 771 DBC_REQUIRE(refs >= 0);
732 772
733 if (refs == 0) 773 if (refs == 0) {
734 ret = cmm_init(); 774 cmm_ret = cmm_init();
775 dmm_ret = dmm_init();
776
777 ret = cmm_ret && dmm_ret;
778
779 if (!ret) {
780 if (cmm_ret)
781 cmm_exit();
782
783 if (dmm_ret)
784 dmm_exit();
785
786 }
787 }
735 788
736 if (ret) 789 if (ret)
737 refs++; 790 refs++;
@@ -1065,6 +1118,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
1065 STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); 1118 STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
1066 STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); 1119 STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
1067 STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); 1120 STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
1121 STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
1122 STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
1068 STORE_FXN(fxn_chnl_create, pfn_chnl_create); 1123 STORE_FXN(fxn_chnl_create, pfn_chnl_create);
1069 STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); 1124 STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
1070 STORE_FXN(fxn_chnl_open, pfn_chnl_open); 1125 STORE_FXN(fxn_chnl_open, pfn_chnl_open);
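The dev_init()/dev_exit() changes keep the module reference count balanced across two sub-modules: dev_init() only takes a reference if both cmm_init() and dmm_init() succeed, and whichever one did succeed is rolled back when the other fails. The same shape in isolation (a minimal sketch with generic names):

	bool a_init(void); void a_exit(void);
	bool b_init(void); void b_exit(void);

	bool both_init(void)
	{
		bool a = a_init();
		bool b = b_init();

		if (a && b)
			return true;

		if (a)
			a_exit();	/* roll back the one that succeeded */
		if (b)
			b_exit();
		return false;
	}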
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
new file mode 100644
index 00000000000..8685233d762
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -0,0 +1,533 @@
1/*
2 * dmm.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
7 * space that can be directly mapped to any MPU buffer or memory region
8 *
9 * Notes:
 10 * Region: Generic memory entity having a start address and a size
11 * Chunk: Reserved region
12 *
13 * Copyright (C) 2005-2006 Texas Instruments, Inc.
14 *
15 * This package is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
18 *
19 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
21 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 */
23#include <linux/types.h>
24
25/* ----------------------------------- Host OS */
26#include <dspbridge/host_os.h>
27
28/* ----------------------------------- DSP/BIOS Bridge */
29#include <dspbridge/dbdefs.h>
30
31/* ----------------------------------- Trace & Debug */
32#include <dspbridge/dbc.h>
33
34/* ----------------------------------- OS Adaptation Layer */
35#include <dspbridge/sync.h>
36
37/* ----------------------------------- Platform Manager */
38#include <dspbridge/dev.h>
39#include <dspbridge/proc.h>
40
41/* ----------------------------------- This */
42#include <dspbridge/dmm.h>
43
44/* ----------------------------------- Defines, Data Structures, Typedefs */
45#define DMM_ADDR_VIRTUAL(a) \
46 (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
47 dyn_mem_map_beg)
48#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
49
50/* DMM Mgr */
51struct dmm_object {
 52 /* Dmm Lock is used to serialize access to the memory manager
 53 * across multiple threads. */
54 spinlock_t dmm_lock; /* Lock to access dmm mgr */
55};
56
57/* ----------------------------------- Globals */
58static u32 refs; /* module reference count */
59struct map_page {
60 u32 region_size:15;
61 u32 mapped_size:15;
62 u32 reserved:1;
63 u32 mapped:1;
64};
65
66/* Create the free list */
67static struct map_page *virtual_mapping_table;
68static u32 free_region; /* The index of free region */
69static u32 free_size;
70static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */
71static u32 table_size; /* The size of virt and phys pages tables */
72
73/* ----------------------------------- Function Prototypes */
74static struct map_page *get_region(u32 addr);
75static struct map_page *get_free_region(u32 len);
76static struct map_page *get_mapped_region(u32 addrs);
77
78/* ======== dmm_create_tables ========
79 * Purpose:
80 * Create the table that holds the physical-address information
81 * for the buffer pages passed in by the user, and the table that
82 * holds the information about the virtual memory reserved for
83 * the DSP.
84 */
85int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
86{
87 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
88 int status = 0;
89
90 status = dmm_delete_tables(dmm_obj);
91 if (!status) {
92 dyn_mem_map_beg = addr;
93 table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
94 /* Create the free list */
95 virtual_mapping_table = __vmalloc(table_size *
96 sizeof(struct map_page), GFP_KERNEL |
97 __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
98 if (virtual_mapping_table == NULL)
99 status = -ENOMEM;
100 else {
101 /* On successful allocation,
102 * all entries are zero ('free') */
103 free_region = 0;
104 free_size = table_size * PG_SIZE4K;
105 virtual_mapping_table[0].region_size = table_size;
106 }
107 }
108
109 if (status)
110 pr_err("%s: failure, status 0x%x\n", __func__, status);
111
112 return status;
113}
114
115/*
116 * ======== dmm_create ========
117 * Purpose:
118 * Create a dynamic memory manager object.
119 */
120int dmm_create(struct dmm_object **dmm_manager,
121 struct dev_object *hdev_obj,
122 const struct dmm_mgrattrs *mgr_attrts)
123{
124 struct dmm_object *dmm_obj = NULL;
125 int status = 0;
126 DBC_REQUIRE(refs > 0);
127 DBC_REQUIRE(dmm_manager != NULL);
128
129 *dmm_manager = NULL;
130 /* create, zero, and tag a dmm mgr object */
131 dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
132 if (dmm_obj != NULL) {
133 spin_lock_init(&dmm_obj->dmm_lock);
134 *dmm_manager = dmm_obj;
135 } else {
136 status = -ENOMEM;
137 }
138
139 return status;
140}
141
142/*
143 * ======== dmm_destroy ========
144 * Purpose:
145 * Release the dynamic memory manager resources.
146 */
147int dmm_destroy(struct dmm_object *dmm_mgr)
148{
149 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
150 int status = 0;
151
152 DBC_REQUIRE(refs > 0);
153 if (dmm_mgr) {
154 status = dmm_delete_tables(dmm_obj);
155 if (!status)
156 kfree(dmm_obj);
157 } else
158 status = -EFAULT;
159
160 return status;
161}
162
163/*
164 * ======== dmm_delete_tables ========
165 * Purpose:
166 * Delete DMM Tables.
167 */
168int dmm_delete_tables(struct dmm_object *dmm_mgr)
169{
170 int status = 0;
171
172 DBC_REQUIRE(refs > 0);
173 /* Delete all DMM tables */
174 if (dmm_mgr)
175 vfree(virtual_mapping_table);
176 else
177 status = -EFAULT;
178 return status;
179}
180
181/*
182 * ======== dmm_exit ========
183 * Purpose:
184 * Discontinue usage of module; free resources when reference count
185 * reaches 0.
186 */
187void dmm_exit(void)
188{
189 DBC_REQUIRE(refs > 0);
190
191 refs--;
192}
193
194/*
195 * ======== dmm_get_handle ========
196 * Purpose:
197 * Return the dynamic memory manager object for this device.
198 * This is typically called from the client process.
199 */
200int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
201{
202 int status = 0;
203 struct dev_object *hdev_obj;
204
205 DBC_REQUIRE(refs > 0);
206 DBC_REQUIRE(dmm_manager != NULL);
207 if (hprocessor != NULL)
208 status = proc_get_dev_object(hprocessor, &hdev_obj);
209 else
210 hdev_obj = dev_get_first(); /* default */
211
212 if (!status)
213 status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
214
215 return status;
216}
217
218/*
219 * ======== dmm_init ========
220 * Purpose:
221 * Initializes private state of DMM module.
222 */
223bool dmm_init(void)
224{
225 bool ret = true;
226
227 DBC_REQUIRE(refs >= 0);
228
229 if (ret)
230 refs++;
231
232 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
233
234 virtual_mapping_table = NULL;
235 table_size = 0;
236
237 return ret;
238}
239
240/*
241 * ======== dmm_map_memory ========
242 * Purpose:
243 * Add a mapping block to the reserved chunk. DMM assumes that this block
244 * will be mapped in the DSP/IVA's address space. DMM returns an error if a
245 * mapping overlaps another one. This function stores the info that will be
246 * required later while unmapping the block.
247 */
248int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
249{
250 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
251 struct map_page *chunk;
252 int status = 0;
253
254 spin_lock(&dmm_obj->dmm_lock);
255 /* Find the Reserved memory chunk containing the DSP block to
256 * be mapped */
257 chunk = (struct map_page *)get_region(addr);
258 if (chunk != NULL) {
259 /* Mark the region 'mapped', leave the 'reserved' info as-is */
260 chunk->mapped = true;
261 chunk->mapped_size = (size / PG_SIZE4K);
262 } else
263 status = -ENOENT;
264 spin_unlock(&dmm_obj->dmm_lock);
265
266 dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
267 "chunk %p", __func__, dmm_mgr, addr, size, status, chunk);
268
269 return status;
270}
271
272/*
273 * ======== dmm_reserve_memory ========
274 * Purpose:
275 * Reserve a chunk of virtually contiguous DSP/IVA address space.
276 */
277int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
278 u32 *prsv_addr)
279{
280 int status = 0;
281 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
282 struct map_page *node;
283 u32 rsv_addr = 0;
284 u32 rsv_size = 0;
285
286 spin_lock(&dmm_obj->dmm_lock);
287
288 /* Try to get a DSP chunk from the free list */
289 node = get_free_region(size);
290 if (node != NULL) {
291 /* DSP chunk of given size is available. */
292 rsv_addr = DMM_ADDR_VIRTUAL(node);
293 /* Calculate the number of entries to use */
294 rsv_size = size / PG_SIZE4K;
295 if (rsv_size < node->region_size) {
296 /* Mark remainder of free region */
297 node[rsv_size].mapped = false;
298 node[rsv_size].reserved = false;
299 node[rsv_size].region_size =
300 node->region_size - rsv_size;
301 node[rsv_size].mapped_size = 0;
302 }
303 /* get_free_region() returns a first-fit chunk, but we use only
304 what was requested. */
305 node->mapped = false;
306 node->reserved = true;
307 node->region_size = rsv_size;
308 node->mapped_size = 0;
309 /* Return the chunk's starting address */
310 *prsv_addr = rsv_addr;
311 } else
312 /* DSP chunk of given size is not available */
313 status = -ENOMEM;
314
315 spin_unlock(&dmm_obj->dmm_lock);
316
317 dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
318 "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
319 prsv_addr, status, rsv_addr, rsv_size);
320
321 return status;
322}
323
324/*
325 * ======== dmm_un_map_memory ========
326 * Purpose:
327 * Remove the mapped block from the reserved chunk.
328 */
329int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
330{
331 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
332 struct map_page *chunk;
333 int status = 0;
334
335 spin_lock(&dmm_obj->dmm_lock);
336 chunk = get_mapped_region(addr);
337 if (chunk == NULL)
338 status = -ENOENT;
339
340 if (!status) {
341 /* Unmap the region */
342 *psize = chunk->mapped_size * PG_SIZE4K;
343 chunk->mapped = false;
344 chunk->mapped_size = 0;
345 }
346 spin_unlock(&dmm_obj->dmm_lock);
347
348 dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
349 "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
350
351 return status;
352}
353
354/*
355 * ======== dmm_un_reserve_memory ========
356 * Purpose:
357 * Free a chunk of reserved DSP/IVA address space.
358 */
359int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
360{
361 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
362 struct map_page *chunk;
363 u32 i;
364 int status = 0;
365 u32 chunk_size;
366
367 spin_lock(&dmm_obj->dmm_lock);
368
369 /* Find the chunk containing the reserved address */
370 chunk = get_mapped_region(rsv_addr);
371 if (chunk == NULL)
372 status = -ENOENT;
373
374 if (!status) {
375 /* Free all the mapped pages for this reserved region */
376 i = 0;
377 while (i < chunk->region_size) {
378 if (chunk[i].mapped) {
379 /* Remove mapping from the page tables. */
380 chunk_size = chunk[i].mapped_size;
381 /* Clear the mapping flags */
382 chunk[i].mapped = false;
383 chunk[i].mapped_size = 0;
384 i += chunk_size;
385 } else
386 i++;
387 }
388 /* Clear the flags (mark the region 'free') */
389 chunk->reserved = false;
390 /* NOTE: We do NOT coalesce free regions here.
391 * Free regions are coalesced in get_free_region(), as it
392 * traverses the whole mapping table.
393 */
394 }
395 spin_unlock(&dmm_obj->dmm_lock);
396
397 dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
398 __func__, dmm_mgr, rsv_addr, status, chunk);
399
400 return status;
401}
402
403/*
404 * ======== get_region ========
405 * Purpose:
406 * Returns a region containing the specified memory region
407 */
408static struct map_page *get_region(u32 addr)
409{
410 struct map_page *curr_region = NULL;
411 u32 i = 0;
412
413 if (virtual_mapping_table != NULL) {
414 /* find page mapped by this address */
415 i = DMM_ADDR_TO_INDEX(addr);
416 if (i < table_size)
417 curr_region = virtual_mapping_table + i;
418 }
419
420 dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
421 __func__, curr_region, free_region, free_size);
422 return curr_region;
423}
424
425/*
426 * ======== get_free_region ========
427 * Purpose:
428 * Returns the requested free region
429 */
430static struct map_page *get_free_region(u32 len)
431{
432 struct map_page *curr_region = NULL;
433 u32 i = 0;
434 u32 region_size = 0;
435 u32 next_i = 0;
436
437 if (virtual_mapping_table == NULL)
438 return curr_region;
439 if (len > free_size) {
440 /* Find the largest free region
441 * (coalesce during the traversal) */
442 while (i < table_size) {
443 region_size = virtual_mapping_table[i].region_size;
444 next_i = i + region_size;
445 if (virtual_mapping_table[i].reserved == false) {
446 /* Coalesce, if possible */
447 if (next_i < table_size &&
448 virtual_mapping_table[next_i].reserved
449 == false) {
450 virtual_mapping_table[i].region_size +=
451 virtual_mapping_table
452 [next_i].region_size;
453 continue;
454 }
455 region_size *= PG_SIZE4K;
456 if (region_size > free_size) {
457 free_region = i;
458 free_size = region_size;
459 }
460 }
461 i = next_i;
462 }
463 }
464 if (len <= free_size) {
465 curr_region = virtual_mapping_table + free_region;
466 free_region += (len / PG_SIZE4K);
467 free_size -= len;
468 }
469 return curr_region;
470}
471
472/*
473 * ======== get_mapped_region ========
474 * Purpose:
475 * Returns the requested mapped region
476 */
477static struct map_page *get_mapped_region(u32 addrs)
478{
479 u32 i = 0;
480 struct map_page *curr_region = NULL;
481
482 if (virtual_mapping_table == NULL)
483 return curr_region;
484
485 i = DMM_ADDR_TO_INDEX(addrs);
486 if (i < table_size && (virtual_mapping_table[i].mapped ||
487 virtual_mapping_table[i].reserved))
488 curr_region = virtual_mapping_table + i;
489 return curr_region;
490}
491
492#ifdef DSP_DMM_DEBUG
493u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
494{
495 struct map_page *curr_node = NULL;
496 u32 i;
497 u32 freemem = 0;
498 u32 bigsize = 0;
499
500 spin_lock(&dmm_mgr->dmm_lock);
501
502 if (virtual_mapping_table != NULL) {
503 for (i = 0; i < table_size; i +=
504 virtual_mapping_table[i].region_size) {
505 curr_node = virtual_mapping_table + i;
506 if (curr_node->reserved) {
507 /*printk("RESERVED size = 0x%x, "
508 "Map size = 0x%x\n",
509 (curr_node->region_size * PG_SIZE4K),
510 (curr_node->mapped == false) ? 0 :
511 (curr_node->mapped_size * PG_SIZE4K));
512 */
513 } else {
514/* printk("UNRESERVED size = 0x%x\n",
515 (curr_node->region_size * PG_SIZE4K));
516 */
517 freemem += (curr_node->region_size * PG_SIZE4K);
518 if (curr_node->region_size > bigsize)
519 bigsize = curr_node->region_size;
520 }
521 }
522 }
523 spin_unlock(&dmm_mgr->dmm_lock);
524 printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
525 freemem / (1024 * 1024));
526 printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n",
527 (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
528 printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n",
529 (bigsize * PG_SIZE4K / (1024 * 1024)));
530
531 return 0;
532}
533#endif
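The new dmm.c keeps one map_page entry per 4 KiB page of the DSP virtual pool, so DMM_ADDR_TO_INDEX() and DMM_ADDR_VIRTUAL() are just linear arithmetic on dyn_mem_map_beg. A stand-alone sketch of that arithmetic, with a made-up base address, shows the round trip:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PG_SIZE4K 4096u

/* Simplified stand-in for the driver's global pool base. */
static uint32_t dyn_mem_map_beg = 0x20000000u;  /* start of DSP virtual pool */

static uint32_t addr_to_index(uint32_t addr)
{
        return (addr - dyn_mem_map_beg) / PG_SIZE4K;
}

static uint32_t index_to_addr(uint32_t index)
{
        return dyn_mem_map_beg + index * PG_SIZE4K;
}

int main(void)
{
        uint32_t addr = dyn_mem_map_beg + 5 * PG_SIZE4K + 0x123;

        /* Any address inside a page maps to that page's index... */
        assert(addr_to_index(addr) == 5);
        /* ...and the index maps back to the page-aligned address. */
        assert(index_to_addr(5) == dyn_mem_map_beg + 5 * PG_SIZE4K);

        printf("index %u -> addr 0x%x\n", 5, index_to_addr(5));
        return 0;
}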
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 981551ce4d7..86ca785f191 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -993,10 +993,27 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt)
993/* 993/*
994 * ======== procwrap_reserve_memory ======== 994 * ======== procwrap_reserve_memory ========
995 */ 995 */
996u32 __deprecated procwrap_reserve_memory(union trapped_args *args, 996u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
997 void *pr_ctxt)
998{ 997{
999 return 0; 998 int status;
999 void *prsv_addr;
1000 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
1001
1002 if ((args->args_proc_rsvmem.ul_size <= 0) ||
1003 (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
1004 return -EINVAL;
1005
1006 status = proc_reserve_memory(hprocessor,
1007 args->args_proc_rsvmem.ul_size, &prsv_addr,
1008 pr_ctxt);
1009 if (!status) {
1010 if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
1011 status = -EINVAL;
1012 proc_un_reserve_memory(args->args_proc_rsvmem.
1013 hprocessor, prsv_addr, pr_ctxt);
1014 }
1015 }
1016 return status;
1000} 1017}
1001 1018
1002/* 1019/*
@@ -1025,10 +1042,15 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
1025/* 1042/*
1026 * ======== procwrap_un_reserve_memory ======== 1043 * ======== procwrap_un_reserve_memory ========
1027 */ 1044 */
1028u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args, 1045u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
1029 void *pr_ctxt)
1030{ 1046{
1031 return 0; 1047 int status;
1048 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
1049
1050 status = proc_un_reserve_memory(hprocessor,
1051 args->args_proc_unrsvmem.prsv_addr,
1052 pr_ctxt);
1053 return status;
1032} 1054}
1033 1055
1034/* 1056/*
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index 91cc168516e..81b1b901355 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -146,6 +146,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
146 struct process_context *ctxt = (struct process_context *)process_ctxt; 146 struct process_context *ctxt = (struct process_context *)process_ctxt;
147 int status = 0; 147 int status = 0;
148 struct dmm_map_object *temp_map, *map_obj; 148 struct dmm_map_object *temp_map, *map_obj;
149 struct dmm_rsv_object *temp_rsv, *rsv_obj;
149 150
150 /* Free DMM mapped memory resources */ 151 /* Free DMM mapped memory resources */
151 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { 152 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
@@ -155,6 +156,16 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
155 pr_err("%s: proc_un_map failed!" 156 pr_err("%s: proc_un_map failed!"
156 " status = 0x%xn", __func__, status); 157 " status = 0x%xn", __func__, status);
157 } 158 }
159
160 /* Free DMM reserved memory resources */
161 list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
162 status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
163 rsv_obj->dsp_reserved_addr,
164 ctxt);
165 if (status)
166 pr_err("%s: proc_un_reserve_memory failed!"
167 " status = 0x%xn", __func__, status);
168 }
158 return status; 169 return status;
159} 170}
160 171
@@ -732,6 +743,7 @@ static int request_bridge_resources(struct cfg_hostres *res)
732 host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); 743 host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
733 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); 744 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
734 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); 745 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
746 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
735 747
736 /* for 24xx base port is not mapping the mamory for DSP 748 /* for 24xx base port is not mapping the mamory for DSP
737 * internal memory TODO Do a ioremap here */ 749 * internal memory TODO Do a ioremap here */
@@ -785,6 +797,8 @@ int drv_request_bridge_res_dsp(void **phost_resources)
785 OMAP_PER_PRM_SIZE); 797 OMAP_PER_PRM_SIZE);
786 host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, 798 host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
787 OMAP_CORE_PRM_SIZE); 799 OMAP_CORE_PRM_SIZE);
800 host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
801 OMAP_DMMU_SIZE);
788 802
789 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", 803 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
790 host_res->dw_mem_base[0]); 804 host_res->dw_mem_base[0]);
@@ -796,6 +810,7 @@ int drv_request_bridge_res_dsp(void **phost_resources)
796 host_res->dw_mem_base[3]); 810 host_res->dw_mem_base[3]);
797 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", 811 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
798 host_res->dw_mem_base[4]); 812 host_res->dw_mem_base[4]);
813 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
799 814
800 shm_size = drv_datap->shm_size; 815 shm_size = drv_datap->shm_size;
801 if (shm_size >= 0x10000) { 816 if (shm_size >= 0x10000) {
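drv_remove_all_dmm_res_elements() walks the per-process reservation list with list_for_each_entry_safe(), which caches the next element before the loop body can free the current one. A plain-C analogue of the same idea, freeing a hand-rolled singly linked list while iterating (the rsv_node type here is hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct rsv_node {
        unsigned int dsp_reserved_addr;
        struct rsv_node *next;
};

static void free_all(struct rsv_node **head)
{
        struct rsv_node *cur = *head, *next;

        while (cur) {
                next = cur->next;       /* remember the next node before freeing */
                printf("releasing reservation at 0x%x\n", cur->dsp_reserved_addr);
                free(cur);
                cur = next;
        }
        *head = NULL;
}

int main(void)
{
        struct rsv_node *head = NULL;

        for (unsigned int i = 0; i < 3; i++) {
                struct rsv_node *n = malloc(sizeof(*n));
                if (!n)
                        break;
                n->dsp_reserved_addr = 0x20000000u + i * 4096u;
                n->next = head;
                head = n;
        }
        free_all(&head);
        return 0;
}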
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 34be43fec04..324fcdffb3b 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -509,6 +509,8 @@ static int bridge_open(struct inode *ip, struct file *filp)
509 pr_ctxt->res_state = PROC_RES_ALLOCATED; 509 pr_ctxt->res_state = PROC_RES_ALLOCATED;
510 spin_lock_init(&pr_ctxt->dmm_map_lock); 510 spin_lock_init(&pr_ctxt->dmm_map_lock);
511 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); 511 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
512 spin_lock_init(&pr_ctxt->dmm_rsv_lock);
513 INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
512 514
513 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); 515 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
514 if (pr_ctxt->node_id) { 516 if (pr_ctxt->node_id) {
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index a660247f527..1562f3c1281 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -56,6 +56,7 @@
56/* ----------------------------------- This */ 56/* ----------------------------------- This */
57#include <dspbridge/nodepriv.h> 57#include <dspbridge/nodepriv.h>
58#include <dspbridge/node.h> 58#include <dspbridge/node.h>
59#include <dspbridge/dmm.h>
59 60
60/* Static/Dynamic Loader includes */ 61/* Static/Dynamic Loader includes */
61#include <dspbridge/dbll.h> 62#include <dspbridge/dbll.h>
@@ -316,6 +317,10 @@ int node_allocate(struct proc_object *hprocessor,
316 u32 mapped_addr = 0; 317 u32 mapped_addr = 0;
317 u32 map_attrs = 0x0; 318 u32 map_attrs = 0x0;
318 struct dsp_processorstate proc_state; 319 struct dsp_processorstate proc_state;
320#ifdef DSP_DMM_DEBUG
321 struct dmm_object *dmm_mgr;
322 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
323#endif
319 324
320 void *node_res; 325 void *node_res;
321 326
@@ -425,12 +430,34 @@ int node_allocate(struct proc_object *hprocessor,
425 if (status) 430 if (status)
426 goto func_cont; 431 goto func_cont;
427 432
433 status = proc_reserve_memory(hprocessor,
434 pnode->create_args.asa.task_arg_obj.
435 heap_size + PAGE_SIZE,
436 (void **)&(pnode->create_args.asa.
437 task_arg_obj.udsp_heap_res_addr),
438 pr_ctxt);
439 if (status) {
440 pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
441 __func__, status);
442 goto func_cont;
443 }
444#ifdef DSP_DMM_DEBUG
445 status = dmm_get_handle(p_proc_object, &dmm_mgr);
446 if (!dmm_mgr) {
447 status = DSP_EHANDLE;
448 goto func_cont;
449 }
450
451 dmm_mem_map_dump(dmm_mgr);
452#endif
453
428 map_attrs |= DSP_MAPLITTLEENDIAN; 454 map_attrs |= DSP_MAPLITTLEENDIAN;
429 map_attrs |= DSP_MAPELEMSIZE32; 455 map_attrs |= DSP_MAPELEMSIZE32;
430 map_attrs |= DSP_MAPVIRTUALADDR; 456 map_attrs |= DSP_MAPVIRTUALADDR;
431 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, 457 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
432 pnode->create_args.asa.task_arg_obj.heap_size, 458 pnode->create_args.asa.task_arg_obj.heap_size,
433 NULL, (void **)&mapped_addr, map_attrs, 459 (void *)pnode->create_args.asa.task_arg_obj.
460 udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
434 pr_ctxt); 461 pr_ctxt);
435 if (status) 462 if (status)
436 pr_err("%s: Failed to map memory for Heap: 0x%x\n", 463 pr_err("%s: Failed to map memory for Heap: 0x%x\n",
@@ -2484,7 +2511,11 @@ static void delete_node(struct node_object *hnode,
2484 struct stream_chnl stream; 2511 struct stream_chnl stream;
2485 struct node_msgargs node_msg_args; 2512 struct node_msgargs node_msg_args;
2486 struct node_taskargs task_arg_obj; 2513 struct node_taskargs task_arg_obj;
2487 2514#ifdef DSP_DMM_DEBUG
2515 struct dmm_object *dmm_mgr;
2516 struct proc_object *p_proc_object =
2517 (struct proc_object *)hnode->hprocessor;
2518#endif
2488 int status; 2519 int status;
2489 if (!hnode) 2520 if (!hnode)
2490 goto func_end; 2521 goto func_end;
@@ -2545,6 +2576,19 @@ static void delete_node(struct node_object *hnode,
2545 status = proc_un_map(hnode->hprocessor, (void *) 2576 status = proc_un_map(hnode->hprocessor, (void *)
2546 task_arg_obj.udsp_heap_addr, 2577 task_arg_obj.udsp_heap_addr,
2547 pr_ctxt); 2578 pr_ctxt);
2579
2580 status = proc_un_reserve_memory(hnode->hprocessor,
2581 (void *)
2582 task_arg_obj.
2583 udsp_heap_res_addr,
2584 pr_ctxt);
2585#ifdef DSP_DMM_DEBUG
2586 status = dmm_get_handle(p_proc_object, &dmm_mgr);
2587 if (dmm_mgr)
2588 dmm_mem_map_dump(dmm_mgr);
2589 else
2590 status = DSP_EHANDLE;
2591#endif
2548 } 2592 }
2549 } 2593 }
2550 if (node_type != NODE_MESSAGE) { 2594 if (node_type != NODE_MESSAGE) {
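Taken together, the node.c hunks give the node heap a reserve/map lifecycle: node_allocate() reserves DSP address space with proc_reserve_memory() and then maps the heap into it with proc_map(), while delete_node() unmaps and then unreserves. The sketch below only illustrates that call order; the signatures are simplified stand-ins, not the driver's real prototypes:

#include <stdio.h>

/* Stubs standing in for the bridge calls used by node_allocate()/delete_node(). */
static int proc_reserve_memory(unsigned int size, unsigned int *rsv_addr)
{
        (void)size;
        *rsv_addr = 0x20000000u;        /* pretend DMM handed back this chunk */
        return 0;
}
static int proc_map(unsigned int rsv_addr, unsigned int size, unsigned int *dsp_addr)
{
        (void)size;
        *dsp_addr = rsv_addr;           /* map into the reserved chunk */
        return 0;
}
static int proc_un_map(unsigned int dsp_addr) { (void)dsp_addr; return 0; }
static int proc_un_reserve_memory(unsigned int rsv_addr) { (void)rsv_addr; return 0; }

int main(void)
{
        unsigned int heap_size = 64 * 1024, rsv = 0, mapped = 0;

        /* node_allocate(): reserve first, then map the heap into the reservation. */
        if (proc_reserve_memory(heap_size + 4096, &rsv) ||
            proc_map(rsv, heap_size, &mapped)) {
                fprintf(stderr, "setup failed\n");
                return 1;
        }
        printf("heap mapped at 0x%x inside reservation 0x%x\n", mapped, rsv);

        /* delete_node(): tear down in the reverse order. */
        proc_un_map(mapped);
        proc_un_reserve_memory(rsv);
        return 0;
}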
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 7a15a02efed..b47d7aa747b 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -39,6 +39,7 @@
39#include <dspbridge/cod.h> 39#include <dspbridge/cod.h>
40#include <dspbridge/dev.h> 40#include <dspbridge/dev.h>
41#include <dspbridge/procpriv.h> 41#include <dspbridge/procpriv.h>
42#include <dspbridge/dmm.h>
42 43
43/* ----------------------------------- Resource Manager */ 44/* ----------------------------------- Resource Manager */
44#include <dspbridge/mgr.h> 45#include <dspbridge/mgr.h>
@@ -51,7 +52,6 @@
51#include <dspbridge/msg.h> 52#include <dspbridge/msg.h>
52#include <dspbridge/dspioctl.h> 53#include <dspbridge/dspioctl.h>
53#include <dspbridge/drv.h> 54#include <dspbridge/drv.h>
54#include <_tiomap.h>
55 55
56/* ----------------------------------- This */ 56/* ----------------------------------- This */
57#include <dspbridge/proc.h> 57#include <dspbridge/proc.h>
@@ -151,21 +151,34 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
151 return map_obj; 151 return map_obj;
152} 152}
153 153
154static int match_exact_map_obj(struct dmm_map_object *map_obj,
155 u32 dsp_addr, u32 size)
156{
157 if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
158 pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
159 __func__, dsp_addr, map_obj->size, size);
160
161 return map_obj->dsp_addr == dsp_addr &&
162 map_obj->size == size;
163}
164
154static void remove_mapping_information(struct process_context *pr_ctxt, 165static void remove_mapping_information(struct process_context *pr_ctxt,
155 u32 dsp_addr) 166 u32 dsp_addr, u32 size)
156{ 167{
157 struct dmm_map_object *map_obj; 168 struct dmm_map_object *map_obj;
158 169
159 pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr); 170 pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
171 dsp_addr, size);
160 172
161 spin_lock(&pr_ctxt->dmm_map_lock); 173 spin_lock(&pr_ctxt->dmm_map_lock);
162 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { 174 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
163 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n", 175 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
164 __func__, 176 __func__,
165 map_obj->mpu_addr, 177 map_obj->mpu_addr,
166 map_obj->dsp_addr); 178 map_obj->dsp_addr,
179 map_obj->size);
167 180
168 if (map_obj->dsp_addr == dsp_addr) { 181 if (match_exact_map_obj(map_obj, dsp_addr, size)) {
169 pr_debug("%s: match, deleting map info\n", __func__); 182 pr_debug("%s: match, deleting map info\n", __func__);
170 list_del(&map_obj->link); 183 list_del(&map_obj->link);
171 kfree(map_obj->dma_info.sg); 184 kfree(map_obj->dma_info.sg);
@@ -1077,6 +1090,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
1077 s32 cnew_envp; /* " " in new_envp[] */ 1090 s32 cnew_envp; /* " " in new_envp[] */
1078 s32 nproc_id = 0; /* Anticipate MP version. */ 1091 s32 nproc_id = 0; /* Anticipate MP version. */
1079 struct dcd_manager *hdcd_handle; 1092 struct dcd_manager *hdcd_handle;
1093 struct dmm_object *dmm_mgr;
1080 u32 dw_ext_end; 1094 u32 dw_ext_end;
1081 u32 proc_id; 1095 u32 proc_id;
1082 int brd_state; 1096 int brd_state;
@@ -1267,6 +1281,25 @@ int proc_load(void *hprocessor, const s32 argc_index,
1267 if (!status) 1281 if (!status)
1268 status = cod_get_sym_value(cod_mgr, EXTEND, 1282 status = cod_get_sym_value(cod_mgr, EXTEND,
1269 &dw_ext_end); 1283 &dw_ext_end);
1284
1285 /* Reset DMM structs and add an initial free chunk */
1286 if (!status) {
1287 status =
1288 dev_get_dmm_mgr(p_proc_object->hdev_obj,
1289 &dmm_mgr);
1290 if (dmm_mgr) {
1291 /* Set dw_ext_end to DMM START u8
1292 * address */
1293 dw_ext_end =
1294 (dw_ext_end + 1) * DSPWORDSIZE;
1295 /* DMM memory is from EXT_END */
1296 status = dmm_create_tables(dmm_mgr,
1297 dw_ext_end,
1298 DMMPOOLSIZE);
1299 } else {
1300 status = -EFAULT;
1301 }
1302 }
1270 } 1303 }
1271 } 1304 }
1272 /* Restore the original argv[0] */ 1305 /* Restore the original argv[0] */
@@ -1319,10 +1352,12 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1319{ 1352{
1320 u32 va_align; 1353 u32 va_align;
1321 u32 pa_align; 1354 u32 pa_align;
1355 struct dmm_object *dmm_mgr;
1322 u32 size_align; 1356 u32 size_align;
1323 int status = 0; 1357 int status = 0;
1324 struct proc_object *p_proc_object = (struct proc_object *)hprocessor; 1358 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1325 struct dmm_map_object *map_obj; 1359 struct dmm_map_object *map_obj;
1360 u32 tmp_addr = 0;
1326 1361
1327#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK 1362#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
1328 if ((ul_map_attr & BUFMODE_MASK) != RBUF) { 1363 if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
@@ -1347,30 +1382,33 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1347 } 1382 }
1348 /* Critical section */ 1383 /* Critical section */
1349 mutex_lock(&proc_lock); 1384 mutex_lock(&proc_lock);
1385 dmm_get_handle(p_proc_object, &dmm_mgr);
1386 if (dmm_mgr)
1387 status = dmm_map_memory(dmm_mgr, va_align, size_align);
1388 else
1389 status = -EFAULT;
1350 1390
1351 /* Add mapping to the page tables. */ 1391 /* Add mapping to the page tables. */
1352 if (!status) { 1392 if (!status) {
1393
1394 /* Mapped address = MSB of VA | LSB of PA */
1395 tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
1353 /* mapped memory resource tracking */ 1396 /* mapped memory resource tracking */
1354 map_obj = add_mapping_info(pr_ctxt, pa_align, va_align, 1397 map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
1355 size_align); 1398 size_align);
1356 if (!map_obj) { 1399 if (!map_obj)
1357 status = -ENOMEM; 1400 status = -ENOMEM;
1358 } else { 1401 else
1359 va_align = user_to_dsp_map( 1402 status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
1360 p_proc_object->hbridge_context->dsp_mmu, 1403 (p_proc_object->hbridge_context, pa_align, va_align,
1361 pa_align, va_align, size_align, 1404 size_align, ul_map_attr, map_obj->pages);
1362 map_obj->pages);
1363 if (IS_ERR_VALUE(va_align))
1364 status = (int)va_align;
1365 }
1366 } 1405 }
1367 if (!status) { 1406 if (!status) {
1368 /* Mapped address = MSB of VA | LSB of PA */ 1407 /* Mapped address = MSB of VA | LSB of PA */
1369 map_obj->dsp_addr = (va_align | 1408 *pp_map_addr = (void *) tmp_addr;
1370 ((u32)pmpu_addr & (PG_SIZE4K - 1)));
1371 *pp_map_addr = (void *)map_obj->dsp_addr;
1372 } else { 1409 } else {
1373 remove_mapping_information(pr_ctxt, va_align); 1410 remove_mapping_information(pr_ctxt, tmp_addr, size_align);
1411 dmm_un_map_memory(dmm_mgr, va_align, &size_align);
1374 } 1412 }
1375 mutex_unlock(&proc_lock); 1413 mutex_unlock(&proc_lock);
1376 1414
@@ -1463,6 +1501,55 @@ func_end:
1463} 1501}
1464 1502
1465/* 1503/*
1504 * ======== proc_reserve_memory ========
1505 * Purpose:
1506 * Reserve a virtually contiguous region of DSP address space.
1507 */
1508int proc_reserve_memory(void *hprocessor, u32 ul_size,
1509 void **pp_rsv_addr,
1510 struct process_context *pr_ctxt)
1511{
1512 struct dmm_object *dmm_mgr;
1513 int status = 0;
1514 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1515 struct dmm_rsv_object *rsv_obj;
1516
1517 if (!p_proc_object) {
1518 status = -EFAULT;
1519 goto func_end;
1520 }
1521
1522 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1523 if (!dmm_mgr) {
1524 status = -EFAULT;
1525 goto func_end;
1526 }
1527
1528 status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
1529 if (status != 0)
1530 goto func_end;
1531
1532 /*
1533 * A successful reserve should be followed by insertion of rsv_obj
1534 * into dmm_rsv_list, so that reserved memory resource tracking
 1535 * remains up to date
1536 */
1537 rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
1538 if (rsv_obj) {
1539 rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
1540 spin_lock(&pr_ctxt->dmm_rsv_lock);
1541 list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
1542 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1543 }
1544
1545func_end:
1546 dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
1547 "status 0x%x\n", __func__, hprocessor,
1548 ul_size, pp_rsv_addr, status);
1549 return status;
1550}
1551
1552/*
1466 * ======== proc_start ======== 1553 * ======== proc_start ========
1467 * Purpose: 1554 * Purpose:
1468 * Start a processor running. 1555 * Start a processor running.
@@ -1610,7 +1697,9 @@ int proc_un_map(void *hprocessor, void *map_addr,
1610{ 1697{
1611 int status = 0; 1698 int status = 0;
1612 struct proc_object *p_proc_object = (struct proc_object *)hprocessor; 1699 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1700 struct dmm_object *dmm_mgr;
1613 u32 va_align; 1701 u32 va_align;
1702 u32 size_align;
1614 1703
1615 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); 1704 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
1616 if (!p_proc_object) { 1705 if (!p_proc_object) {
@@ -1618,11 +1707,24 @@ int proc_un_map(void *hprocessor, void *map_addr,
1618 goto func_end; 1707 goto func_end;
1619 } 1708 }
1620 1709
1710 status = dmm_get_handle(hprocessor, &dmm_mgr);
1711 if (!dmm_mgr) {
1712 status = -EFAULT;
1713 goto func_end;
1714 }
1715
1621 /* Critical section */ 1716 /* Critical section */
1622 mutex_lock(&proc_lock); 1717 mutex_lock(&proc_lock);
1718 /*
1719 * Update DMM structures. Get the size to unmap.
1720 * This function returns error if the VA is not mapped
1721 */
1722 status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
1623 /* Remove mapping from the page tables. */ 1723 /* Remove mapping from the page tables. */
1624 status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu, 1724 if (!status) {
1625 va_align); 1725 status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
1726 (p_proc_object->hbridge_context, va_align, size_align);
1727 }
1626 1728
1627 mutex_unlock(&proc_lock); 1729 mutex_unlock(&proc_lock);
1628 if (status) 1730 if (status)
@@ -1633,7 +1735,7 @@ int proc_un_map(void *hprocessor, void *map_addr,
1633 * from dmm_map_list, so that mapped memory resource tracking 1735 * from dmm_map_list, so that mapped memory resource tracking
1634 * remains uptodate 1736 * remains uptodate
1635 */ 1737 */
1636 remove_mapping_information(pr_ctxt, (u32) map_addr); 1738 remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
1637 1739
1638func_end: 1740func_end:
1639 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", 1741 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
@@ -1642,6 +1744,55 @@ func_end:
1642} 1744}
1643 1745
1644/* 1746/*
1747 * ======== proc_un_reserve_memory ========
1748 * Purpose:
1749 * Frees a previously reserved region of DSP address space.
1750 */
1751int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
1752 struct process_context *pr_ctxt)
1753{
1754 struct dmm_object *dmm_mgr;
1755 int status = 0;
1756 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1757 struct dmm_rsv_object *rsv_obj;
1758
1759 if (!p_proc_object) {
1760 status = -EFAULT;
1761 goto func_end;
1762 }
1763
1764 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1765 if (!dmm_mgr) {
1766 status = -EFAULT;
1767 goto func_end;
1768 }
1769
1770 status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
1771 if (status != 0)
1772 goto func_end;
1773
1774 /*
1775 * A successful unreserve should be followed by removal of rsv_obj
1776 * from dmm_rsv_list, so that reserved memory resource tracking
 1777 * remains up to date
1778 */
1779 spin_lock(&pr_ctxt->dmm_rsv_lock);
1780 list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
1781 if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
1782 list_del(&rsv_obj->link);
1783 kfree(rsv_obj);
1784 break;
1785 }
1786 }
1787 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1788
1789func_end:
1790 dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1791 __func__, hprocessor, prsv_addr, status);
1792 return status;
1793}
1794
1795/*
1645 * ======== = proc_monitor ======== == 1796 * ======== = proc_monitor ======== ==
1646 * Purpose: 1797 * Purpose:
1647 * Place the Processor in Monitor State. This is an internal 1798 * Place the Processor in Monitor State. This is an internal
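proc_map() now builds the address it returns from the page-aligned DSP virtual address and the sub-page offset of the MPU buffer, per the "MSB of VA | LSB of PA" comment. A tiny sketch of that composition with made-up addresses:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PG_SIZE4K 4096u

/* "Mapped address = MSB of VA | LSB of PA", as in the proc_map() hunk. */
static uint32_t compose_dsp_addr(uint32_t va_align, uint32_t mpu_addr)
{
        return va_align | (mpu_addr & (PG_SIZE4K - 1));
}

int main(void)
{
        uint32_t va_align = 0x20004000u;        /* page-aligned DSP virtual address */
        uint32_t mpu_addr = 0x40123123u;        /* user buffer, offset 0x123 into its page */
        uint32_t dsp_addr = compose_dsp_addr(va_align, mpu_addr);

        assert(dsp_addr == 0x20004123u);
        printf("dsp_addr = 0x%x\n", dsp_addr);
        return 0;
}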
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
index 9ec82796634..c5690b2a892 100644
--- a/drivers/staging/tm6000/tm6000-video.c
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -1032,6 +1032,7 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm)
1032 struct tm6000_fh *fh=priv; 1032 struct tm6000_fh *fh=priv;
1033 struct tm6000_core *dev = fh->dev; 1033 struct tm6000_core *dev = fh->dev;
1034 1034
1035 dev->norm = *norm;
1035 rc = tm6000_init_analog_mode(dev); 1036 rc = tm6000_init_analog_mode(dev);
1036 1037
1037 fh->width = dev->width; 1038 fh->width = dev->width;
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index 5969e848d29..fed25105970 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -887,7 +887,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
887 887
888 struct fb_deferred_io *fbdefio; 888 struct fb_deferred_io *fbdefio;
889 889
890 fbdefio = kmalloc(GFP_KERNEL, sizeof(struct fb_deferred_io)); 890 fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
891 891
892 if (fbdefio) { 892 if (fbdefio) {
893 fbdefio->delay = DL_DEFIO_WRITE_DELAY; 893 fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index e992d5d9e15..7cc3d2407d1 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1675,13 +1675,14 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
1675 1675
1676 { 1676 {
1677 char essid[IW_ESSID_MAX_SIZE+1]; 1677 char essid[IW_ESSID_MAX_SIZE+1];
1678 if (wrq->u.essid.pointer) 1678 if (wrq->u.essid.pointer) {
1679 rc = iwctl_giwessid(dev, NULL, 1679 rc = iwctl_giwessid(dev, NULL,
1680 &(wrq->u.essid), essid); 1680 &(wrq->u.essid), essid);
1681 if (copy_to_user(wrq->u.essid.pointer, 1681 if (copy_to_user(wrq->u.essid.pointer,
1682 essid, 1682 essid,
1683 wrq->u.essid.length) ) 1683 wrq->u.essid.length) )
1684 rc = -EFAULT; 1684 rc = -EFAULT;
1685 }
1685 } 1686 }
1686 break; 1687 break;
1687 1688
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasusb.c b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
index 5a219701206..7777d9a60a5 100644
--- a/drivers/staging/westbridge/astoria/api/src/cyasusb.c
+++ b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
@@ -1417,7 +1417,6 @@ cy_as_usb_set_enum_config(cy_as_device_handle handle,
1417 */ 1417 */
1418 bus_mask = 0; 1418 bus_mask = 0;
1419 media_mask = 0; 1419 media_mask = 0;
1420 media_mask = 0;
1421 for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) { 1420 for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) {
1422 for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) { 1421 for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) {
1423 if (config_p->devices_to_enumerate[bus][device] == 1422 if (config_p->devices_to_enumerate[bus][device] ==
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 4af83d5318f..6a71f52c59b 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -139,7 +139,7 @@ exit:
139} 139}
140 140
141int prism2_add_key(struct wiphy *wiphy, struct net_device *dev, 141int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
142 u8 key_index, const u8 *mac_addr, 142 u8 key_index, bool pairwise, const u8 *mac_addr,
143 struct key_params *params) 143 struct key_params *params)
144{ 144{
145 wlandevice_t *wlandev = dev->ml_priv; 145 wlandevice_t *wlandev = dev->ml_priv;
@@ -198,7 +198,7 @@ exit:
198} 198}
199 199
200int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, 200int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
201 u8 key_index, const u8 *mac_addr, void *cookie, 201 u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie,
202 void (*callback)(void *cookie, struct key_params*)) 202 void (*callback)(void *cookie, struct key_params*))
203{ 203{
204 wlandevice_t *wlandev = dev->ml_priv; 204 wlandevice_t *wlandev = dev->ml_priv;
@@ -227,7 +227,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
227} 227}
228 228
229int prism2_del_key(struct wiphy *wiphy, struct net_device *dev, 229int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
230 u8 key_index, const u8 *mac_addr) 230 u8 key_index, bool pairwise, const u8 *mac_addr)
231{ 231{
232 wlandevice_t *wlandev = dev->ml_priv; 232 wlandevice_t *wlandev = dev->ml_priv;
233 u32 did; 233 u32 did;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index aa1792c8429..b7b4a733b46 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -522,8 +522,8 @@ static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr)
522 if (copy_to_user(useraddr, &edata, sizeof(edata))) 522 if (copy_to_user(useraddr, &edata, sizeof(edata)))
523 return -EFAULT; 523 return -EFAULT;
524 return 0; 524 return 0;
525 }
526#endif 525#endif
526 }
527 527
528 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
529} 529}
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
new file mode 100644
index 00000000000..c43ef48b1a0
--- /dev/null
+++ b/drivers/tty/Makefile
@@ -0,0 +1,11 @@
1obj-y += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
2 tty_buffer.o tty_port.o tty_mutex.o
3obj-$(CONFIG_LEGACY_PTYS) += pty.o
4obj-$(CONFIG_UNIX98_PTYS) += pty.o
5obj-$(CONFIG_AUDIT) += tty_audit.o
6obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
7obj-$(CONFIG_N_HDLC) += n_hdlc.o
8obj-$(CONFIG_N_GSM) += n_gsm.o
9obj-$(CONFIG_R3964) += n_r3964.o
10
11obj-y += vt/
diff --git a/drivers/char/n_gsm.c b/drivers/tty/n_gsm.c
index 04ef3ef0a42..81b46585edf 100644
--- a/drivers/char/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -716,8 +716,8 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
716 if (msg->len < 128) 716 if (msg->len < 128)
717 *--dp = (msg->len << 1) | EA; 717 *--dp = (msg->len << 1) | EA;
718 else { 718 else {
719 *--dp = (msg->len >> 6) | EA; 719 *--dp = ((msg->len & 127) << 1) | EA;
720 *--dp = (msg->len & 127) << 1; 720 *--dp = (msg->len >> 6) & 0xfe;
721 } 721 }
722 } 722 }
723 723
@@ -2375,6 +2375,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
2375 gsm->mru = c->mru; 2375 gsm->mru = c->mru;
2376 gsm->encoding = c->encapsulation; 2376 gsm->encoding = c->encapsulation;
2377 gsm->adaption = c->adaption; 2377 gsm->adaption = c->adaption;
2378 gsm->n2 = c->n2;
2378 2379
2379 if (c->i == 1) 2380 if (c->i == 1)
2380 gsm->ftype = UIH; 2381 gsm->ftype = UIH;
diff --git a/drivers/char/n_hdlc.c b/drivers/tty/n_hdlc.c
index 47d32281032..47d32281032 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
diff --git a/drivers/char/n_r3964.c b/drivers/tty/n_r3964.c
index 88dda0c45ee..88dda0c45ee 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/tty/n_r3964.c
diff --git a/drivers/char/n_tty.c b/drivers/tty/n_tty.c
index 428f4fe0b5f..428f4fe0b5f 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/tty/n_tty.c
diff --git a/drivers/char/pty.c b/drivers/tty/pty.c
index 923a4858550..923a4858550 100644
--- a/drivers/char/pty.c
+++ b/drivers/tty/pty.c
diff --git a/drivers/char/sysrq.c b/drivers/tty/sysrq.c
index eaa5d3efa79..eaa5d3efa79 100644
--- a/drivers/char/sysrq.c
+++ b/drivers/tty/sysrq.c
diff --git a/drivers/char/tty_audit.c b/drivers/tty/tty_audit.c
index f64582b0f62..f64582b0f62 100644
--- a/drivers/char/tty_audit.c
+++ b/drivers/tty/tty_audit.c
diff --git a/drivers/char/tty_buffer.c b/drivers/tty/tty_buffer.c
index cc1e9850d65..d8210ca0072 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -413,7 +413,8 @@ static void flush_to_ldisc(struct work_struct *work)
413 spin_lock_irqsave(&tty->buf.lock, flags); 413 spin_lock_irqsave(&tty->buf.lock, flags);
414 414
415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { 415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
416 struct tty_buffer *head; 416 struct tty_buffer *head, *tail = tty->buf.tail;
417 int seen_tail = 0;
417 while ((head = tty->buf.head) != NULL) { 418 while ((head = tty->buf.head) != NULL) {
418 int count; 419 int count;
419 char *char_buf; 420 char *char_buf;
@@ -423,6 +424,15 @@ static void flush_to_ldisc(struct work_struct *work)
423 if (!count) { 424 if (!count) {
424 if (head->next == NULL) 425 if (head->next == NULL)
425 break; 426 break;
427 /*
 428 There's a possibility the tty might get a new buffer
 429 added during the unlock window below. We could
 430 end up spinning in here forever, hogging the CPU
 431 completely. To avoid this, let's take a rest each
 432 time we process the tail buffer.
433 */
434 if (tail == head)
435 seen_tail = 1;
426 tty->buf.head = head->next; 436 tty->buf.head = head->next;
427 tty_buffer_free(tty, head); 437 tty_buffer_free(tty, head);
428 continue; 438 continue;
@@ -432,7 +442,7 @@ static void flush_to_ldisc(struct work_struct *work)
432 line discipline as we want to empty the queue */ 442 line discipline as we want to empty the queue */
433 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 443 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
434 break; 444 break;
435 if (!tty->receive_room) { 445 if (!tty->receive_room || seen_tail) {
436 schedule_delayed_work(&tty->buf.work, 1); 446 schedule_delayed_work(&tty->buf.work, 1);
437 break; 447 break;
438 } 448 }
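The seen_tail logic added to flush_to_ldisc() bounds the work done per invocation: the worker remembers where the buffer list ended when it started and reschedules itself instead of chasing buffers queued after that point. A loose user-space analogue, draining a ring buffer only up to a snapshot of its tail:

#include <stdio.h>

#define QUEUE_LEN 8

/* Drain at most the entries that were present when we started, so a
 * producer that keeps appending cannot pin us in this loop forever
 * (the same idea as the seen_tail check in flush_to_ldisc()). */
static int drain_bounded(const int *queue, int head, int tail)
{
        int snapshot_tail = tail;       /* remember where the queue ended */
        int processed = 0;

        while (head != snapshot_tail) {
                printf("processing %d\n", queue[head]);
                head = (head + 1) % QUEUE_LEN;
                processed++;
        }
        return processed;       /* caller reschedules if more arrived meanwhile */
}

int main(void)
{
        int queue[QUEUE_LEN] = { 10, 11, 12, 13 };

        printf("drained %d items\n", drain_bounded(queue, 0, 4));
        return 0;
}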
diff --git a/drivers/char/tty_io.c b/drivers/tty/tty_io.c
index c05c5af5aa0..c05c5af5aa0 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/tty/tty_io.c
diff --git a/drivers/char/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 0c188997145..0c188997145 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
diff --git a/drivers/char/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 412f9775d19..d8e96b00502 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -47,6 +47,7 @@
47 47
48static DEFINE_SPINLOCK(tty_ldisc_lock); 48static DEFINE_SPINLOCK(tty_ldisc_lock);
49static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); 49static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
50static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
50/* Line disc dispatch table */ 51/* Line disc dispatch table */
51static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; 52static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
52 53
@@ -83,6 +84,7 @@ static void put_ldisc(struct tty_ldisc *ld)
83 return; 84 return;
84 } 85 }
85 local_irq_restore(flags); 86 local_irq_restore(flags);
87 wake_up(&tty_ldisc_idle);
86} 88}
87 89
88/** 90/**
@@ -531,6 +533,23 @@ static int tty_ldisc_halt(struct tty_struct *tty)
531} 533}
532 534
533/** 535/**
536 * tty_ldisc_wait_idle - wait for the ldisc to become idle
537 * @tty: tty to wait for
538 *
539 * Wait for the line discipline to become idle. The discipline must
540 * have been halted for this to guarantee it remains idle.
541 */
542static int tty_ldisc_wait_idle(struct tty_struct *tty)
543{
544 int ret;
545 ret = wait_event_interruptible_timeout(tty_ldisc_idle,
546 atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
547 if (ret < 0)
548 return ret;
549 return ret > 0 ? 0 : -EBUSY;
550}
551
552/**
534 * tty_set_ldisc - set line discipline 553 * tty_set_ldisc - set line discipline
535 * @tty: the terminal to set 554 * @tty: the terminal to set
536 * @ldisc: the line discipline 555 * @ldisc: the line discipline
@@ -634,8 +653,17 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
634 653
635 flush_scheduled_work(); 654 flush_scheduled_work();
636 655
656 retval = tty_ldisc_wait_idle(tty);
657
637 tty_lock(); 658 tty_lock();
638 mutex_lock(&tty->ldisc_mutex); 659 mutex_lock(&tty->ldisc_mutex);
660
661 /* handle wait idle failure locked */
662 if (retval) {
663 tty_ldisc_put(new_ldisc);
664 goto enable;
665 }
666
639 if (test_bit(TTY_HUPPED, &tty->flags)) { 667 if (test_bit(TTY_HUPPED, &tty->flags)) {
640 /* We were raced by the hangup method. It will have stomped 668 /* We were raced by the hangup method. It will have stomped
641 the ldisc data and closed the ldisc down */ 669 the ldisc data and closed the ldisc down */
@@ -669,6 +697,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
669 697
670 tty_ldisc_put(o_ldisc); 698 tty_ldisc_put(o_ldisc);
671 699
700enable:
672 /* 701 /*
673 * Allow ldisc referencing to occur again 702 * Allow ldisc referencing to occur again
674 */ 703 */
@@ -714,9 +743,12 @@ static void tty_reset_termios(struct tty_struct *tty)
714 * state closed 743 * state closed
715 */ 744 */
716 745
717static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) 746static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
718{ 747{
719 struct tty_ldisc *ld; 748 struct tty_ldisc *ld = tty_ldisc_get(ldisc);
749
750 if (IS_ERR(ld))
751 return -1;
720 752
721 tty_ldisc_close(tty, tty->ldisc); 753 tty_ldisc_close(tty, tty->ldisc);
722 tty_ldisc_put(tty->ldisc); 754 tty_ldisc_put(tty->ldisc);
@@ -724,10 +756,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
724 /* 756 /*
725 * Switch the line discipline back 757 * Switch the line discipline back
726 */ 758 */
727 ld = tty_ldisc_get(ldisc);
728 BUG_ON(IS_ERR(ld));
729 tty_ldisc_assign(tty, ld); 759 tty_ldisc_assign(tty, ld);
730 tty_set_termios_ldisc(tty, ldisc); 760 tty_set_termios_ldisc(tty, ldisc);
761
762 return 0;
731} 763}
732 764
733/** 765/**
@@ -802,13 +834,16 @@ void tty_ldisc_hangup(struct tty_struct *tty)
802 a FIXME */ 834 a FIXME */
803 if (tty->ldisc) { /* Not yet closed */ 835 if (tty->ldisc) { /* Not yet closed */
804 if (reset == 0) { 836 if (reset == 0) {
805 tty_ldisc_reinit(tty, tty->termios->c_line); 837
806 err = tty_ldisc_open(tty, tty->ldisc); 838 if (!tty_ldisc_reinit(tty, tty->termios->c_line))
839 err = tty_ldisc_open(tty, tty->ldisc);
840 else
841 err = 1;
807 } 842 }
808 /* If the re-open fails or we reset then go to N_TTY. The 843 /* If the re-open fails or we reset then go to N_TTY. The
809 N_TTY open cannot fail */ 844 N_TTY open cannot fail */
810 if (reset || err) { 845 if (reset || err) {
811 tty_ldisc_reinit(tty, N_TTY); 846 BUG_ON(tty_ldisc_reinit(tty, N_TTY));
812 WARN_ON(tty_ldisc_open(tty, tty->ldisc)); 847 WARN_ON(tty_ldisc_open(tty, tty->ldisc));
813 } 848 }
814 tty_ldisc_enable(tty); 849 tty_ldisc_enable(tty);
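tty_ldisc_wait_idle() maps the three outcomes of wait_event_interruptible_timeout() onto a simple contract: a negative value is passed through, a timeout becomes -EBUSY, and anything else means the ldisc went idle, so 0 is returned. A rough user-space analogue of that contract, polling a counter with a timeout (a real implementation would block on a wait queue rather than poll):

#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Wait up to timeout_ms for *users to drop to 1, returning 0 on success
 * and -EBUSY on timeout; the kernel helper additionally forwards signal
 * errors from wait_event_interruptible_timeout(). */
static int wait_idle(volatile int *users, int timeout_ms)
{
        struct timespec step = { 0, 10 * 1000 * 1000 };  /* 10 ms */

        for (int waited = 0; waited < timeout_ms; waited += 10) {
                if (*users == 1)
                        return 0;
                nanosleep(&step, NULL);
        }
        return *users == 1 ? 0 : -EBUSY;
}

int main(void)
{
        int users = 1;

        printf("wait_idle -> %d\n", wait_idle(&users, 100));   /* 0 */
        users = 2;
        printf("wait_idle -> %d\n", wait_idle(&users, 100));   /* -EBUSY */
        return 0;
}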
diff --git a/drivers/char/tty_mutex.c b/drivers/tty/tty_mutex.c
index 133697540c7..133697540c7 100644
--- a/drivers/char/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
diff --git a/drivers/char/tty_port.c b/drivers/tty/tty_port.c
index 33d37d230f8..33d37d230f8 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/tty/tty_port.c
diff --git a/drivers/char/.gitignore b/drivers/tty/vt/.gitignore
index 83683a2d8e6..83683a2d8e6 100644
--- a/drivers/char/.gitignore
+++ b/drivers/tty/vt/.gitignore
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
new file mode 100644
index 00000000000..14a51c9960d
--- /dev/null
+++ b/drivers/tty/vt/Makefile
@@ -0,0 +1,34 @@
1#
2# This file contains the font map for the default (hardware) font
3#
4FONTMAPFILE = cp437.uni
5
6obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o \
7 selection.o keyboard.o
8obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
9obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
10
11# Files generated that shall be removed upon make clean
12clean-files := consolemap_deftbl.c defkeymap.c
13
14quiet_cmd_conmk = CONMK $@
15 cmd_conmk = scripts/conmakehash $< > $@
16
17$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
18 $(call cmd,conmk)
19
20$(obj)/defkeymap.o: $(obj)/defkeymap.c
21
22# Uncomment if you're changing the keymap and have an appropriate
23# loadkeys version for the map. By default, we'll use the shipped
24# versions.
25# GENERATE_KEYMAP := 1
26
27ifdef GENERATE_KEYMAP
28
29$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
30 loadkeys --mktable $< > $@.tmp
31 sed -e 's/^static *//' $@.tmp > $@
32 rm $@.tmp
33
34endif
diff --git a/drivers/char/consolemap.c b/drivers/tty/vt/consolemap.c
index 45d3e80156d..45d3e80156d 100644
--- a/drivers/char/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
diff --git a/drivers/char/cp437.uni b/drivers/tty/vt/cp437.uni
index bc6163484f6..bc6163484f6 100644
--- a/drivers/char/cp437.uni
+++ b/drivers/tty/vt/cp437.uni
diff --git a/drivers/char/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
index d2208dfe3f6..d2208dfe3f6 100644
--- a/drivers/char/defkeymap.c_shipped
+++ b/drivers/tty/vt/defkeymap.c_shipped
diff --git a/drivers/char/defkeymap.map b/drivers/tty/vt/defkeymap.map
index 50b30cace26..50b30cace26 100644
--- a/drivers/char/defkeymap.map
+++ b/drivers/tty/vt/defkeymap.map
diff --git a/drivers/char/keyboard.c b/drivers/tty/vt/keyboard.c
index e95d7876ca6..e95d7876ca6 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
diff --git a/drivers/char/selection.c b/drivers/tty/vt/selection.c
index ebae344ce91..ebae344ce91 100644
--- a/drivers/char/selection.c
+++ b/drivers/tty/vt/selection.c
diff --git a/drivers/char/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 273ab44cc91..eab3a1ff99e 100644
--- a/drivers/char/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -553,12 +553,12 @@ static unsigned int
553vcs_poll(struct file *file, poll_table *wait) 553vcs_poll(struct file *file, poll_table *wait)
554{ 554{
555 struct vcs_poll_data *poll = vcs_poll_data_get(file); 555 struct vcs_poll_data *poll = vcs_poll_data_get(file);
556 int ret = 0; 556 int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
557 557
558 if (poll) { 558 if (poll) {
559 poll_wait(file, &poll->waitq, wait); 559 poll_wait(file, &poll->waitq, wait);
560 if (!poll->seen_last_update) 560 if (poll->seen_last_update)
561 ret = POLLIN | POLLRDNORM; 561 ret = DEFAULT_POLLMASK;
562 } 562 }
563 return ret; 563 return ret;
564} 564}
diff --git a/drivers/char/vt.c b/drivers/tty/vt/vt.c
index a8ec48ed14d..a8ec48ed14d 100644
--- a/drivers/char/vt.c
+++ b/drivers/tty/vt/vt.c
diff --git a/drivers/char/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 6b68a0fb461..6b68a0fb461 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index f1aaff6202a..045bb4b823e 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -965,10 +965,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
965 965
966static int proc_connectinfo(struct dev_state *ps, void __user *arg) 966static int proc_connectinfo(struct dev_state *ps, void __user *arg)
967{ 967{
968 struct usbdevfs_connectinfo ci; 968 struct usbdevfs_connectinfo ci = {
969 .devnum = ps->dev->devnum,
970 .slow = ps->dev->speed == USB_SPEED_LOW
971 };
969 972
970 ci.devnum = ps->dev->devnum;
971 ci.slow = ps->dev->speed == USB_SPEED_LOW;
972 if (copy_to_user(arg, &ci, sizeof(ci))) 973 if (copy_to_user(arg, &ci, sizeof(ci)))
973 return -EFAULT; 974 return -EFAULT;
974 return 0; 975 return 0;
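The devio.c change switches proc_connectinfo() to a designated initializer; in C, members not named in such an initializer are zero-initialized, so the structure starts from a known state before it is copied to user space. A minimal illustration with a hypothetical layout (the reserved[] member is not part of the real usbdevfs_connectinfo):

#include <assert.h>
#include <stdio.h>

struct connectinfo {
        unsigned int devnum;
        unsigned char slow;
        unsigned char reserved[3];      /* stands in for any trailing members */
};

int main(void)
{
        /* Members not named in the initializer (reserved[]) are zeroed. */
        struct connectinfo ci = {
                .devnum = 7,
                .slow = 1,
        };

        assert(ci.reserved[0] == 0 && ci.reserved[1] == 0 && ci.reserved[2] == 0);
        printf("devnum=%u slow=%u\n", ci.devnum, ci.slow);
        return 0;
}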
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b739ca81465..607d0db4a98 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -158,7 +158,7 @@ config USB_GADGET_FSL_USB2
158 boolean "Freescale Highspeed USB DR Peripheral Controller" 158 boolean "Freescale Highspeed USB DR Peripheral Controller"
159 depends on FSL_SOC || ARCH_MXC 159 depends on FSL_SOC || ARCH_MXC
160 select USB_GADGET_DUALSPEED 160 select USB_GADGET_DUALSPEED
161 select USB_FSL_MPH_DR_OF 161 select USB_FSL_MPH_DR_OF if OF
162 help 162 help
163 Some of Freescale PowerPC processors have a High Speed 163 Some of Freescale PowerPC processors have a High Speed
164 Dual-Role(DR) USB controller, which supports device mode. 164 Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
index 566cb231905..e7e0c69d3b1 100644
--- a/drivers/usb/gadget/goku_udc.h
+++ b/drivers/usb/gadget/goku_udc.h
@@ -251,7 +251,8 @@ struct goku_udc {
251 got_region:1, 251 got_region:1,
252 req_config:1, 252 req_config:1,
253 configured:1, 253 configured:1,
254 enabled:1; 254 enabled:1,
255 registered:1;
255 256
256 /* pci state used to access those endpoints */ 257 /* pci state used to access those endpoints */
257 struct pci_dev *pdev; 258 struct pci_dev *pdev;
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index cb23355f52d..fbe86ca9580 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -811,7 +811,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
811 INFO(dev, "MAC %pM\n", net->dev_addr); 811 INFO(dev, "MAC %pM\n", net->dev_addr);
812 INFO(dev, "HOST MAC %pM\n", dev->host_mac); 812 INFO(dev, "HOST MAC %pM\n", dev->host_mac);
813 813
814 netif_stop_queue(net);
815 the_dev = dev; 814 the_dev = dev;
816 } 815 }
817 816
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 01e5354a4c2..40f7716b31f 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -105,11 +105,15 @@ struct gs_port {
105 wait_queue_head_t close_wait; /* wait for last close */ 105 wait_queue_head_t close_wait; /* wait for last close */
106 106
107 struct list_head read_pool; 107 struct list_head read_pool;
108 int read_started;
109 int read_allocated;
108 struct list_head read_queue; 110 struct list_head read_queue;
109 unsigned n_read; 111 unsigned n_read;
110 struct tasklet_struct push; 112 struct tasklet_struct push;
111 113
112 struct list_head write_pool; 114 struct list_head write_pool;
115 int write_started;
116 int write_allocated;
113 struct gs_buf port_write_buf; 117 struct gs_buf port_write_buf;
114 wait_queue_head_t drain_wait; /* wait while writes drain */ 118 wait_queue_head_t drain_wait; /* wait while writes drain */
115 119
@@ -363,6 +367,9 @@ __acquires(&port->port_lock)
363 struct usb_request *req; 367 struct usb_request *req;
364 int len; 368 int len;
365 369
370 if (port->write_started >= QUEUE_SIZE)
371 break;
372
366 req = list_entry(pool->next, struct usb_request, list); 373 req = list_entry(pool->next, struct usb_request, list);
367 len = gs_send_packet(port, req->buf, in->maxpacket); 374 len = gs_send_packet(port, req->buf, in->maxpacket);
368 if (len == 0) { 375 if (len == 0) {
@@ -397,6 +404,8 @@ __acquires(&port->port_lock)
397 break; 404 break;
398 } 405 }
399 406
407 port->write_started++;
408
400 /* abort immediately after disconnect */ 409 /* abort immediately after disconnect */
401 if (!port->port_usb) 410 if (!port->port_usb)
402 break; 411 break;
@@ -418,7 +427,6 @@ __acquires(&port->port_lock)
418{ 427{
419 struct list_head *pool = &port->read_pool; 428 struct list_head *pool = &port->read_pool;
420 struct usb_ep *out = port->port_usb->out; 429 struct usb_ep *out = port->port_usb->out;
421 unsigned started = 0;
422 430
423 while (!list_empty(pool)) { 431 while (!list_empty(pool)) {
424 struct usb_request *req; 432 struct usb_request *req;
@@ -430,6 +438,9 @@ __acquires(&port->port_lock)
430 if (!tty) 438 if (!tty)
431 break; 439 break;
432 440
441 if (port->read_started >= QUEUE_SIZE)
442 break;
443
433 req = list_entry(pool->next, struct usb_request, list); 444 req = list_entry(pool->next, struct usb_request, list);
434 list_del(&req->list); 445 list_del(&req->list);
435 req->length = out->maxpacket; 446 req->length = out->maxpacket;
@@ -447,13 +458,13 @@ __acquires(&port->port_lock)
447 list_add(&req->list, pool); 458 list_add(&req->list, pool);
448 break; 459 break;
449 } 460 }
450 started++; 461 port->read_started++;
451 462
452 /* abort immediately after disconnect */ 463 /* abort immediately after disconnect */
453 if (!port->port_usb) 464 if (!port->port_usb)
454 break; 465 break;
455 } 466 }
456 return started; 467 return port->read_started;
457} 468}
458 469
459/* 470/*
@@ -535,6 +546,7 @@ static void gs_rx_push(unsigned long _port)
535 } 546 }
536recycle: 547recycle:
537 list_move(&req->list, &port->read_pool); 548 list_move(&req->list, &port->read_pool);
549 port->read_started--;
538 } 550 }
539 551
540 /* Push from tty to ldisc; without low_latency set this is handled by 552 /* Push from tty to ldisc; without low_latency set this is handled by
@@ -587,6 +599,7 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
587 599
588 spin_lock(&port->port_lock); 600 spin_lock(&port->port_lock);
589 list_add(&req->list, &port->write_pool); 601 list_add(&req->list, &port->write_pool);
602 port->write_started--;
590 603
591 switch (req->status) { 604 switch (req->status) {
592 default: 605 default:
@@ -608,7 +621,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
608 spin_unlock(&port->port_lock); 621 spin_unlock(&port->port_lock);
609} 622}
610 623
611static void gs_free_requests(struct usb_ep *ep, struct list_head *head) 624static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
625 int *allocated)
612{ 626{
613 struct usb_request *req; 627 struct usb_request *req;
614 628
@@ -616,25 +630,31 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
616 req = list_entry(head->next, struct usb_request, list); 630 req = list_entry(head->next, struct usb_request, list);
617 list_del(&req->list); 631 list_del(&req->list);
618 gs_free_req(ep, req); 632 gs_free_req(ep, req);
633 if (allocated)
634 (*allocated)--;
619 } 635 }
620} 636}
621 637
622static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, 638static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
623 void (*fn)(struct usb_ep *, struct usb_request *)) 639 void (*fn)(struct usb_ep *, struct usb_request *),
640 int *allocated)
624{ 641{
625 int i; 642 int i;
626 struct usb_request *req; 643 struct usb_request *req;
644 int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
627 645
628 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't 646 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
629 * do quite that many this time, don't fail ... we just won't 647 * do quite that many this time, don't fail ... we just won't
630 * be as speedy as we might otherwise be. 648 * be as speedy as we might otherwise be.
631 */ 649 */
632 for (i = 0; i < QUEUE_SIZE; i++) { 650 for (i = 0; i < n; i++) {
633 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); 651 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
634 if (!req) 652 if (!req)
635 return list_empty(head) ? -ENOMEM : 0; 653 return list_empty(head) ? -ENOMEM : 0;
636 req->complete = fn; 654 req->complete = fn;
637 list_add_tail(&req->list, head); 655 list_add_tail(&req->list, head);
656 if (allocated)
657 (*allocated)++;
638 } 658 }
639 return 0; 659 return 0;
640} 660}
@@ -661,14 +681,15 @@ static int gs_start_io(struct gs_port *port)
661 * configurations may use different endpoints with a given port; 681 * configurations may use different endpoints with a given port;
662 * and high speed vs full speed changes packet sizes too. 682 * and high speed vs full speed changes packet sizes too.
663 */ 683 */
664 status = gs_alloc_requests(ep, head, gs_read_complete); 684 status = gs_alloc_requests(ep, head, gs_read_complete,
685 &port->read_allocated);
665 if (status) 686 if (status)
666 return status; 687 return status;
667 688
668 status = gs_alloc_requests(port->port_usb->in, &port->write_pool, 689 status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
669 gs_write_complete); 690 gs_write_complete, &port->write_allocated);
670 if (status) { 691 if (status) {
671 gs_free_requests(ep, head); 692 gs_free_requests(ep, head, &port->read_allocated);
672 return status; 693 return status;
673 } 694 }
674 695
@@ -680,8 +701,9 @@ static int gs_start_io(struct gs_port *port)
680 if (started) { 701 if (started) {
681 tty_wakeup(port->port_tty); 702 tty_wakeup(port->port_tty);
682 } else { 703 } else {
683 gs_free_requests(ep, head); 704 gs_free_requests(ep, head, &port->read_allocated);
684 gs_free_requests(port->port_usb->in, &port->write_pool); 705 gs_free_requests(port->port_usb->in, &port->write_pool,
706 &port->write_allocated);
685 status = -EIO; 707 status = -EIO;
686 } 708 }
687 709
@@ -1315,8 +1337,12 @@ void gserial_disconnect(struct gserial *gser)
1315 spin_lock_irqsave(&port->port_lock, flags); 1337 spin_lock_irqsave(&port->port_lock, flags);
1316 if (port->open_count == 0 && !port->openclose) 1338 if (port->open_count == 0 && !port->openclose)
1317 gs_buf_free(&port->port_write_buf); 1339 gs_buf_free(&port->port_write_buf);
1318 gs_free_requests(gser->out, &port->read_pool); 1340 gs_free_requests(gser->out, &port->read_pool, NULL);
1319 gs_free_requests(gser->out, &port->read_queue); 1341 gs_free_requests(gser->out, &port->read_queue, NULL);
1320 gs_free_requests(gser->in, &port->write_pool); 1342 gs_free_requests(gser->in, &port->write_pool, NULL);
1343
1344 port->read_allocated = port->read_started =
1345 port->write_allocated = port->write_started = 0;
1346
1321 spin_unlock_irqrestore(&port->port_lock, flags); 1347 spin_unlock_irqrestore(&port->port_lock, flags);
1322} 1348}
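
The u_serial.c changes above replace a per-call "started" count with per-port read_started/write_started and *_allocated counters, so no more than QUEUE_SIZE requests are ever in flight and the pool can be topped back up after a disconnect. Below is a minimal user-space sketch of that counting scheme; struct port, submit_one() and complete_one() are illustrative names, not the driver's functions.

#include <stdio.h>

#define QUEUE_SIZE 8

struct port {
	int write_started;	/* requests currently queued to the endpoint */
	int write_allocated;	/* requests that exist at all */
};

static int submit_one(struct port *p)
{
	if (p->write_started >= QUEUE_SIZE)
		return -1;		/* throttle: pool is fully in flight */
	p->write_started++;
	return 0;
}

static void complete_one(struct port *p)
{
	p->write_started--;		/* completion returns the slot */
}

int main(void)
{
	struct port p = { .write_allocated = QUEUE_SIZE };
	int queued = 0;

	while (submit_one(&p) == 0)
		queued++;
	printf("queued %d of %d\n", queued, p.write_allocated);

	complete_one(&p);
	printf("after one completion, can queue again: %s\n",
	       submit_one(&p) == 0 ? "yes" : "no");
	return 0;
}
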
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2391c396ca3..6f4f8e6a40c 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -122,7 +122,7 @@ config USB_EHCI_FSL
122 bool "Support for Freescale on-chip EHCI USB controller" 122 bool "Support for Freescale on-chip EHCI USB controller"
123 depends on USB_EHCI_HCD && FSL_SOC 123 depends on USB_EHCI_HCD && FSL_SOC
124 select USB_EHCI_ROOT_HUB_TT 124 select USB_EHCI_ROOT_HUB_TT
125 select USB_FSL_MPH_DR_OF 125 select USB_FSL_MPH_DR_OF if OF
126 ---help--- 126 ---help---
127 Variation of ARC USB block used in some Freescale chips. 127 Variation of ARC USB block used in some Freescale chips.
128 128
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ac9c4d7c44a..bce85055019 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -36,6 +36,8 @@ struct ehci_mxc_priv {
36static int ehci_mxc_setup(struct usb_hcd *hcd) 36static int ehci_mxc_setup(struct usb_hcd *hcd)
37{ 37{
38 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 38 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
39 struct device *dev = hcd->self.controller;
40 struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
39 int retval; 41 int retval;
40 42
41 /* EHCI registers start at offset 0x100 */ 43 /* EHCI registers start at offset 0x100 */
@@ -63,6 +65,12 @@ static int ehci_mxc_setup(struct usb_hcd *hcd)
63 65
64 ehci_reset(ehci); 66 ehci_reset(ehci);
65 67
68 /* set up the PORTSCx register */
69 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
70
71 /* is this really needed? */
72 msleep(10);
73
66 ehci_port_power(ehci, 0); 74 ehci_port_power(ehci, 0);
67 return 0; 75 return 0;
68} 76}
@@ -114,7 +122,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
114 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; 122 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
115 struct usb_hcd *hcd; 123 struct usb_hcd *hcd;
116 struct resource *res; 124 struct resource *res;
117 int irq, ret, temp; 125 int irq, ret;
118 struct ehci_mxc_priv *priv; 126 struct ehci_mxc_priv *priv;
119 struct device *dev = &pdev->dev; 127 struct device *dev = &pdev->dev;
120 128
@@ -188,10 +196,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
188 clk_enable(priv->ahbclk); 196 clk_enable(priv->ahbclk);
189 } 197 }
190 198
191 /* set up the PORTSCx register */
192 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
193 mdelay(10);
194
195 /* setup specific usb hw */ 199 /* setup specific usb hw */
196 ret = mxc_initialize_usb_hw(pdev->id, pdata->flags); 200 ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
197 if (ret < 0) 201 if (ret < 0)
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index 10e1872f3ab..931d588c3fb 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -273,4 +273,4 @@ static struct platform_driver ohci_hcd_jz4740_driver = {
273 }, 273 },
274}; 274};
275 275
276MODULE_ALIAS("platfrom:jz4740-ohci"); 276MODULE_ALIAS("platform:jz4740-ohci");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 37566419877..c9078e4e1f4 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -553,6 +553,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
553 /* needed for power consumption */ 553 /* needed for power consumption */
554 struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc; 554 struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
555 555
556 memset(&info, 0, sizeof(info));
556 /* directly from the descriptor */ 557 /* directly from the descriptor */
557 info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); 558 info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
558 info.product = dev->product_id; 559 info.product = dev->product_id;
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 70d00e99a4b..dd573abd2d1 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3008,6 +3008,7 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3008#else 3008#else
3009 x.sisusb_conactive = 0; 3009 x.sisusb_conactive = 0;
3010#endif 3010#endif
3011 memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
3011 3012
3012 if (copy_to_user((void __user *)arg, &x, sizeof(x))) 3013 if (copy_to_user((void __user *)arg, &x, sizeof(x)))
3013 retval = -EFAULT; 3014 retval = -EFAULT;
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 611a9d27436..fcb5206a65b 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -171,8 +171,9 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
171 } 171 }
172 172
173 /* Start sampling ID pin, when plug is removed from MUSB */ 173 /* Start sampling ID pin, when plug is removed from MUSB */
174 if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE 174 if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
175 || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { 175 || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) ||
176 (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
176 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 177 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
177 musb->a_wait_bcon = TIMER_DELAY; 178 musb->a_wait_bcon = TIMER_DELAY;
178 } 179 }
@@ -323,30 +324,8 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
323 return -EIO; 324 return -EIO;
324} 325}
325 326
326int __init musb_platform_init(struct musb *musb, void *board_data) 327static void musb_platform_reg_init(struct musb *musb)
327{ 328{
328
329 /*
330 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
331 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
332 * be low for DEVICE mode and high for HOST mode. We set it high
333 * here because we are in host mode
334 */
335
336 if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
337 printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n",
338 musb->config->gpio_vrsel);
339 return -ENODEV;
340 }
341 gpio_direction_output(musb->config->gpio_vrsel, 0);
342
343 usb_nop_xceiv_register();
344 musb->xceiv = otg_get_transceiver();
345 if (!musb->xceiv) {
346 gpio_free(musb->config->gpio_vrsel);
347 return -ENODEV;
348 }
349
350 if (ANOMALY_05000346) { 329 if (ANOMALY_05000346) {
351 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); 330 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
352 SSYNC(); 331 SSYNC();
@@ -358,7 +337,8 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
358 } 337 }
359 338
360 /* Configure PLL oscillator register */ 339 /* Configure PLL oscillator register */
361 bfin_write_USB_PLLOSC_CTRL(0x30a8); 340 bfin_write_USB_PLLOSC_CTRL(0x3080 |
341 ((480/musb->config->clkin) << 1));
362 SSYNC(); 342 SSYNC();
363 343
364 bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1); 344 bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
@@ -380,6 +360,33 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
380 EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA | 360 EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
381 EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); 361 EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
382 SSYNC(); 362 SSYNC();
363}
364
365int __init musb_platform_init(struct musb *musb, void *board_data)
366{
367
368 /*
369 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
370 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
371 * be low for DEVICE mode and high for HOST mode. We set it high
372 * here because we are in host mode
373 */
374
375 if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
376 printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d\n",
377 musb->config->gpio_vrsel);
378 return -ENODEV;
379 }
380 gpio_direction_output(musb->config->gpio_vrsel, 0);
381
382 usb_nop_xceiv_register();
383 musb->xceiv = otg_get_transceiver();
384 if (!musb->xceiv) {
385 gpio_free(musb->config->gpio_vrsel);
386 return -ENODEV;
387 }
388
389 musb_platform_reg_init(musb);
383 390
384 if (is_host_enabled(musb)) { 391 if (is_host_enabled(musb)) {
385 musb->board_set_vbus = bfin_set_vbus; 392 musb->board_set_vbus = bfin_set_vbus;
@@ -394,6 +401,27 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
394 return 0; 401 return 0;
395} 402}
396 403
404#ifdef CONFIG_PM
405void musb_platform_save_context(struct musb *musb,
406 struct musb_context_registers *musb_context)
407{
408 if (is_host_active(musb))
409 /*
410 * During hibernate gpio_vrsel will change from high to low
411 * low which will generate wakeup event resume the system
412 * immediately. Set it to 0 before hibernate to avoid this
413 * wakeup event.
414 */
415 gpio_set_value(musb->config->gpio_vrsel, 0);
416}
417
418void musb_platform_restore_context(struct musb *musb,
419 struct musb_context_registers *musb_context)
420{
421 musb_platform_reg_init(musb);
422}
423#endif
424
397int musb_platform_exit(struct musb *musb) 425int musb_platform_exit(struct musb *musb)
398{ 426{
399 gpio_free(musb->config->gpio_vrsel); 427 gpio_free(musb->config->gpio_vrsel);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c9f9024c551..e6669fc3b80 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -552,7 +552,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
552 if (int_usb & MUSB_INTR_SESSREQ) { 552 if (int_usb & MUSB_INTR_SESSREQ) {
553 void __iomem *mbase = musb->mregs; 553 void __iomem *mbase = musb->mregs;
554 554
555 if (devctl & MUSB_DEVCTL_BDEVICE) { 555 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
556 && (devctl & MUSB_DEVCTL_BDEVICE)) {
556 DBG(3, "SessReq while on B state\n"); 557 DBG(3, "SessReq while on B state\n");
557 return IRQ_HANDLED; 558 return IRQ_HANDLED;
558 } 559 }
@@ -1052,6 +1053,11 @@ static void musb_shutdown(struct platform_device *pdev)
1052 clk_put(musb->clock); 1053 clk_put(musb->clock);
1053 spin_unlock_irqrestore(&musb->lock, flags); 1054 spin_unlock_irqrestore(&musb->lock, flags);
1054 1055
1056 if (!is_otg_enabled(musb) && is_host_enabled(musb))
1057 usb_remove_hcd(musb_to_hcd(musb));
1058 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1059 musb_platform_exit(musb);
1060
1055 /* FIXME power down */ 1061 /* FIXME power down */
1056} 1062}
1057 1063
@@ -2244,13 +2250,6 @@ static int __exit musb_remove(struct platform_device *pdev)
2244 */ 2250 */
2245 musb_exit_debugfs(musb); 2251 musb_exit_debugfs(musb);
2246 musb_shutdown(pdev); 2252 musb_shutdown(pdev);
2247#ifdef CONFIG_USB_MUSB_HDRC_HCD
2248 if (musb->board_mode == MUSB_HOST)
2249 usb_remove_hcd(musb_to_hcd(musb));
2250#endif
2251 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2252 musb_platform_exit(musb);
2253 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2254 2253
2255 musb_free(musb); 2254 musb_free(musb);
2256 iounmap(ctrl_base); 2255 iounmap(ctrl_base);
@@ -2411,9 +2410,6 @@ static int musb_suspend(struct device *dev)
2411 unsigned long flags; 2410 unsigned long flags;
2412 struct musb *musb = dev_to_musb(&pdev->dev); 2411 struct musb *musb = dev_to_musb(&pdev->dev);
2413 2412
2414 if (!musb->clock)
2415 return 0;
2416
2417 spin_lock_irqsave(&musb->lock, flags); 2413 spin_lock_irqsave(&musb->lock, flags);
2418 2414
2419 if (is_peripheral_active(musb)) { 2415 if (is_peripheral_active(musb)) {
@@ -2428,10 +2424,12 @@ static int musb_suspend(struct device *dev)
2428 2424
2429 musb_save_context(musb); 2425 musb_save_context(musb);
2430 2426
2431 if (musb->set_clock) 2427 if (musb->clock) {
2432 musb->set_clock(musb->clock, 0); 2428 if (musb->set_clock)
2433 else 2429 musb->set_clock(musb->clock, 0);
2434 clk_disable(musb->clock); 2430 else
2431 clk_disable(musb->clock);
2432 }
2435 spin_unlock_irqrestore(&musb->lock, flags); 2433 spin_unlock_irqrestore(&musb->lock, flags);
2436 return 0; 2434 return 0;
2437} 2435}
@@ -2441,13 +2439,12 @@ static int musb_resume_noirq(struct device *dev)
2441 struct platform_device *pdev = to_platform_device(dev); 2439 struct platform_device *pdev = to_platform_device(dev);
2442 struct musb *musb = dev_to_musb(&pdev->dev); 2440 struct musb *musb = dev_to_musb(&pdev->dev);
2443 2441
2444 if (!musb->clock) 2442 if (musb->clock) {
2445 return 0; 2443 if (musb->set_clock)
2446 2444 musb->set_clock(musb->clock, 1);
2447 if (musb->set_clock) 2445 else
2448 musb->set_clock(musb->clock, 1); 2446 clk_enable(musb->clock);
2449 else 2447 }
2450 clk_enable(musb->clock);
2451 2448
2452 musb_restore_context(musb); 2449 musb_restore_context(musb);
2453 2450
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 69797e5b46a..febaabcc2b3 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -487,7 +487,7 @@ struct musb_context_registers {
487}; 487};
488 488
489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ 489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
490 defined(CONFIG_ARCH_OMAP4) 490 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN)
491extern void musb_platform_save_context(struct musb *musb, 491extern void musb_platform_save_context(struct musb *musb,
492 struct musb_context_registers *musb_context); 492 struct musb_context_registers *musb_context);
493extern void musb_platform_restore_context(struct musb *musb, 493extern void musb_platform_restore_context(struct musb *musb,
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 5d815049cba..36cfd060dbe 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -644,10 +644,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
644 */ 644 */
645 645
646 csr |= MUSB_RXCSR_DMAENAB; 646 csr |= MUSB_RXCSR_DMAENAB;
647 if (!musb_ep->hb_mult &&
648 musb_ep->hw_ep->rx_double_buffered)
649 csr |= MUSB_RXCSR_AUTOCLEAR;
650#ifdef USE_MODE1 647#ifdef USE_MODE1
648 csr |= MUSB_RXCSR_AUTOCLEAR;
651 /* csr |= MUSB_RXCSR_DMAMODE; */ 649 /* csr |= MUSB_RXCSR_DMAMODE; */
652 650
653 /* this special sequence (enabling and then 651 /* this special sequence (enabling and then
@@ -656,6 +654,10 @@ static void rxstate(struct musb *musb, struct musb_request *req)
656 */ 654 */
657 musb_writew(epio, MUSB_RXCSR, 655 musb_writew(epio, MUSB_RXCSR,
658 csr | MUSB_RXCSR_DMAMODE); 656 csr | MUSB_RXCSR_DMAMODE);
657#else
658 if (!musb_ep->hb_mult &&
659 musb_ep->hw_ep->rx_double_buffered)
660 csr |= MUSB_RXCSR_AUTOCLEAR;
659#endif 661#endif
660 musb_writew(epio, MUSB_RXCSR, csr); 662 musb_writew(epio, MUSB_RXCSR, csr);
661 663
@@ -807,7 +809,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
807 809
808#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) 810#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
809 /* Autoclear doesn't clear RxPktRdy for short packets */ 811 /* Autoclear doesn't clear RxPktRdy for short packets */
810 if ((dma->desired_mode == 0) 812 if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
811 || (dma->actual_len 813 || (dma->actual_len
812 & (musb_ep->packet_sz - 1))) { 814 & (musb_ep->packet_sz - 1))) {
813 /* ack the read! */ 815 /* ack the read! */
@@ -818,8 +820,16 @@ void musb_g_rx(struct musb *musb, u8 epnum)
818 /* incomplete, and not short? wait for next IN packet */ 820 /* incomplete, and not short? wait for next IN packet */
819 if ((request->actual < request->length) 821 if ((request->actual < request->length)
820 && (musb_ep->dma->actual_len 822 && (musb_ep->dma->actual_len
821 == musb_ep->packet_sz)) 823 == musb_ep->packet_sz)) {
824 /* In double buffer case, continue to unload fifo if
825 * there is Rx packet in FIFO.
826 **/
827 csr = musb_readw(epio, MUSB_RXCSR);
828 if ((csr & MUSB_RXCSR_RXPKTRDY) &&
829 hw_ep->rx_double_buffered)
830 goto exit;
822 return; 831 return;
832 }
823#endif 833#endif
824 musb_g_giveback(musb_ep, request, 0); 834 musb_g_giveback(musb_ep, request, 0);
825 835
@@ -827,7 +837,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
827 if (!request) 837 if (!request)
828 return; 838 return;
829 } 839 }
830 840exit:
831 /* Analyze request */ 841 /* Analyze request */
832 rxstate(musb, to_musb_request(request)); 842 rxstate(musb, to_musb_request(request));
833} 843}
@@ -916,13 +926,9 @@ static int musb_gadget_enable(struct usb_ep *ep,
916 * likewise high bandwidth periodic tx 926 * likewise high bandwidth periodic tx
917 */ 927 */
918 /* Set TXMAXP with the FIFO size of the endpoint 928 /* Set TXMAXP with the FIFO size of the endpoint
919 * to disable double buffering mode. Currently, It seems that double 929 * to disable double buffering mode.
920 * buffering has problem if musb RTL revision number < 2.0.
921 */ 930 */
922 if (musb->hwvers < MUSB_HWVERS_2000) 931 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
923 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
924 else
925 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
926 932
927 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 933 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
928 if (musb_readw(regs, MUSB_TXCSR) 934 if (musb_readw(regs, MUSB_TXCSR)
@@ -958,10 +964,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
958 /* Set RXMAXP with the FIFO size of the endpoint 964 /* Set RXMAXP with the FIFO size of the endpoint
959 * to disable double buffering mode. 965 * to disable double buffering mode.
960 */ 966 */
961 if (musb->hwvers < MUSB_HWVERS_2000) 967 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
962 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
963 else
964 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
965 968
966 /* force shared fifo to OUT-only mode */ 969 /* force shared fifo to OUT-only mode */
967 if (hw_ep->is_shared_fifo) { 970 if (hw_ep->is_shared_fifo) {
@@ -1166,8 +1169,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1166 : DMA_FROM_DEVICE); 1169 : DMA_FROM_DEVICE);
1167 request->mapped = 0; 1170 request->mapped = 0;
1168 } 1171 }
1169 } else if (!req->buf) {
1170 return -ENODATA;
1171 } else 1172 } else
1172 request->mapped = 0; 1173 request->mapped = 0;
1173 1174
@@ -1695,8 +1696,10 @@ int __init musb_gadget_setup(struct musb *musb)
1695 musb_platform_try_idle(musb, 0); 1696 musb_platform_try_idle(musb, 0);
1696 1697
1697 status = device_register(&musb->g.dev); 1698 status = device_register(&musb->g.dev);
1698 if (status != 0) 1699 if (status != 0) {
1700 put_device(&musb->g.dev);
1699 the_gadget = NULL; 1701 the_gadget = NULL;
1702 }
1700 return status; 1703 return status;
1701} 1704}
1702 1705
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 244267527a6..5a727c5b867 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -633,8 +633,9 @@ static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
633 return 0; 633 return 0;
634} 634}
635 635
636static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum) 636static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
637{ 637{
638 return 0;
638} 639}
639 640
640#endif /* CONFIG_BLACKFIN */ 641#endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 6f771af5cbd..563114d613d 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -158,6 +158,8 @@ static int dma_channel_program(struct dma_channel *channel,
158 dma_addr_t dma_addr, u32 len) 158 dma_addr_t dma_addr, u32 len)
159{ 159{
160 struct musb_dma_channel *musb_channel = channel->private_data; 160 struct musb_dma_channel *musb_channel = channel->private_data;
161 struct musb_dma_controller *controller = musb_channel->controller;
162 struct musb *musb = controller->private_data;
161 163
162 DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", 164 DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
163 musb_channel->epnum, 165 musb_channel->epnum,
@@ -167,6 +169,18 @@ static int dma_channel_program(struct dma_channel *channel,
167 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || 169 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
168 channel->status == MUSB_DMA_STATUS_BUSY); 170 channel->status == MUSB_DMA_STATUS_BUSY);
169 171
172 /*
173 * The DMA engine in RTL1.8 and above cannot handle
174 * DMA addresses that are not aligned to a 4 byte boundary.
175 * It ends up masking the last two bits of the address
176 * programmed in DMA_ADDR.
177 *
178 * Fail such DMA transfers, so that the backup PIO mode
179 * can carry out the transfer
180 */
181 if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
182 return false;
183
170 channel->actual_len = 0; 184 channel->actual_len = 0;
171 musb_channel->start_addr = dma_addr; 185 musb_channel->start_addr = dma_addr;
172 musb_channel->len = len; 186 musb_channel->len = len;
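
The musbhsdma.c hunk above rejects DMA addresses that are not 4-byte aligned on controllers at RTL 1.8 or newer, so the caller falls back to PIO instead of letting the engine silently mask the low address bits. A small user-space sketch of that guard follows; the MUSB_HWVERS_1800 value and the function name are assumptions made for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MUSB_HWVERS_1800 0x1800		/* assumed encoding for RTL 1.8 */

static bool dma_addr_ok(uint16_t hwvers, uintptr_t dma_addr)
{
	/* RTL 1.8+ masks the low two bits of DMA_ADDR, so an unaligned
	 * start address would corrupt the transfer; refuse it here. */
	if (hwvers >= MUSB_HWVERS_1800 && (dma_addr % 4))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", dma_addr_ok(0x1800, 0x1002));	/* 0: fall back to PIO */
	printf("%d\n", dma_addr_ok(0x1800, 0x1004));	/* 1: DMA allowed */
	return 0;
}
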
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 89a9a584780..76f8b355667 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -794,6 +794,8 @@ static struct usb_device_id id_table_combined [] = {
794 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, 794 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
795 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, 795 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
796 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, 796 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
797 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
798 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
797 { }, /* Optional parameter entry */ 799 { }, /* Optional parameter entry */
798 { } /* Terminating entry */ 800 { } /* Terminating entry */
799}; 801};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 7dfe02f1fb6..263f6255119 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1100,3 +1100,10 @@
1100#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18 1100#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18
1101#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C 1101#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C
1102#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D 1102#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D
1103
1104/*
1105 * Milkymist One JTAG/Serial
1106 */
1107#define QIHARDWARE_VID 0x20B7
1108#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
1109
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2297fb1bcf6..ef2977d3a61 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -518,7 +518,7 @@ static const struct usb_device_id option_ids[] = {
518 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) }, 518 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
519 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, 519 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
520 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, 520 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
521 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, 521 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
523 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, 523 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
524 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, 524 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 2054b1e25a6..d1268191acb 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -331,10 +331,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
331 331
332 iu->iu_id = IU_ID_COMMAND; 332 iu->iu_id = IU_ID_COMMAND;
333 iu->tag = cpu_to_be16(stream_id); 333 iu->tag = cpu_to_be16(stream_id);
334 if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER)) 334 iu->prio_attr = UAS_SIMPLE_TAG;
335 iu->prio_attr = UAS_ORDERED_TAG;
336 else
337 iu->prio_attr = UAS_SIMPLE_TAG;
338 iu->len = len; 335 iu->len = len;
339 int_to_scsilun(sdev->lun, &iu->lun); 336 int_to_scsilun(sdev->lun, &iu->lun);
340 memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); 337 memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c
index 436e4f7110c..e45e673b877 100644
--- a/drivers/uwb/allocator.c
+++ b/drivers/uwb/allocator.c
@@ -326,7 +326,8 @@ int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *availab
326 int bit_index; 326 int bit_index;
327 327
328 ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); 328 ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
329 329 if (!ai)
330 return UWB_RSV_ALLOC_NOT_FOUND;
330 ai->min_mas = rsv->min_mas; 331 ai->min_mas = rsv->min_mas;
331 ai->max_mas = rsv->max_mas; 332 ai->max_mas = rsv->max_mas;
332 ai->max_interval = rsv->max_interval; 333 ai->max_interval = rsv->max_interval;
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 3ec24609151..734c650a47c 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -502,8 +502,10 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
502 struct device_attribute *attr, const char *buf, size_t count) 502 struct device_attribute *attr, const char *buf, size_t count)
503{ 503{
504 struct adp8860_bl *data = dev_get_drvdata(dev); 504 struct adp8860_bl *data = dev_get_drvdata(dev);
505 int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
506 if (ret)
507 return ret;
505 508
506 strict_strtoul(buf, 10, &data->cached_daylight_max);
507 return adp8860_store(dev, buf, count, ADP8860_BLMX1); 509 return adp8860_store(dev, buf, count, ADP8860_BLMX1);
508} 510}
509static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show, 511static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show,
@@ -614,7 +616,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
614 if (val == 0) { 616 if (val == 0) {
615 /* Enable automatic ambient light sensing */ 617 /* Enable automatic ambient light sensing */
616 adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); 618 adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
617 } else if ((val > 0) && (val < 6)) { 619 } else if ((val > 0) && (val <= 3)) {
618 /* Disable automatic ambient light sensing */ 620 /* Disable automatic ambient light sensing */
619 adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); 621 adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
620 622
@@ -622,7 +624,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
622 mutex_lock(&data->lock); 624 mutex_lock(&data->lock);
623 adp8860_read(data->client, ADP8860_CFGR, &reg_val); 625 adp8860_read(data->client, ADP8860_CFGR, &reg_val);
624 reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT); 626 reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
625 reg_val |= val << CFGR_BLV_SHIFT; 627 reg_val |= (val - 1) << CFGR_BLV_SHIFT;
626 adp8860_write(data->client, ADP8860_CFGR, reg_val); 628 adp8860_write(data->client, ADP8860_CFGR, reg_val);
627 mutex_unlock(&data->lock); 629 mutex_unlock(&data->lock);
628 } 630 }
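
The adp8860 change above stops ignoring the return value of strict_strtoul() and propagates a parse failure instead of storing garbage in cached_daylight_max. The sketch below shows the equivalent check-before-use pattern with the standard C strtoul(); the function and variable names are illustrative only.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse an unsigned decimal value, refusing trailing junk and overflow,
 * and only update *out when the whole string was valid. */
static int parse_ulong(const char *buf, unsigned long *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(buf, &end, 10);
	if (errno || end == buf || *end != '\0')
		return -1;

	*out = val;
	return 0;
}

int main(void)
{
	unsigned long max = 0;

	printf("ok=%d\n", parse_ulong("127", &max));	/* ok=0, max=127 */
	printf("ok=%d\n", parse_ulong("12x", &max));	/* ok=-1, max untouched */
	printf("max=%lu\n", max);
	return 0;
}
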
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 9093ef0fa86..c67801e57aa 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -78,7 +78,7 @@ static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
78 const u16 slpin = 0x10; 78 const u16 slpin = 0x10;
79 const u16 disoff = 0x28; 79 const u16 disoff = 0x28;
80 80
81 if (power) { 81 if (power <= FB_BLANK_NORMAL) {
82 if (priv->lcd_on) 82 if (priv->lcd_on)
83 return 0; 83 return 0;
84 84
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index abc43a0eb97..5d3cf33953a 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -129,7 +129,7 @@ static int lms283gf05_power_set(struct lcd_device *ld, int power)
129 struct spi_device *spi = st->spi; 129 struct spi_device *spi = st->spi;
130 struct lms283gf05_pdata *pdata = spi->dev.platform_data; 130 struct lms283gf05_pdata *pdata = spi->dev.platform_data;
131 131
132 if (power) { 132 if (power <= FB_BLANK_NORMAL) {
133 if (pdata) 133 if (pdata)
134 lms283gf05_reset(pdata->reset_gpio, 134 lms283gf05_reset(pdata->reset_gpio,
135 pdata->reset_inverted); 135 pdata->reset_inverted);
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 9fb533f6373..1485f7345f4 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -335,6 +335,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
335 }, 335 },
336 .driver_data = (void *)&nvidia_chipset_data, 336 .driver_data = (void *)&nvidia_chipset_data,
337 }, 337 },
338 {
339 .callback = mbp_dmi_match,
340 .ident = "MacBookAir 3,1",
341 .matches = {
342 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
343 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
344 },
345 .driver_data = (void *)&nvidia_chipset_data,
346 },
347 {
348 .callback = mbp_dmi_match,
349 .ident = "MacBookAir 3,2",
350 .matches = {
351 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
352 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
353 },
354 .driver_data = (void *)&nvidia_chipset_data,
355 },
338 { } 356 { }
339}; 357};
340 358
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 55044351889..21866ec6965 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -25,6 +25,7 @@ struct pwm_bl_data {
25 struct pwm_device *pwm; 25 struct pwm_device *pwm;
26 struct device *dev; 26 struct device *dev;
27 unsigned int period; 27 unsigned int period;
28 unsigned int lth_brightness;
28 int (*notify)(struct device *, 29 int (*notify)(struct device *,
29 int brightness); 30 int brightness);
30}; 31};
@@ -48,7 +49,9 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
48 pwm_config(pb->pwm, 0, pb->period); 49 pwm_config(pb->pwm, 0, pb->period);
49 pwm_disable(pb->pwm); 50 pwm_disable(pb->pwm);
50 } else { 51 } else {
51 pwm_config(pb->pwm, brightness * pb->period / max, pb->period); 52 brightness = pb->lth_brightness +
53 (brightness * (pb->period - pb->lth_brightness) / max);
54 pwm_config(pb->pwm, brightness, pb->period);
52 pwm_enable(pb->pwm); 55 pwm_enable(pb->pwm);
53 } 56 }
54 return 0; 57 return 0;
@@ -92,6 +95,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
92 95
93 pb->period = data->pwm_period_ns; 96 pb->period = data->pwm_period_ns;
94 pb->notify = data->notify; 97 pb->notify = data->notify;
98 pb->lth_brightness = data->lth_brightness *
99 (data->pwm_period_ns / data->max_brightness);
95 pb->dev = &pdev->dev; 100 pb->dev = &pdev->dev;
96 101
97 pb->pwm = pwm_request(data->pwm_id, "backlight"); 102 pb->pwm = pwm_request(data->pwm_id, "backlight");
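
The pwm_bl.c changes above add a lower brightness threshold: instead of mapping brightness linearly onto 0..period, the duty cycle is scaled into [lth_brightness, period] so low settings stay above the panel's minimum usable level, while brightness 0 still disables the PWM entirely. The numbers in this user-space sketch are invented; only the scaling formula mirrors the patch.

#include <stdio.h>

static unsigned int to_duty_ns(unsigned int brightness, unsigned int max,
			       unsigned int period_ns, unsigned int lth_ns)
{
	/* 0..max is compressed into the usable duty range [lth_ns, period_ns] */
	return lth_ns + brightness * (period_ns - lth_ns) / max;
}

int main(void)
{
	unsigned int period = 5000000;	/* 5 ms PWM period */
	unsigned int lth = 500000;	/* lowest usable duty cycle */

	for (unsigned int b = 0; b <= 10; b += 5)
		printf("brightness %2u -> %u ns\n", b,
		       to_duty_ns(b, 10, period, lth));
	return 0;
}
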
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index a3128c9cb7a..5927db0da99 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -729,10 +729,10 @@ static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev,
729 729
730 return strlen(buf); 730 return strlen(buf);
731} 731}
732static DEVICE_ATTR(gamma_table, 0644, 732static DEVICE_ATTR(gamma_table, 0444,
733 s6e63m0_sysfs_show_gamma_table, NULL); 733 s6e63m0_sysfs_show_gamma_table, NULL);
734 734
735static int __init s6e63m0_probe(struct spi_device *spi) 735static int __devinit s6e63m0_probe(struct spi_device *spi)
736{ 736{
737 int ret = 0; 737 int ret = 0;
738 struct s6e63m0 *lcd = NULL; 738 struct s6e63m0 *lcd = NULL;
@@ -829,6 +829,9 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
829 struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); 829 struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
830 830
831 s6e63m0_power(lcd, FB_BLANK_POWERDOWN); 831 s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
832 device_remove_file(&spi->dev, &dev_attr_gamma_table);
833 device_remove_file(&spi->dev, &dev_attr_gamma_mode);
834 backlight_device_unregister(lcd->bd);
832 lcd_device_unregister(lcd->ld); 835 lcd_device_unregister(lcd->ld);
833 kfree(lcd); 836 kfree(lcd);
834 837
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c
index fed2a72bc6b..2fd7e5271be 100644
--- a/drivers/video/omap2/vram.c
+++ b/drivers/video/omap2/vram.c
@@ -554,9 +554,15 @@ void __init omap_vram_reserve_sdram_memblock(void)
554 size = PAGE_ALIGN(size); 554 size = PAGE_ALIGN(size);
555 555
556 if (paddr) { 556 if (paddr) {
557 if ((paddr & ~PAGE_MASK) || 557 if (paddr & ~PAGE_MASK) {
558 !memblock_is_region_memory(paddr, size)) { 558 pr_err("VRAM start address 0x%08x not page aligned\n",
559 pr_err("Illegal SDRAM region for VRAM\n"); 559 paddr);
560 return;
561 }
562
563 if (!memblock_is_region_memory(paddr, size)) {
564 pr_err("Illegal SDRAM region 0x%08x..0x%08x for VRAM\n",
565 paddr, paddr + size - 1);
560 return; 566 return;
561 } 567 }
562 568
@@ -570,9 +576,12 @@ void __init omap_vram_reserve_sdram_memblock(void)
570 return; 576 return;
571 } 577 }
572 } else { 578 } else {
573 paddr = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_REAL_LIMIT); 579 paddr = memblock_alloc(size, PAGE_SIZE);
574 } 580 }
575 581
582 memblock_free(paddr, size);
583 memblock_remove(paddr, size);
584
576 omap_vram_add_region(paddr, size); 585 omap_vram_add_region(paddr, size);
577 586
578 pr_info("Reserving %u bytes SDRAM for VRAM\n", size); 587 pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
diff --git a/drivers/video/riva/rivafb-i2c.c b/drivers/video/riva/rivafb-i2c.c
index a0e22ac483a..167400e2a18 100644
--- a/drivers/video/riva/rivafb-i2c.c
+++ b/drivers/video/riva/rivafb-i2c.c
@@ -94,7 +94,6 @@ static int __devinit riva_setup_i2c_bus(struct riva_i2c_chan *chan,
94 94
95 strcpy(chan->adapter.name, name); 95 strcpy(chan->adapter.name, name);
96 chan->adapter.owner = THIS_MODULE; 96 chan->adapter.owner = THIS_MODULE;
97 chan->adapter.id = I2C_HW_B_RIVA;
98 chan->adapter.class = i2c_class; 97 chan->adapter.class = i2c_class;
99 chan->adapter.algo_data = &chan->algo; 98 chan->adapter.algo_data = &chan->algo;
100 chan->adapter.dev.parent = &chan->par->pdev->dev; 99 chan->adapter.dev.parent = &chan->par->pdev->dev;
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 55b3077ff6f..d7df10315d8 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -1071,6 +1071,10 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1071 if (!hdmi->info) 1071 if (!hdmi->info)
1072 goto out; 1072 goto out;
1073 1073
1074 hdmi->monspec.modedb_len = 0;
1075 fb_destroy_modedb(hdmi->monspec.modedb);
1076 hdmi->monspec.modedb = NULL;
1077
1074 acquire_console_sem(); 1078 acquire_console_sem();
1075 1079
1076 /* HDMI disconnect */ 1080 /* HDMI disconnect */
@@ -1078,7 +1082,6 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1078 1082
1079 release_console_sem(); 1083 release_console_sem();
1080 pm_runtime_put(hdmi->dev); 1084 pm_runtime_put(hdmi->dev);
1081 fb_destroy_modedb(hdmi->monspec.modedb);
1082 } 1085 }
1083 1086
1084out: 1087out:
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 50963739a40..9b1364723c6 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -115,15 +115,16 @@ static const struct fb_videomode default_720p = {
115 .xres = 1280, 115 .xres = 1280,
116 .yres = 720, 116 .yres = 720,
117 117
118 .left_margin = 200, 118 .left_margin = 220,
119 .right_margin = 88, 119 .right_margin = 110,
120 .hsync_len = 48, 120 .hsync_len = 40,
121 121
122 .upper_margin = 20, 122 .upper_margin = 20,
123 .lower_margin = 5, 123 .lower_margin = 5,
124 .vsync_len = 5, 124 .vsync_len = 5,
125 125
126 .pixclock = 13468, 126 .pixclock = 13468,
127 .refresh = 60,
127 .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT, 128 .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
128}; 129};
129 130
@@ -1197,6 +1198,7 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
1197 const struct fb_videomode *mode = cfg->lcd_cfg; 1198 const struct fb_videomode *mode = cfg->lcd_cfg;
1198 unsigned long max_size = 0; 1199 unsigned long max_size = 0;
1199 int k; 1200 int k;
1201 int num_cfg;
1200 1202
1201 ch->info = framebuffer_alloc(0, &pdev->dev); 1203 ch->info = framebuffer_alloc(0, &pdev->dev);
1202 if (!ch->info) { 1204 if (!ch->info) {
@@ -1232,8 +1234,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
1232 info->fix = sh_mobile_lcdc_fix; 1234 info->fix = sh_mobile_lcdc_fix;
1233 info->fix.smem_len = max_size * (cfg->bpp / 8) * 2; 1235 info->fix.smem_len = max_size * (cfg->bpp / 8) * 2;
1234 1236
1235 if (!mode) 1237 if (!mode) {
1236 mode = &default_720p; 1238 mode = &default_720p;
1239 num_cfg = 1;
1240 } else {
1241 num_cfg = ch->cfg.num_cfg;
1242 }
1243
1244 fb_videomode_to_modelist(mode, num_cfg, &info->modelist);
1237 1245
1238 fb_videomode_to_var(var, mode); 1246 fb_videomode_to_var(var, mode);
1239 /* Default Y virtual resolution is 2x panel size */ 1247 /* Default Y virtual resolution is 2x panel size */
@@ -1281,10 +1289,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
1281 1289
1282 for (i = 0; i < j; i++) { 1290 for (i = 0; i < j; i++) {
1283 struct sh_mobile_lcdc_chan *ch = priv->ch + i; 1291 struct sh_mobile_lcdc_chan *ch = priv->ch + i;
1284 const struct fb_videomode *mode = ch->cfg.lcd_cfg;
1285
1286 if (!mode)
1287 mode = &default_720p;
1288 1292
1289 info = ch->info; 1293 info = ch->info;
1290 1294
@@ -1297,7 +1301,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
1297 } 1301 }
1298 } 1302 }
1299 1303
1300 fb_videomode_to_modelist(mode, ch->cfg.num_cfg, &info->modelist);
1301 error = register_framebuffer(info); 1304 error = register_framebuffer(info);
1302 if (error < 0) 1305 if (error < 0)
1303 goto err1; 1306 goto err1;
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index b52f8e4ef1f..3dde12b0ab0 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -4181,6 +4181,9 @@ static void __devinit
4181sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize, 4181sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize,
4182 unsigned int min) 4182 unsigned int min)
4183{ 4183{
4184 if (*mapsize < (min << 20))
4185 return;
4186
4184 ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize)); 4187 ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize));
4185 4188
4186 if(!ivideo->video_vbase) { 4189 if(!ivideo->video_vbase) {
@@ -4514,7 +4517,7 @@ sisfb_post_sis300(struct pci_dev *pdev)
4514 } else { 4517 } else {
4515#endif 4518#endif
4516 /* Need to map max FB size for finding out about RAM size */ 4519 /* Need to map max FB size for finding out about RAM size */
4517 mapsize = 64 << 20; 4520 mapsize = ivideo->video_size;
4518 sisfb_post_map_vram(ivideo, &mapsize, 4); 4521 sisfb_post_map_vram(ivideo, &mapsize, 4);
4519 4522
4520 if(ivideo->video_vbase) { 4523 if(ivideo->video_vbase) {
@@ -4680,7 +4683,7 @@ sisfb_post_xgi_ramsize(struct sis_video_info *ivideo)
4680 orSISIDXREG(SISSR, 0x20, (0x80 | 0x04)); 4683 orSISIDXREG(SISSR, 0x20, (0x80 | 0x04));
4681 4684
4682 /* Need to map max FB size for finding out about RAM size */ 4685 /* Need to map max FB size for finding out about RAM size */
4683 mapsize = 256 << 20; 4686 mapsize = ivideo->video_size;
4684 sisfb_post_map_vram(ivideo, &mapsize, 32); 4687 sisfb_post_map_vram(ivideo, &mapsize, 32);
4685 4688
4686 if(!ivideo->video_vbase) { 4689 if(!ivideo->video_vbase) {
@@ -5936,6 +5939,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5936 } 5939 }
5937 5940
5938 ivideo->video_base = pci_resource_start(pdev, 0); 5941 ivideo->video_base = pci_resource_start(pdev, 0);
5942 ivideo->video_size = pci_resource_len(pdev, 0);
5939 ivideo->mmio_base = pci_resource_start(pdev, 1); 5943 ivideo->mmio_base = pci_resource_start(pdev, 1);
5940 ivideo->mmio_size = pci_resource_len(pdev, 1); 5944 ivideo->mmio_size = pci_resource_len(pdev, 1);
5941 ivideo->SiS_Pr.RelIO = pci_resource_start(pdev, 2) + 0x30; 5945 ivideo->SiS_Pr.RelIO = pci_resource_start(pdev, 2) + 0x30;
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 97612f548a8..321a0c8346e 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -1299,9 +1299,6 @@ static void restore_cpu_virqs(unsigned int cpu)
1299 evtchn_to_irq[evtchn] = irq; 1299 evtchn_to_irq[evtchn] = irq;
1300 irq_info[irq] = mk_virq_info(evtchn, virq); 1300 irq_info[irq] = mk_virq_info(evtchn, virq);
1301 bind_evtchn_to_cpu(evtchn, cpu); 1301 bind_evtchn_to_cpu(evtchn, cpu);
1302
1303 /* Ready for use. */
1304 unmask_evtchn(evtchn);
1305 } 1302 }
1306} 1303}
1307 1304
@@ -1327,10 +1324,6 @@ static void restore_cpu_ipis(unsigned int cpu)
1327 evtchn_to_irq[evtchn] = irq; 1324 evtchn_to_irq[evtchn] = irq;
1328 irq_info[irq] = mk_ipi_info(evtchn, ipi); 1325 irq_info[irq] = mk_ipi_info(evtchn, ipi);
1329 bind_evtchn_to_cpu(evtchn, cpu); 1326 bind_evtchn_to_cpu(evtchn, cpu);
1330
1331 /* Ready for use. */
1332 unmask_evtchn(evtchn);
1333
1334 } 1327 }
1335} 1328}
1336 1329
@@ -1390,6 +1383,7 @@ void xen_poll_irq(int irq)
1390void xen_irq_resume(void) 1383void xen_irq_resume(void)
1391{ 1384{
1392 unsigned int cpu, irq, evtchn; 1385 unsigned int cpu, irq, evtchn;
1386 struct irq_desc *desc;
1393 1387
1394 init_evtchn_cpu_bindings(); 1388 init_evtchn_cpu_bindings();
1395 1389
@@ -1408,6 +1402,23 @@ void xen_irq_resume(void)
1408 restore_cpu_virqs(cpu); 1402 restore_cpu_virqs(cpu);
1409 restore_cpu_ipis(cpu); 1403 restore_cpu_ipis(cpu);
1410 } 1404 }
1405
1406 /*
1407 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
1408 * are not handled by the IRQ core.
1409 */
1410 for_each_irq_desc(irq, desc) {
1411 if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
1412 continue;
1413 if (desc->status & IRQ_DISABLED)
1414 continue;
1415
1416 evtchn = evtchn_from_irq(irq);
1417 if (evtchn == -1)
1418 continue;
1419
1420 unmask_evtchn(evtchn);
1421 }
1411} 1422}
1412 1423
1413static struct irq_chip xen_dynamic_chip __read_mostly = { 1424static struct irq_chip xen_dynamic_chip __read_mostly = {
diff --git a/fs/bio.c b/fs/bio.c
index 8abb2dfb2e7..4bd454fa844 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -370,6 +370,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
370{ 370{
371 struct bio *bio; 371 struct bio *bio;
372 372
373 if (nr_iovecs > UIO_MAXIOV)
374 return NULL;
375
373 bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec), 376 bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
374 gfp_mask); 377 gfp_mask);
375 if (unlikely(!bio)) 378 if (unlikely(!bio))
@@ -697,8 +700,12 @@ static void bio_free_map_data(struct bio_map_data *bmd)
697static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, 700static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
698 gfp_t gfp_mask) 701 gfp_t gfp_mask)
699{ 702{
700 struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask); 703 struct bio_map_data *bmd;
701 704
705 if (iov_count > UIO_MAXIOV)
706 return NULL;
707
708 bmd = kmalloc(sizeof(*bmd), gfp_mask);
702 if (!bmd) 709 if (!bmd)
703 return NULL; 710 return NULL;
704 711
@@ -827,6 +834,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
827 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; 834 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
828 start = uaddr >> PAGE_SHIFT; 835 start = uaddr >> PAGE_SHIFT;
829 836
837 /*
838 * Overflow, abort
839 */
840 if (end < start)
841 return ERR_PTR(-EINVAL);
842
830 nr_pages += end - start; 843 nr_pages += end - start;
831 len += iov[i].iov_len; 844 len += iov[i].iov_len;
832 } 845 }
@@ -955,6 +968,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
955 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 968 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
956 unsigned long start = uaddr >> PAGE_SHIFT; 969 unsigned long start = uaddr >> PAGE_SHIFT;
957 970
971 /*
972 * Overflow, abort
973 */
974 if (end < start)
975 return ERR_PTR(-EINVAL);
976
958 nr_pages += end - start; 977 nr_pages += end - start;
959 /* 978 /*
960 * buffer must be aligned to at least hardsector size for now 979 * buffer must be aligned to at least hardsector size for now
@@ -982,7 +1001,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
982 unsigned long start = uaddr >> PAGE_SHIFT; 1001 unsigned long start = uaddr >> PAGE_SHIFT;
983 const int local_nr_pages = end - start; 1002 const int local_nr_pages = end - start;
984 const int page_limit = cur_page + local_nr_pages; 1003 const int page_limit = cur_page + local_nr_pages;
985 1004
986 ret = get_user_pages_fast(uaddr, local_nr_pages, 1005 ret = get_user_pages_fast(uaddr, local_nr_pages,
987 write_to_vm, &pages[cur_page]); 1006 write_to_vm, &pages[cur_page]);
988 if (ret < local_nr_pages) { 1007 if (ret < local_nr_pages) {
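
Both fs/bio.c hunks above add the same defence: after rounding the user address range up to page granularity, a wrapped result (end < start) means the iovec length overflowed the address space and the request must be rejected before it is added to nr_pages. A stripped-down user-space version of that check, with illustrative names:

#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* Count the pages spanned by [uaddr, uaddr + len), refusing ranges
 * whose end wraps around the top of the address space. */
static int count_pages(unsigned long uaddr, unsigned long len,
		       unsigned long *nr_pages)
{
	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;

	if (end < start)	/* overflow, abort */
		return -1;

	*nr_pages += end - start;
	return 0;
}

int main(void)
{
	unsigned long n = 0;

	printf("ok=%d pages=%lu\n", count_pages(0x1000, 0x3000, &n), n);
	printf("ok=%d (wrapped range rejected)\n",
	       count_pages(ULONG_MAX - 100, 0x3000, &n));
	return 0;
}
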
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
index 5aff46c61e5..355abcdcda9 100644
--- a/fs/cifs/TODO
+++ b/fs/cifs/TODO
@@ -81,7 +81,7 @@ u) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for
81 81
82v) mount check for unmatched uids 82v) mount check for unmatched uids
83 83
84w) Add support for new vfs entry points for setlease and fallocate 84w) Add support for new vfs entry point for fallocate
85 85
86x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of 86x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of
87processes can proceed better in parallel (on the server) 87processes can proceed better in parallel (on the server)
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 525ba59a410..e9a393c9c2c 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -15,7 +15,7 @@
15 * the GNU Lesser General Public License for more details. 15 * the GNU Lesser General Public License for more details.
16 * 16 *
17 */ 17 */
18#include <linux/radix-tree.h> 18#include <linux/rbtree.h>
19 19
20#ifndef _CIFS_FS_SB_H 20#ifndef _CIFS_FS_SB_H
21#define _CIFS_FS_SB_H 21#define _CIFS_FS_SB_H
@@ -42,9 +42,9 @@
42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ 42#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
43 43
44struct cifs_sb_info { 44struct cifs_sb_info {
45 struct radix_tree_root tlink_tree; 45 struct rb_root tlink_tree;
46#define CIFS_TLINK_MASTER_TAG 0 /* is "master" (mount) tcon */
47 spinlock_t tlink_tree_lock; 46 spinlock_t tlink_tree_lock;
47 struct tcon_link *master_tlink;
48 struct nls_table *local_nls; 48 struct nls_table *local_nls;
49 unsigned int rsize; 49 unsigned int rsize;
50 unsigned int wsize; 50 unsigned int wsize;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 75c4eaa7958..9c3789762ab 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -116,7 +116,7 @@ cifs_read_super(struct super_block *sb, void *data,
116 return -ENOMEM; 116 return -ENOMEM;
117 117
118 spin_lock_init(&cifs_sb->tlink_tree_lock); 118 spin_lock_init(&cifs_sb->tlink_tree_lock);
119 INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL); 119 cifs_sb->tlink_tree = RB_ROOT;
120 120
121 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); 121 rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
122 if (rc) { 122 if (rc) {
@@ -321,8 +321,7 @@ cifs_alloc_inode(struct super_block *sb)
321 /* Until the file is open and we have gotten oplock 321 /* Until the file is open and we have gotten oplock
322 info back from the server, can not assume caching of 322 info back from the server, can not assume caching of
323 file data or metadata */ 323 file data or metadata */
324 cifs_inode->clientCanCacheRead = false; 324 cifs_set_oplock_level(cifs_inode, 0);
325 cifs_inode->clientCanCacheAll = false;
326 cifs_inode->delete_pending = false; 325 cifs_inode->delete_pending = false;
327 cifs_inode->invalid_mapping = false; 326 cifs_inode->invalid_mapping = false;
328 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ 327 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index f259e4d7612..b577bf0a1bb 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -336,7 +336,8 @@ struct cifsTconInfo {
336 * "get" on the container. 336 * "get" on the container.
337 */ 337 */
338struct tcon_link { 338struct tcon_link {
339 unsigned long tl_index; 339 struct rb_node tl_rbnode;
340 uid_t tl_uid;
340 unsigned long tl_flags; 341 unsigned long tl_flags;
341#define TCON_LINK_MASTER 0 342#define TCON_LINK_MASTER 0
342#define TCON_LINK_PENDING 1 343#define TCON_LINK_PENDING 1
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index edb6d90efdf..7ed69b6b5fe 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -104,6 +104,7 @@ extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
104extern u64 cifs_UnixTimeToNT(struct timespec); 104extern u64 cifs_UnixTimeToNT(struct timespec);
105extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, 105extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
106 int offset); 106 int offset);
107extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
107 108
108extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle, 109extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
109 struct file *file, struct tcon_link *tlink, 110 struct file *file, struct tcon_link *tlink,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9eb327defa1..251a17c0354 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -116,6 +116,7 @@ struct smb_vol {
116 116
117static int ipv4_connect(struct TCP_Server_Info *server); 117static int ipv4_connect(struct TCP_Server_Info *server);
118static int ipv6_connect(struct TCP_Server_Info *server); 118static int ipv6_connect(struct TCP_Server_Info *server);
119static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
119static void cifs_prune_tlinks(struct work_struct *work); 120static void cifs_prune_tlinks(struct work_struct *work);
120 121
121/* 122/*
@@ -2900,24 +2901,16 @@ remote_path_check:
2900 goto mount_fail_check; 2901 goto mount_fail_check;
2901 } 2902 }
2902 2903
2903 tlink->tl_index = pSesInfo->linux_uid; 2904 tlink->tl_uid = pSesInfo->linux_uid;
2904 tlink->tl_tcon = tcon; 2905 tlink->tl_tcon = tcon;
2905 tlink->tl_time = jiffies; 2906 tlink->tl_time = jiffies;
2906 set_bit(TCON_LINK_MASTER, &tlink->tl_flags); 2907 set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
2907 set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); 2908 set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
2908 2909
2909 rc = radix_tree_preload(GFP_KERNEL); 2910 cifs_sb->master_tlink = tlink;
2910 if (rc == -ENOMEM) {
2911 kfree(tlink);
2912 goto mount_fail_check;
2913 }
2914
2915 spin_lock(&cifs_sb->tlink_tree_lock); 2911 spin_lock(&cifs_sb->tlink_tree_lock);
2916 radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink); 2912 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
2917 radix_tree_tag_set(&cifs_sb->tlink_tree, pSesInfo->linux_uid,
2918 CIFS_TLINK_MASTER_TAG);
2919 spin_unlock(&cifs_sb->tlink_tree_lock); 2913 spin_unlock(&cifs_sb->tlink_tree_lock);
2920 radix_tree_preload_end();
2921 2914
2922 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks, 2915 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
2923 TLINK_IDLE_EXPIRE); 2916 TLINK_IDLE_EXPIRE);
@@ -3107,32 +3100,25 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3107int 3100int
3108cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) 3101cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
3109{ 3102{
3110 int i, ret; 3103 struct rb_root *root = &cifs_sb->tlink_tree;
3104 struct rb_node *node;
3105 struct tcon_link *tlink;
3111 char *tmp; 3106 char *tmp;
3112 struct tcon_link *tlink[8];
3113 unsigned long index = 0;
3114 3107
3115 cancel_delayed_work_sync(&cifs_sb->prune_tlinks); 3108 cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
3116 3109
3117 do { 3110 spin_lock(&cifs_sb->tlink_tree_lock);
3118 spin_lock(&cifs_sb->tlink_tree_lock); 3111 while ((node = rb_first(root))) {
3119 ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree, 3112 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
3120 (void **)tlink, index, 3113 cifs_get_tlink(tlink);
3121 ARRAY_SIZE(tlink)); 3114 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3122 /* increment index for next pass */ 3115 rb_erase(node, root);
3123 if (ret > 0)
3124 index = tlink[ret - 1]->tl_index + 1;
3125 for (i = 0; i < ret; i++) {
3126 cifs_get_tlink(tlink[i]);
3127 clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
3128 radix_tree_delete(&cifs_sb->tlink_tree,
3129 tlink[i]->tl_index);
3130 }
3131 spin_unlock(&cifs_sb->tlink_tree_lock);
3132 3116
3133 for (i = 0; i < ret; i++) 3117 spin_unlock(&cifs_sb->tlink_tree_lock);
3134 cifs_put_tlink(tlink[i]); 3118 cifs_put_tlink(tlink);
3135 } while (ret != 0); 3119 spin_lock(&cifs_sb->tlink_tree_lock);
3120 }
3121 spin_unlock(&cifs_sb->tlink_tree_lock);
3136 3122
3137 tmp = cifs_sb->prepath; 3123 tmp = cifs_sb->prepath;
3138 cifs_sb->prepathlen = 0; 3124 cifs_sb->prepathlen = 0;
@@ -3271,22 +3257,10 @@ out:
3271 return tcon; 3257 return tcon;
3272} 3258}
3273 3259
3274static struct tcon_link * 3260static inline struct tcon_link *
3275cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb) 3261cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
3276{ 3262{
3277 struct tcon_link *tlink; 3263 return cifs_sb->master_tlink;
3278 unsigned int ret;
3279
3280 spin_lock(&cifs_sb->tlink_tree_lock);
3281 ret = radix_tree_gang_lookup_tag(&cifs_sb->tlink_tree, (void **)&tlink,
3282 0, 1, CIFS_TLINK_MASTER_TAG);
3283 spin_unlock(&cifs_sb->tlink_tree_lock);
3284
3285 /* the master tcon should always be present */
3286 if (ret == 0)
3287 BUG();
3288
3289 return tlink;
3290} 3264}
3291 3265
3292struct cifsTconInfo * 3266struct cifsTconInfo *
@@ -3302,6 +3276,47 @@ cifs_sb_tcon_pending_wait(void *unused)
3302 return signal_pending(current) ? -ERESTARTSYS : 0; 3276 return signal_pending(current) ? -ERESTARTSYS : 0;
3303} 3277}
3304 3278
3279/* find and return a tlink with given uid */
3280static struct tcon_link *
3281tlink_rb_search(struct rb_root *root, uid_t uid)
3282{
3283 struct rb_node *node = root->rb_node;
3284 struct tcon_link *tlink;
3285
3286 while (node) {
3287 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
3288
3289 if (tlink->tl_uid > uid)
3290 node = node->rb_left;
3291 else if (tlink->tl_uid < uid)
3292 node = node->rb_right;
3293 else
3294 return tlink;
3295 }
3296 return NULL;
3297}
3298
3299/* insert a tcon_link into the tree */
3300static void
3301tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
3302{
3303 struct rb_node **new = &(root->rb_node), *parent = NULL;
3304 struct tcon_link *tlink;
3305
3306 while (*new) {
3307 tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
3308 parent = *new;
3309
3310 if (tlink->tl_uid > new_tlink->tl_uid)
3311 new = &((*new)->rb_left);
3312 else
3313 new = &((*new)->rb_right);
3314 }
3315
3316 rb_link_node(&new_tlink->tl_rbnode, parent, new);
3317 rb_insert_color(&new_tlink->tl_rbnode, root);
3318}
3319
3305/* 3320/*
3306 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the 3321 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
3307 * current task. 3322 * current task.
@@ -3309,7 +3324,7 @@ cifs_sb_tcon_pending_wait(void *unused)
3309 * If the superblock doesn't refer to a multiuser mount, then just return 3324 * If the superblock doesn't refer to a multiuser mount, then just return
3310 * the master tcon for the mount. 3325 * the master tcon for the mount.
3311 * 3326 *
3312 * First, search the radix tree for an existing tcon for this fsuid. If one 3327 * First, search the rbtree for an existing tcon for this fsuid. If one
3313 * exists, then check to see if it's pending construction. If it is then wait 3328 * exists, then check to see if it's pending construction. If it is then wait
3314 * for construction to complete. Once it's no longer pending, check to see if 3329 * for construction to complete. Once it's no longer pending, check to see if
3315 * it failed and either return an error or retry construction, depending on 3330 * it failed and either return an error or retry construction, depending on
@@ -3322,14 +3337,14 @@ struct tcon_link *
3322cifs_sb_tlink(struct cifs_sb_info *cifs_sb) 3337cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
3323{ 3338{
3324 int ret; 3339 int ret;
3325 unsigned long fsuid = (unsigned long) current_fsuid(); 3340 uid_t fsuid = current_fsuid();
3326 struct tcon_link *tlink, *newtlink; 3341 struct tcon_link *tlink, *newtlink;
3327 3342
3328 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 3343 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
3329 return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); 3344 return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
3330 3345
3331 spin_lock(&cifs_sb->tlink_tree_lock); 3346 spin_lock(&cifs_sb->tlink_tree_lock);
3332 tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid); 3347 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
3333 if (tlink) 3348 if (tlink)
3334 cifs_get_tlink(tlink); 3349 cifs_get_tlink(tlink);
3335 spin_unlock(&cifs_sb->tlink_tree_lock); 3350 spin_unlock(&cifs_sb->tlink_tree_lock);
@@ -3338,36 +3353,24 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
3338 newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL); 3353 newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
3339 if (newtlink == NULL) 3354 if (newtlink == NULL)
3340 return ERR_PTR(-ENOMEM); 3355 return ERR_PTR(-ENOMEM);
3341 newtlink->tl_index = fsuid; 3356 newtlink->tl_uid = fsuid;
3342 newtlink->tl_tcon = ERR_PTR(-EACCES); 3357 newtlink->tl_tcon = ERR_PTR(-EACCES);
3343 set_bit(TCON_LINK_PENDING, &newtlink->tl_flags); 3358 set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
3344 set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags); 3359 set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
3345 cifs_get_tlink(newtlink); 3360 cifs_get_tlink(newtlink);
3346 3361
3347 ret = radix_tree_preload(GFP_KERNEL);
3348 if (ret != 0) {
3349 kfree(newtlink);
3350 return ERR_PTR(ret);
3351 }
3352
3353 spin_lock(&cifs_sb->tlink_tree_lock); 3362 spin_lock(&cifs_sb->tlink_tree_lock);
3354 /* was one inserted after previous search? */ 3363 /* was one inserted after previous search? */
3355 tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid); 3364 tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
3356 if (tlink) { 3365 if (tlink) {
3357 cifs_get_tlink(tlink); 3366 cifs_get_tlink(tlink);
3358 spin_unlock(&cifs_sb->tlink_tree_lock); 3367 spin_unlock(&cifs_sb->tlink_tree_lock);
3359 radix_tree_preload_end();
3360 kfree(newtlink); 3368 kfree(newtlink);
3361 goto wait_for_construction; 3369 goto wait_for_construction;
3362 } 3370 }
3363 ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
3364 spin_unlock(&cifs_sb->tlink_tree_lock);
3365 radix_tree_preload_end();
3366 if (ret) {
3367 kfree(newtlink);
3368 return ERR_PTR(ret);
3369 }
3370 tlink = newtlink; 3371 tlink = newtlink;
3372 tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
3373 spin_unlock(&cifs_sb->tlink_tree_lock);
3371 } else { 3374 } else {
3372wait_for_construction: 3375wait_for_construction:
3373 ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING, 3376 ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
@@ -3413,39 +3416,39 @@ cifs_prune_tlinks(struct work_struct *work)
3413{ 3416{
3414 struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info, 3417 struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
3415 prune_tlinks.work); 3418 prune_tlinks.work);
3416 struct tcon_link *tlink[8]; 3419 struct rb_root *root = &cifs_sb->tlink_tree;
3417 unsigned long now = jiffies; 3420 struct rb_node *node = rb_first(root);
3418 unsigned long index = 0; 3421 struct rb_node *tmp;
3419 int i, ret; 3422 struct tcon_link *tlink;
3420 3423
3421 do { 3424 /*
3422 spin_lock(&cifs_sb->tlink_tree_lock); 3425 * Because we drop the spinlock in the loop in order to put the tlink
3423 ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree, 3426 * it's not guarded against removal of links from the tree. The only
3424 (void **)tlink, index, 3427 * places that remove entries from the tree are this function and
3425 ARRAY_SIZE(tlink)); 3428 * umounts. Because this function is non-reentrant and is canceled
3426 /* increment index for next pass */ 3429 * before umount can proceed, this is safe.
3427 if (ret > 0) 3430 */
3428 index = tlink[ret - 1]->tl_index + 1; 3431 spin_lock(&cifs_sb->tlink_tree_lock);
3429 for (i = 0; i < ret; i++) { 3432 node = rb_first(root);
3430 if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) || 3433 while (node != NULL) {
3431 atomic_read(&tlink[i]->tl_count) != 0 || 3434 tmp = node;
3432 time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE, 3435 node = rb_next(tmp);
3433 now)) { 3436 tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
3434 tlink[i] = NULL; 3437
3435 continue; 3438 if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
3436 } 3439 atomic_read(&tlink->tl_count) != 0 ||
3437 cifs_get_tlink(tlink[i]); 3440 time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
3438 clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags); 3441 continue;
3439 radix_tree_delete(&cifs_sb->tlink_tree,
3440 tlink[i]->tl_index);
3441 }
3442 spin_unlock(&cifs_sb->tlink_tree_lock);
3443 3442
3444 for (i = 0; i < ret; i++) { 3443 cifs_get_tlink(tlink);
3445 if (tlink[i] != NULL) 3444 clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
3446 cifs_put_tlink(tlink[i]); 3445 rb_erase(tmp, root);
3447 } 3446
3448 } while (ret != 0); 3447 spin_unlock(&cifs_sb->tlink_tree_lock);
3448 cifs_put_tlink(tlink);
3449 spin_lock(&cifs_sb->tlink_tree_lock);
3450 }
3451 spin_unlock(&cifs_sb->tlink_tree_lock);
3449 3452
3450 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks, 3453 queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
3451 TLINK_IDLE_EXPIRE); 3454 TLINK_IDLE_EXPIRE);
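The conversion above replaces the uid-indexed radix tree with an rb-tree keyed on tl_uid: the node lives inside struct tcon_link, so inserts need no radix_tree_preload()/GFP allocation while tlink_tree_lock is held, teardown becomes a plain rb_first()/rb_erase() loop, and the master tcon is reached through cifs_sb->master_tlink instead of a radix-tree tag. The sketch below is illustrative only; tlink_find_or_insert is a hypothetical name, and the refcounting and the allocate-outside-the-lock re-check done by the real cifs_sb_tlink() are omitted.

/*
 * Illustrative only: search the tree under the spinlock and insert the
 * preallocated node if no entry for this uid exists yet. Assumes the
 * cifsglob.h definitions from this patch (tlink_rb_search/tlink_rb_insert).
 */
static struct tcon_link *
tlink_find_or_insert(struct cifs_sb_info *cifs_sb, struct tcon_link *newtlink)
{
	struct tcon_link *tlink;

	spin_lock(&cifs_sb->tlink_tree_lock);
	tlink = tlink_rb_search(&cifs_sb->tlink_tree, newtlink->tl_uid);
	if (tlink == NULL) {
		/* nothing sleeps here: newtlink was kzalloc'd beforehand */
		tlink_rb_insert(&cifs_sb->tlink_tree, newtlink);
		tlink = newtlink;
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);
	return tlink;
}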
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ae82159cf7f..06c3e83fa38 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -146,12 +146,7 @@ client_can_cache:
146 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, 146 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
147 xid, NULL); 147 xid, NULL);
148 148
149 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { 149 cifs_set_oplock_level(pCifsInode, oplock);
150 pCifsInode->clientCanCacheAll = true;
151 pCifsInode->clientCanCacheRead = true;
152 cFYI(1, "Exclusive Oplock granted on inode %p", inode);
153 } else if ((oplock & 0xF) == OPLOCK_READ)
154 pCifsInode->clientCanCacheRead = true;
155 150
156 return rc; 151 return rc;
157} 152}
@@ -253,12 +248,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
253 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList); 248 list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
254 spin_unlock(&cifs_file_list_lock); 249 spin_unlock(&cifs_file_list_lock);
255 250
256 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { 251 cifs_set_oplock_level(pCifsInode, oplock);
257 pCifsInode->clientCanCacheAll = true;
258 pCifsInode->clientCanCacheRead = true;
259 cFYI(1, "Exclusive Oplock inode %p", inode);
260 } else if ((oplock & 0xF) == OPLOCK_READ)
261 pCifsInode->clientCanCacheRead = true;
262 252
263 file->private_data = pCifsFile; 253 file->private_data = pCifsFile;
264 return pCifsFile; 254 return pCifsFile;
@@ -271,8 +261,9 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
271 */ 261 */
272void cifsFileInfo_put(struct cifsFileInfo *cifs_file) 262void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
273{ 263{
264 struct inode *inode = cifs_file->dentry->d_inode;
274 struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink); 265 struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
275 struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode); 266 struct cifsInodeInfo *cifsi = CIFS_I(inode);
276 struct cifsLockInfo *li, *tmp; 267 struct cifsLockInfo *li, *tmp;
277 268
278 spin_lock(&cifs_file_list_lock); 269 spin_lock(&cifs_file_list_lock);
@@ -288,8 +279,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
288 if (list_empty(&cifsi->openFileList)) { 279 if (list_empty(&cifsi->openFileList)) {
289 cFYI(1, "closing last open instance for inode %p", 280 cFYI(1, "closing last open instance for inode %p",
290 cifs_file->dentry->d_inode); 281 cifs_file->dentry->d_inode);
291 cifsi->clientCanCacheRead = false; 282 cifs_set_oplock_level(cifsi, 0);
292 cifsi->clientCanCacheAll = false;
293 } 283 }
294 spin_unlock(&cifs_file_list_lock); 284 spin_unlock(&cifs_file_list_lock);
295 285
@@ -607,8 +597,6 @@ reopen_success:
607 rc = filemap_write_and_wait(inode->i_mapping); 597 rc = filemap_write_and_wait(inode->i_mapping);
608 mapping_set_error(inode->i_mapping, rc); 598 mapping_set_error(inode->i_mapping, rc);
609 599
610 pCifsInode->clientCanCacheAll = false;
611 pCifsInode->clientCanCacheRead = false;
612 if (tcon->unix_ext) 600 if (tcon->unix_ext)
613 rc = cifs_get_inode_info_unix(&inode, 601 rc = cifs_get_inode_info_unix(&inode,
614 full_path, inode->i_sb, xid); 602 full_path, inode->i_sb, xid);
@@ -622,18 +610,9 @@ reopen_success:
622 invalidate the current end of file on the server 610 invalidate the current end of file on the server
623 we can not go to the server to get the new inod 611 we can not go to the server to get the new inod
624 info */ 612 info */
625 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { 613
626 pCifsInode->clientCanCacheAll = true; 614 cifs_set_oplock_level(pCifsInode, oplock);
627 pCifsInode->clientCanCacheRead = true; 615
628 cFYI(1, "Exclusive Oplock granted on inode %p",
629 pCifsFile->dentry->d_inode);
630 } else if ((oplock & 0xF) == OPLOCK_READ) {
631 pCifsInode->clientCanCacheRead = true;
632 pCifsInode->clientCanCacheAll = false;
633 } else {
634 pCifsInode->clientCanCacheRead = false;
635 pCifsInode->clientCanCacheAll = false;
636 }
637 cifs_relock_file(pCifsFile); 616 cifs_relock_file(pCifsFile);
638 617
639reopen_error_exit: 618reopen_error_exit:
@@ -775,12 +754,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
775 754
776 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 755 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
777 tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink); 756 tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
778
779 if (file->private_data == NULL) {
780 rc = -EBADF;
781 FreeXid(xid);
782 return rc;
783 }
784 netfid = ((struct cifsFileInfo *)file->private_data)->netfid; 757 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
785 758
786 if ((tcon->ses->capabilities & CAP_UNIX) && 759 if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -956,6 +929,7 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
956ssize_t cifs_user_write(struct file *file, const char __user *write_data, 929ssize_t cifs_user_write(struct file *file, const char __user *write_data,
957 size_t write_size, loff_t *poffset) 930 size_t write_size, loff_t *poffset)
958{ 931{
932 struct inode *inode = file->f_path.dentry->d_inode;
959 int rc = 0; 933 int rc = 0;
960 unsigned int bytes_written = 0; 934 unsigned int bytes_written = 0;
961 unsigned int total_written; 935 unsigned int total_written;
@@ -963,7 +937,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
963 struct cifsTconInfo *pTcon; 937 struct cifsTconInfo *pTcon;
964 int xid, long_op; 938 int xid, long_op;
965 struct cifsFileInfo *open_file; 939 struct cifsFileInfo *open_file;
966 struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode); 940 struct cifsInodeInfo *cifsi = CIFS_I(inode);
967 941
968 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 942 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
969 943
@@ -1029,21 +1003,17 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
1029 1003
1030 cifs_stats_bytes_written(pTcon, total_written); 1004 cifs_stats_bytes_written(pTcon, total_written);
1031 1005
1032 /* since the write may have blocked check these pointers again */
1033 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1034 struct inode *inode = file->f_path.dentry->d_inode;
1035/* Do not update local mtime - server will set its actual value on write 1006/* Do not update local mtime - server will set its actual value on write
1036 * inode->i_ctime = inode->i_mtime = 1007 * inode->i_ctime = inode->i_mtime =
1037 * current_fs_time(inode->i_sb);*/ 1008 * current_fs_time(inode->i_sb);*/
1038 if (total_written > 0) { 1009 if (total_written > 0) {
1039 spin_lock(&inode->i_lock); 1010 spin_lock(&inode->i_lock);
1040 if (*poffset > file->f_path.dentry->d_inode->i_size) 1011 if (*poffset > inode->i_size)
1041 i_size_write(file->f_path.dentry->d_inode, 1012 i_size_write(inode, *poffset);
1042 *poffset); 1013 spin_unlock(&inode->i_lock);
1043 spin_unlock(&inode->i_lock);
1044 }
1045 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1046 } 1014 }
1015 mark_inode_dirty_sync(inode);
1016
1047 FreeXid(xid); 1017 FreeXid(xid);
1048 return total_written; 1018 return total_written;
1049} 1019}
@@ -1178,7 +1148,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1178 bool fsuid_only) 1148 bool fsuid_only)
1179{ 1149{
1180 struct cifsFileInfo *open_file; 1150 struct cifsFileInfo *open_file;
1181 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); 1151 struct cifs_sb_info *cifs_sb;
1182 bool any_available = false; 1152 bool any_available = false;
1183 int rc; 1153 int rc;
1184 1154
@@ -1192,6 +1162,8 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1192 return NULL; 1162 return NULL;
1193 } 1163 }
1194 1164
1165 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1166
1195 /* only filter by fsuid on multiuser mounts */ 1167 /* only filter by fsuid on multiuser mounts */
1196 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 1168 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1197 fsuid_only = false; 1169 fsuid_only = false;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 39869c3c3ef..ef3a55bf86b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2177,7 +2177,6 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
2177 2177
2178 setattr_copy(inode, attrs); 2178 setattr_copy(inode, attrs);
2179 mark_inode_dirty(inode); 2179 mark_inode_dirty(inode);
2180 return 0;
2181 2180
2182cifs_setattr_exit: 2181cifs_setattr_exit:
2183 kfree(full_path); 2182 kfree(full_path);
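The one-line change above is a leak fix: returning 0 right after mark_inode_dirty() bypassed the cleanup under the cifs_setattr_exit label (at least the kfree(full_path) visible in the context), so the allocated path string was lost on every successful setattr. A minimal sketch of the restored single-exit idiom, with hypothetical names:

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch only: success and failure share the cleanup label. */
static int sketch_setattr(struct inode *inode, const char *full_path_src)
{
	int rc = 0;
	char *full_path;

	full_path = kstrdup(full_path_src, GFP_KERNEL);
	if (full_path == NULL)
		return -ENOMEM;

	/* ... attribute updates set rc and "goto setattr_exit" on error ... */
	if (rc)
		goto setattr_exit;

	mark_inode_dirty(inode);	/* success: fall through, no early return */

setattr_exit:
	kfree(full_path);		/* runs on every path */
	return rc;
}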
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 077bf756f34..0c98672d012 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -38,10 +38,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
38 struct cifs_sb_info *cifs_sb; 38 struct cifs_sb_info *cifs_sb;
39#ifdef CONFIG_CIFS_POSIX 39#ifdef CONFIG_CIFS_POSIX
40 struct cifsFileInfo *pSMBFile = filep->private_data; 40 struct cifsFileInfo *pSMBFile = filep->private_data;
41 struct cifsTconInfo *tcon = tlink_tcon(pSMBFile->tlink); 41 struct cifsTconInfo *tcon;
42 __u64 ExtAttrBits = 0; 42 __u64 ExtAttrBits = 0;
43 __u64 ExtAttrMask = 0; 43 __u64 ExtAttrMask = 0;
44 __u64 caps = le64_to_cpu(tcon->fsUnixInfo.Capability); 44 __u64 caps;
45#endif /* CONFIG_CIFS_POSIX */ 45#endif /* CONFIG_CIFS_POSIX */
46 46
47 xid = GetXid(); 47 xid = GetXid();
@@ -62,9 +62,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
62 break; 62 break;
63#ifdef CONFIG_CIFS_POSIX 63#ifdef CONFIG_CIFS_POSIX
64 case FS_IOC_GETFLAGS: 64 case FS_IOC_GETFLAGS:
65 if (pSMBFile == NULL)
66 break;
67 tcon = tlink_tcon(pSMBFile->tlink);
68 caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
65 if (CIFS_UNIX_EXTATTR_CAP & caps) { 69 if (CIFS_UNIX_EXTATTR_CAP & caps) {
66 if (pSMBFile == NULL)
67 break;
68 rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid, 70 rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid,
69 &ExtAttrBits, &ExtAttrMask); 71 &ExtAttrBits, &ExtAttrMask);
70 if (rc == 0) 72 if (rc == 0)
@@ -75,13 +77,15 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
75 break; 77 break;
76 78
77 case FS_IOC_SETFLAGS: 79 case FS_IOC_SETFLAGS:
80 if (pSMBFile == NULL)
81 break;
82 tcon = tlink_tcon(pSMBFile->tlink);
83 caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
78 if (CIFS_UNIX_EXTATTR_CAP & caps) { 84 if (CIFS_UNIX_EXTATTR_CAP & caps) {
79 if (get_user(ExtAttrBits, (int __user *)arg)) { 85 if (get_user(ExtAttrBits, (int __user *)arg)) {
80 rc = -EFAULT; 86 rc = -EFAULT;
81 break; 87 break;
82 } 88 }
83 if (pSMBFile == NULL)
84 break;
85 /* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid, 89 /* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid,
86 extAttrBits, &ExtAttrMask);*/ 90 extAttrBits, &ExtAttrMask);*/
87 } 91 }
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c4e296fe351..43f10281bc1 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -569,10 +569,9 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
569 569
570 cFYI(1, "file id match, oplock break"); 570 cFYI(1, "file id match, oplock break");
571 pCifsInode = CIFS_I(netfile->dentry->d_inode); 571 pCifsInode = CIFS_I(netfile->dentry->d_inode);
572 pCifsInode->clientCanCacheAll = false;
573 if (pSMB->OplockLevel == 0)
574 pCifsInode->clientCanCacheRead = false;
575 572
573 cifs_set_oplock_level(pCifsInode,
574 pSMB->OplockLevel);
576 /* 575 /*
577 * cifs_oplock_break_put() can't be called 576 * cifs_oplock_break_put() can't be called
578 * from here. Get reference after queueing 577 * from here. Get reference after queueing
@@ -722,3 +721,23 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
722 cifs_sb_master_tcon(cifs_sb)->treeName); 721 cifs_sb_master_tcon(cifs_sb)->treeName);
723 } 722 }
724} 723}
724
725void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
726{
727 oplock &= 0xF;
728
729 if (oplock == OPLOCK_EXCLUSIVE) {
730 cinode->clientCanCacheAll = true;
731 cinode->clientCanCacheRead = true;
732 cFYI(1, "Exclusive Oplock granted on inode %p",
733 &cinode->vfs_inode);
734 } else if (oplock == OPLOCK_READ) {
735 cinode->clientCanCacheAll = false;
736 cinode->clientCanCacheRead = true;
737 cFYI(1, "Level II Oplock granted on inode %p",
738 &cinode->vfs_inode);
739 } else {
740 cinode->clientCanCacheAll = false;
741 cinode->clientCanCacheRead = false;
742 }
743}
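The new helper gives the open, reopen, close and oplock-break paths a single place that maps an SMB oplock level onto the two per-inode cache flags. The comment below just restates its behaviour, followed by two calls taken from this patch:

/*
 * Behaviour of cifs_set_oplock_level() (from the function above):
 *
 *   oplock & 0xF         clientCanCacheRead   clientCanCacheAll
 *   OPLOCK_EXCLUSIVE     true                 true
 *   OPLOCK_READ          true                 false
 *   anything else        false                false
 */
cifs_set_oplock_level(pCifsInode, pSMB->OplockLevel);	/* oplock break */
cifs_set_oplock_level(cifs_inode, 0);			/* drop all caching rights */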
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8b5dd6369f8..6a5edea2d70 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -177,7 +177,7 @@ struct mpage_da_data {
177 177
178struct ext4_io_page { 178struct ext4_io_page {
179 struct page *p_page; 179 struct page *p_page;
180 int p_count; 180 atomic_t p_count;
181}; 181};
182 182
183#define MAX_IO_PAGES 128 183#define MAX_IO_PAGES 128
@@ -858,6 +858,7 @@ struct ext4_inode_info {
858 spinlock_t i_completed_io_lock; 858 spinlock_t i_completed_io_lock;
859 /* current io_end structure for async DIO write*/ 859 /* current io_end structure for async DIO write*/
860 ext4_io_end_t *cur_aio_dio; 860 ext4_io_end_t *cur_aio_dio;
861 atomic_t i_ioend_count; /* Number of outstanding io_end structs */
861 862
862 /* 863 /*
863 * Transactions that contain inode's metadata needed to complete 864 * Transactions that contain inode's metadata needed to complete
@@ -2060,6 +2061,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
2060/* page-io.c */ 2061/* page-io.c */
2061extern int __init ext4_init_pageio(void); 2062extern int __init ext4_init_pageio(void);
2062extern void ext4_exit_pageio(void); 2063extern void ext4_exit_pageio(void);
2064extern void ext4_ioend_wait(struct inode *);
2063extern void ext4_free_io_end(ext4_io_end_t *io); 2065extern void ext4_free_io_end(ext4_io_end_t *io);
2064extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); 2066extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
2065extern int ext4_end_io_nolock(ext4_io_end_t *io); 2067extern int ext4_end_io_nolock(ext4_io_end_t *io);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 19161647046..bdbe6990220 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -53,6 +53,7 @@
53static inline int ext4_begin_ordered_truncate(struct inode *inode, 53static inline int ext4_begin_ordered_truncate(struct inode *inode,
54 loff_t new_size) 54 loff_t new_size)
55{ 55{
56 trace_ext4_begin_ordered_truncate(inode, new_size);
56 return jbd2_journal_begin_ordered_truncate( 57 return jbd2_journal_begin_ordered_truncate(
57 EXT4_SB(inode->i_sb)->s_journal, 58 EXT4_SB(inode->i_sb)->s_journal,
58 &EXT4_I(inode)->jinode, 59 &EXT4_I(inode)->jinode,
@@ -178,6 +179,7 @@ void ext4_evict_inode(struct inode *inode)
178 handle_t *handle; 179 handle_t *handle;
179 int err; 180 int err;
180 181
182 trace_ext4_evict_inode(inode);
181 if (inode->i_nlink) { 183 if (inode->i_nlink) {
182 truncate_inode_pages(&inode->i_data, 0); 184 truncate_inode_pages(&inode->i_data, 0);
183 goto no_delete; 185 goto no_delete;
@@ -5410,9 +5412,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
5410 * will return the blocks that include the delayed allocation 5412 * will return the blocks that include the delayed allocation
5411 * blocks for this file. 5413 * blocks for this file.
5412 */ 5414 */
5413 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
5414 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; 5415 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
5415 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
5416 5416
5417 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 5417 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
5418 return 0; 5418 return 0;
@@ -5649,6 +5649,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5649 int err, ret; 5649 int err, ret;
5650 5650
5651 might_sleep(); 5651 might_sleep();
5652 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5652 err = ext4_reserve_inode_write(handle, inode, &iloc); 5653 err = ext4_reserve_inode_write(handle, inode, &iloc);
5653 if (ext4_handle_valid(handle) && 5654 if (ext4_handle_valid(handle) &&
5654 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize && 5655 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c58eba34724..5b4d4e3a4d5 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4640,8 +4640,6 @@ do_more:
4640 * with group lock held. generate_buddy look at 4640 * with group lock held. generate_buddy look at
4641 * them with group lock_held 4641 * them with group lock_held
4642 */ 4642 */
4643 if (test_opt(sb, DISCARD))
4644 ext4_issue_discard(sb, block_group, bit, count);
4645 ext4_lock_group(sb, block_group); 4643 ext4_lock_group(sb, block_group);
4646 mb_clear_bits(bitmap_bh->b_data, bit, count); 4644 mb_clear_bits(bitmap_bh->b_data, bit, count);
4647 mb_free_blocks(inode, &e4b, bit, count); 4645 mb_free_blocks(inode, &e4b, bit, count);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 46a7d6a9d97..7f5451cd1d3 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -32,8 +32,14 @@
32 32
33static struct kmem_cache *io_page_cachep, *io_end_cachep; 33static struct kmem_cache *io_page_cachep, *io_end_cachep;
34 34
35#define WQ_HASH_SZ 37
36#define to_ioend_wq(v) (&ioend_wq[((unsigned long)v) % WQ_HASH_SZ])
37static wait_queue_head_t ioend_wq[WQ_HASH_SZ];
38
35int __init ext4_init_pageio(void) 39int __init ext4_init_pageio(void)
36{ 40{
41 int i;
42
37 io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT); 43 io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
38 if (io_page_cachep == NULL) 44 if (io_page_cachep == NULL)
39 return -ENOMEM; 45 return -ENOMEM;
@@ -42,6 +48,8 @@ int __init ext4_init_pageio(void)
42 kmem_cache_destroy(io_page_cachep); 48 kmem_cache_destroy(io_page_cachep);
43 return -ENOMEM; 49 return -ENOMEM;
44 } 50 }
51 for (i = 0; i < WQ_HASH_SZ; i++)
52 init_waitqueue_head(&ioend_wq[i]);
45 53
46 return 0; 54 return 0;
47} 55}
@@ -52,24 +60,37 @@ void ext4_exit_pageio(void)
52 kmem_cache_destroy(io_page_cachep); 60 kmem_cache_destroy(io_page_cachep);
53} 61}
54 62
63void ext4_ioend_wait(struct inode *inode)
64{
65 wait_queue_head_t *wq = to_ioend_wq(inode);
66
67 wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
68}
69
70static void put_io_page(struct ext4_io_page *io_page)
71{
72 if (atomic_dec_and_test(&io_page->p_count)) {
73 end_page_writeback(io_page->p_page);
74 put_page(io_page->p_page);
75 kmem_cache_free(io_page_cachep, io_page);
76 }
77}
78
55void ext4_free_io_end(ext4_io_end_t *io) 79void ext4_free_io_end(ext4_io_end_t *io)
56{ 80{
57 int i; 81 int i;
82 wait_queue_head_t *wq;
58 83
59 BUG_ON(!io); 84 BUG_ON(!io);
60 if (io->page) 85 if (io->page)
61 put_page(io->page); 86 put_page(io->page);
62 for (i = 0; i < io->num_io_pages; i++) { 87 for (i = 0; i < io->num_io_pages; i++)
63 if (--io->pages[i]->p_count == 0) { 88 put_io_page(io->pages[i]);
64 struct page *page = io->pages[i]->p_page;
65
66 end_page_writeback(page);
67 put_page(page);
68 kmem_cache_free(io_page_cachep, io->pages[i]);
69 }
70 }
71 io->num_io_pages = 0; 89 io->num_io_pages = 0;
72 iput(io->inode); 90 wq = to_ioend_wq(io->inode);
91 if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
92 waitqueue_active(wq))
93 wake_up_all(wq);
73 kmem_cache_free(io_end_cachep, io); 94 kmem_cache_free(io_end_cachep, io);
74} 95}
75 96
@@ -142,8 +163,8 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
142 io = kmem_cache_alloc(io_end_cachep, flags); 163 io = kmem_cache_alloc(io_end_cachep, flags);
143 if (io) { 164 if (io) {
144 memset(io, 0, sizeof(*io)); 165 memset(io, 0, sizeof(*io));
145 io->inode = igrab(inode); 166 atomic_inc(&EXT4_I(inode)->i_ioend_count);
146 BUG_ON(!io->inode); 167 io->inode = inode;
147 INIT_WORK(&io->work, ext4_end_io_work); 168 INIT_WORK(&io->work, ext4_end_io_work);
148 INIT_LIST_HEAD(&io->list); 169 INIT_LIST_HEAD(&io->list);
149 } 170 }
@@ -171,35 +192,15 @@ static void ext4_end_bio(struct bio *bio, int error)
171 struct workqueue_struct *wq; 192 struct workqueue_struct *wq;
172 struct inode *inode; 193 struct inode *inode;
173 unsigned long flags; 194 unsigned long flags;
174 ext4_fsblk_t err_block;
175 int i; 195 int i;
176 196
177 BUG_ON(!io_end); 197 BUG_ON(!io_end);
178 inode = io_end->inode;
179 bio->bi_private = NULL; 198 bio->bi_private = NULL;
180 bio->bi_end_io = NULL; 199 bio->bi_end_io = NULL;
181 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 200 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
182 error = 0; 201 error = 0;
183 err_block = bio->bi_sector >> (inode->i_blkbits - 9);
184 bio_put(bio); 202 bio_put(bio);
185 203
186 if (!(inode->i_sb->s_flags & MS_ACTIVE)) {
187 pr_err("sb umounted, discard end_io request for inode %lu\n",
188 io_end->inode->i_ino);
189 ext4_free_io_end(io_end);
190 return;
191 }
192
193 if (error) {
194 io_end->flag |= EXT4_IO_END_ERROR;
195 ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
196 "(offset %llu size %ld starting block %llu)",
197 inode->i_ino,
198 (unsigned long long) io_end->offset,
199 (long) io_end->size,
200 (unsigned long long) err_block);
201 }
202
203 for (i = 0; i < io_end->num_io_pages; i++) { 204 for (i = 0; i < io_end->num_io_pages; i++) {
204 struct page *page = io_end->pages[i]->p_page; 205 struct page *page = io_end->pages[i]->p_page;
205 struct buffer_head *bh, *head; 206 struct buffer_head *bh, *head;
@@ -236,13 +237,7 @@ static void ext4_end_bio(struct bio *bio, int error)
236 } while (bh != head); 237 } while (bh != head);
237 } 238 }
238 239
239 if (--io_end->pages[i]->p_count == 0) { 240 put_io_page(io_end->pages[i]);
240 struct page *page = io_end->pages[i]->p_page;
241
242 end_page_writeback(page);
243 put_page(page);
244 kmem_cache_free(io_page_cachep, io_end->pages[i]);
245 }
246 241
247 /* 242 /*
248 * If this is a partial write which happened to make 243 * If this is a partial write which happened to make
@@ -254,8 +249,19 @@ static void ext4_end_bio(struct bio *bio, int error)
254 if (!partial_write) 249 if (!partial_write)
255 SetPageUptodate(page); 250 SetPageUptodate(page);
256 } 251 }
257
258 io_end->num_io_pages = 0; 252 io_end->num_io_pages = 0;
253 inode = io_end->inode;
254
255 if (error) {
256 io_end->flag |= EXT4_IO_END_ERROR;
257 ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
258 "(offset %llu size %ld starting block %llu)",
259 inode->i_ino,
260 (unsigned long long) io_end->offset,
261 (long) io_end->size,
262 (unsigned long long)
263 bio->bi_sector >> (inode->i_blkbits - 9));
264 }
259 265
260 /* Add the io_end to per-inode completed io list*/ 266 /* Add the io_end to per-inode completed io list*/
261 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); 267 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
@@ -305,7 +311,6 @@ static int io_submit_init(struct ext4_io_submit *io,
305 bio->bi_private = io->io_end = io_end; 311 bio->bi_private = io->io_end = io_end;
306 bio->bi_end_io = ext4_end_bio; 312 bio->bi_end_io = ext4_end_bio;
307 313
308 io_end->inode = inode;
309 io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh); 314 io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
310 315
311 io->io_bio = bio; 316 io->io_bio = bio;
@@ -360,7 +365,7 @@ submit_and_retry:
360 if ((io_end->num_io_pages == 0) || 365 if ((io_end->num_io_pages == 0) ||
361 (io_end->pages[io_end->num_io_pages-1] != io_page)) { 366 (io_end->pages[io_end->num_io_pages-1] != io_page)) {
362 io_end->pages[io_end->num_io_pages++] = io_page; 367 io_end->pages[io_end->num_io_pages++] = io_page;
363 io_page->p_count++; 368 atomic_inc(&io_page->p_count);
364 } 369 }
365 return 0; 370 return 0;
366} 371}
@@ -389,7 +394,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
389 return -ENOMEM; 394 return -ENOMEM;
390 } 395 }
391 io_page->p_page = page; 396 io_page->p_page = page;
392 io_page->p_count = 0; 397 atomic_set(&io_page->p_count, 1);
393 get_page(page); 398 get_page(page);
394 399
395 for (bh = head = page_buffers(page), block_start = 0; 400 for (bh = head = page_buffers(page), block_start = 0;
@@ -421,10 +426,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
421 * PageWriteback bit from the page to prevent the system from 426 * PageWriteback bit from the page to prevent the system from
422 * wedging later on. 427 * wedging later on.
423 */ 428 */
424 if (io_page->p_count == 0) { 429 put_io_page(io_page);
425 put_page(page);
426 end_page_writeback(page);
427 kmem_cache_free(io_page_cachep, io_page);
428 }
429 return ret; 430 return ret;
430} 431}
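Two patterns are worth calling out in the page-io.c change: io_page references are now a real atomic refcount released through put_io_page(), and an io_end no longer pins its inode with igrab(). Instead EXT4_I(inode)->i_ioend_count tracks outstanding io_ends, and ext4_destroy_inode() (see the super.c hunk below) waits for it to reach zero on one of WQ_HASH_SZ shared wait queues selected by hashing the inode pointer. Condensed from the hunks above:

/* when an io_end is allocated for the inode */
atomic_inc(&EXT4_I(inode)->i_ioend_count);

/* when an io_end is freed */
if (atomic_dec_and_test(&EXT4_I(inode)->i_ioend_count) &&
    waitqueue_active(to_ioend_wq(inode)))
	wake_up_all(to_ioend_wq(inode));

/* before the inode can be destroyed */
wait_event(*to_ioend_wq(inode),
	   atomic_read(&EXT4_I(inode)->i_ioend_count) == 0);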
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 40131b777af..61182fe6254 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -828,12 +828,22 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
828 ei->cur_aio_dio = NULL; 828 ei->cur_aio_dio = NULL;
829 ei->i_sync_tid = 0; 829 ei->i_sync_tid = 0;
830 ei->i_datasync_tid = 0; 830 ei->i_datasync_tid = 0;
831 atomic_set(&ei->i_ioend_count, 0);
831 832
832 return &ei->vfs_inode; 833 return &ei->vfs_inode;
833} 834}
834 835
836static int ext4_drop_inode(struct inode *inode)
837{
838 int drop = generic_drop_inode(inode);
839
840 trace_ext4_drop_inode(inode, drop);
841 return drop;
842}
843
835static void ext4_destroy_inode(struct inode *inode) 844static void ext4_destroy_inode(struct inode *inode)
836{ 845{
846 ext4_ioend_wait(inode);
837 if (!list_empty(&(EXT4_I(inode)->i_orphan))) { 847 if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
838 ext4_msg(inode->i_sb, KERN_ERR, 848 ext4_msg(inode->i_sb, KERN_ERR,
839 "Inode %lu (%p): orphan list check failed!", 849 "Inode %lu (%p): orphan list check failed!",
@@ -1173,6 +1183,7 @@ static const struct super_operations ext4_sops = {
1173 .destroy_inode = ext4_destroy_inode, 1183 .destroy_inode = ext4_destroy_inode,
1174 .write_inode = ext4_write_inode, 1184 .write_inode = ext4_write_inode,
1175 .dirty_inode = ext4_dirty_inode, 1185 .dirty_inode = ext4_dirty_inode,
1186 .drop_inode = ext4_drop_inode,
1176 .evict_inode = ext4_evict_inode, 1187 .evict_inode = ext4_evict_inode,
1177 .put_super = ext4_put_super, 1188 .put_super = ext4_put_super,
1178 .sync_fs = ext4_sync_fs, 1189 .sync_fs = ext4_sync_fs,
@@ -1194,6 +1205,7 @@ static const struct super_operations ext4_nojournal_sops = {
1194 .destroy_inode = ext4_destroy_inode, 1205 .destroy_inode = ext4_destroy_inode,
1195 .write_inode = ext4_write_inode, 1206 .write_inode = ext4_write_inode,
1196 .dirty_inode = ext4_dirty_inode, 1207 .dirty_inode = ext4_dirty_inode,
1208 .drop_inode = ext4_drop_inode,
1197 .evict_inode = ext4_evict_inode, 1209 .evict_inode = ext4_evict_inode,
1198 .write_super = ext4_write_super, 1210 .write_super = ext4_write_super,
1199 .put_super = ext4_put_super, 1211 .put_super = ext4_put_super,
@@ -2699,7 +2711,6 @@ static int ext4_lazyinit_thread(void *arg)
2699 struct ext4_li_request *elr; 2711 struct ext4_li_request *elr;
2700 unsigned long next_wakeup; 2712 unsigned long next_wakeup;
2701 DEFINE_WAIT(wait); 2713 DEFINE_WAIT(wait);
2702 int ret;
2703 2714
2704 BUG_ON(NULL == eli); 2715 BUG_ON(NULL == eli);
2705 2716
@@ -2723,13 +2734,12 @@ cont_thread:
2723 elr = list_entry(pos, struct ext4_li_request, 2734 elr = list_entry(pos, struct ext4_li_request,
2724 lr_request); 2735 lr_request);
2725 2736
2726 if (time_after_eq(jiffies, elr->lr_next_sched)) 2737 if (time_after_eq(jiffies, elr->lr_next_sched)) {
2727 ret = ext4_run_li_request(elr); 2738 if (ext4_run_li_request(elr) != 0) {
2728 2739 /* error, remove the lazy_init job */
2729 if (ret) { 2740 ext4_remove_li_request(elr);
2730 ret = 0; 2741 continue;
2731 ext4_remove_li_request(elr); 2742 }
2732 continue;
2733 } 2743 }
2734 2744
2735 if (time_before(elr->lr_next_sched, next_wakeup)) 2745 if (time_before(elr->lr_next_sched, next_wakeup))
@@ -2740,7 +2750,8 @@ cont_thread:
2740 if (freezing(current)) 2750 if (freezing(current))
2741 refrigerator(); 2751 refrigerator();
2742 2752
2743 if (time_after_eq(jiffies, next_wakeup)) { 2753 if ((time_after_eq(jiffies, next_wakeup)) ||
2754 (MAX_JIFFY_OFFSET == next_wakeup)) {
2744 cond_resched(); 2755 cond_resched();
2745 continue; 2756 continue;
2746 } 2757 }
@@ -3348,6 +3359,24 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3348 get_random_bytes(&sbi->s_next_generation, sizeof(u32)); 3359 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
3349 spin_lock_init(&sbi->s_next_gen_lock); 3360 spin_lock_init(&sbi->s_next_gen_lock);
3350 3361
3362 err = percpu_counter_init(&sbi->s_freeblocks_counter,
3363 ext4_count_free_blocks(sb));
3364 if (!err) {
3365 err = percpu_counter_init(&sbi->s_freeinodes_counter,
3366 ext4_count_free_inodes(sb));
3367 }
3368 if (!err) {
3369 err = percpu_counter_init(&sbi->s_dirs_counter,
3370 ext4_count_dirs(sb));
3371 }
3372 if (!err) {
3373 err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
3374 }
3375 if (err) {
3376 ext4_msg(sb, KERN_ERR, "insufficient memory");
3377 goto failed_mount3;
3378 }
3379
3351 sbi->s_stripe = ext4_get_stripe_size(sbi); 3380 sbi->s_stripe = ext4_get_stripe_size(sbi);
3352 sbi->s_max_writeback_mb_bump = 128; 3381 sbi->s_max_writeback_mb_bump = 128;
3353 3382
@@ -3446,22 +3475,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3446 } 3475 }
3447 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); 3476 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
3448 3477
3449no_journal: 3478 /*
3450 err = percpu_counter_init(&sbi->s_freeblocks_counter, 3479 * The journal may have updated the bg summary counts, so we
3451 ext4_count_free_blocks(sb)); 3480 * need to update the global counters.
3452 if (!err) 3481 */
3453 err = percpu_counter_init(&sbi->s_freeinodes_counter, 3482 percpu_counter_set(&sbi->s_freeblocks_counter,
3454 ext4_count_free_inodes(sb)); 3483 ext4_count_free_blocks(sb));
3455 if (!err) 3484 percpu_counter_set(&sbi->s_freeinodes_counter,
3456 err = percpu_counter_init(&sbi->s_dirs_counter, 3485 ext4_count_free_inodes(sb));
3457 ext4_count_dirs(sb)); 3486 percpu_counter_set(&sbi->s_dirs_counter,
3458 if (!err) 3487 ext4_count_dirs(sb));
3459 err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0); 3488 percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
3460 if (err) {
3461 ext4_msg(sb, KERN_ERR, "insufficient memory");
3462 goto failed_mount_wq;
3463 }
3464 3489
3490no_journal:
3465 EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); 3491 EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
3466 if (!EXT4_SB(sb)->dio_unwritten_wq) { 3492 if (!EXT4_SB(sb)->dio_unwritten_wq) {
3467 printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); 3493 printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
@@ -3611,10 +3637,6 @@ failed_mount_wq:
3611 jbd2_journal_destroy(sbi->s_journal); 3637 jbd2_journal_destroy(sbi->s_journal);
3612 sbi->s_journal = NULL; 3638 sbi->s_journal = NULL;
3613 } 3639 }
3614 percpu_counter_destroy(&sbi->s_freeblocks_counter);
3615 percpu_counter_destroy(&sbi->s_freeinodes_counter);
3616 percpu_counter_destroy(&sbi->s_dirs_counter);
3617 percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
3618failed_mount3: 3640failed_mount3:
3619 if (sbi->s_flex_groups) { 3641 if (sbi->s_flex_groups) {
3620 if (is_vmalloc_addr(sbi->s_flex_groups)) 3642 if (is_vmalloc_addr(sbi->s_flex_groups))
@@ -3622,6 +3644,10 @@ failed_mount3:
3622 else 3644 else
3623 kfree(sbi->s_flex_groups); 3645 kfree(sbi->s_flex_groups);
3624 } 3646 }
3647 percpu_counter_destroy(&sbi->s_freeblocks_counter);
3648 percpu_counter_destroy(&sbi->s_freeinodes_counter);
3649 percpu_counter_destroy(&sbi->s_dirs_counter);
3650 percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
3625failed_mount2: 3651failed_mount2:
3626 for (i = 0; i < db_count; i++) 3652 for (i = 0; i < db_count; i++)
3627 brelse(sbi->s_group_desc[i]); 3653 brelse(sbi->s_group_desc[i]);
@@ -3949,13 +3975,11 @@ static int ext4_commit_super(struct super_block *sb, int sync)
3949 else 3975 else
3950 es->s_kbytes_written = 3976 es->s_kbytes_written =
3951 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written); 3977 cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
3952 if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeblocks_counter)) 3978 ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
3953 ext4_free_blocks_count_set(es, percpu_counter_sum_positive( 3979 &EXT4_SB(sb)->s_freeblocks_counter));
3954 &EXT4_SB(sb)->s_freeblocks_counter)); 3980 es->s_free_inodes_count =
3955 if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter)) 3981 cpu_to_le32(percpu_counter_sum_positive(
3956 es->s_free_inodes_count = 3982 &EXT4_SB(sb)->s_freeinodes_counter));
3957 cpu_to_le32(percpu_counter_sum_positive(
3958 &EXT4_SB(sb)->s_freeinodes_counter));
3959 sb->s_dirt = 0; 3983 sb->s_dirt = 0;
3960 BUFFER_TRACE(sbh, "marking dirty"); 3984 BUFFER_TRACE(sbh, "marking dirty");
3961 mark_buffer_dirty(sbh); 3985 mark_buffer_dirty(sbh);
@@ -4556,12 +4580,10 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
4556 4580
4557static int ext4_quota_off(struct super_block *sb, int type) 4581static int ext4_quota_off(struct super_block *sb, int type)
4558{ 4582{
4559 /* Force all delayed allocation blocks to be allocated */ 4583 /* Force all delayed allocation blocks to be allocated.
4560 if (test_opt(sb, DELALLOC)) { 4584 * Caller already holds s_umount sem */
4561 down_read(&sb->s_umount); 4585 if (test_opt(sb, DELALLOC))
4562 sync_filesystem(sb); 4586 sync_filesystem(sb);
4563 up_read(&sb->s_umount);
4564 }
4565 4587
4566 return dquot_quota_off(sb, type); 4588 return dquot_quota_off(sb, type);
4567} 4589}
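The super.c reshuffle creates the four percpu counters before the journal is loaded (so the failed_mount3 path now owns their destruction) and then reseeds them once recovery is done, since replay can change the on-disk group summaries. The shape of the sequence, shown for one of the four counters using the calls from the hunks above:

/* at mount, before the journal is touched */
err = percpu_counter_init(&sbi->s_freeblocks_counter,
			  ext4_count_free_blocks(sb));
if (err)
	goto failed_mount3;

/* ... journal load and replay ... */

/* replay may have updated the bg summaries, so reseed the counter */
percpu_counter_set(&sbi->s_freeblocks_counter, ext4_count_free_blocks(sb));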
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 06d582732d3..5ab3839dfcb 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -138,10 +138,8 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
138 struct gfs2_inum_host *inum) 138 struct gfs2_inum_host *inum)
139{ 139{
140 struct gfs2_sbd *sdp = sb->s_fs_info; 140 struct gfs2_sbd *sdp = sb->s_fs_info;
141 struct gfs2_holder i_gh;
142 struct inode *inode; 141 struct inode *inode;
143 struct dentry *dentry; 142 struct dentry *dentry;
144 int error;
145 143
146 inode = gfs2_ilookup(sb, inum->no_addr); 144 inode = gfs2_ilookup(sb, inum->no_addr);
147 if (inode) { 145 if (inode) {
@@ -152,52 +150,16 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
152 goto out_inode; 150 goto out_inode;
153 } 151 }
154 152
155 error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops, 153 inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
156 LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 154 GFS2_BLKST_DINODE);
157 if (error) 155 if (IS_ERR(inode))
158 return ERR_PTR(error); 156 return ERR_CAST(inode);
159
160 error = gfs2_check_blk_type(sdp, inum->no_addr, GFS2_BLKST_DINODE);
161 if (error)
162 goto fail;
163
164 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, inum->no_addr, 0);
165 if (IS_ERR(inode)) {
166 error = PTR_ERR(inode);
167 goto fail;
168 }
169
170 error = gfs2_inode_refresh(GFS2_I(inode));
171 if (error) {
172 iput(inode);
173 goto fail;
174 }
175
176 /* Pick up the works we bypass in gfs2_inode_lookup */
177 if (inode->i_state & I_NEW)
178 gfs2_set_iop(inode);
179
180 if (GFS2_I(inode)->i_no_formal_ino != inum->no_formal_ino) {
181 iput(inode);
182 goto fail;
183 }
184
185 error = -EIO;
186 if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM) {
187 iput(inode);
188 goto fail;
189 }
190
191 gfs2_glock_dq_uninit(&i_gh);
192 157
193out_inode: 158out_inode:
194 dentry = d_obtain_alias(inode); 159 dentry = d_obtain_alias(inode);
195 if (!IS_ERR(dentry)) 160 if (!IS_ERR(dentry))
196 dentry->d_op = &gfs2_dops; 161 dentry->d_op = &gfs2_dops;
197 return dentry; 162 return dentry;
198fail:
199 gfs2_glock_dq_uninit(&i_gh);
200 return ERR_PTR(error);
201} 163}
202 164
203static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, 165static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 87778857f09..f92c1770416 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -686,21 +686,20 @@ static void delete_work_func(struct work_struct *work)
686{ 686{
687 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); 687 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
688 struct gfs2_sbd *sdp = gl->gl_sbd; 688 struct gfs2_sbd *sdp = gl->gl_sbd;
689 struct gfs2_inode *ip = NULL; 689 struct gfs2_inode *ip;
690 struct inode *inode; 690 struct inode *inode;
691 u64 no_addr = 0; 691 u64 no_addr = gl->gl_name.ln_number;
692
693 ip = gl->gl_object;
694 /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
692 695
693 spin_lock(&gl->gl_spin);
694 ip = (struct gfs2_inode *)gl->gl_object;
695 if (ip) 696 if (ip)
696 no_addr = ip->i_no_addr;
697 spin_unlock(&gl->gl_spin);
698 if (ip) {
699 inode = gfs2_ilookup(sdp->sd_vfs, no_addr); 697 inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
700 if (inode) { 698 else
701 d_prune_aliases(inode); 699 inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
702 iput(inode); 700 if (inode && !IS_ERR(inode)) {
703 } 701 d_prune_aliases(inode);
702 iput(inode);
704 } 703 }
705 gfs2_glock_put(gl); 704 gfs2_glock_put(gl);
706} 705}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 06370f8bd8c..e1213f7f921 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -73,49 +73,6 @@ static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
73 return iget5_locked(sb, hash, iget_test, iget_set, &no_addr); 73 return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
74} 74}
75 75
76struct gfs2_skip_data {
77 u64 no_addr;
78 int skipped;
79};
80
81static int iget_skip_test(struct inode *inode, void *opaque)
82{
83 struct gfs2_inode *ip = GFS2_I(inode);
84 struct gfs2_skip_data *data = opaque;
85
86 if (ip->i_no_addr == data->no_addr) {
87 if (inode->i_state & (I_FREEING|I_WILL_FREE)){
88 data->skipped = 1;
89 return 0;
90 }
91 return 1;
92 }
93 return 0;
94}
95
96static int iget_skip_set(struct inode *inode, void *opaque)
97{
98 struct gfs2_inode *ip = GFS2_I(inode);
99 struct gfs2_skip_data *data = opaque;
100
101 if (data->skipped)
102 return 1;
103 inode->i_ino = (unsigned long)(data->no_addr);
104 ip->i_no_addr = data->no_addr;
105 return 0;
106}
107
108static struct inode *gfs2_iget_skip(struct super_block *sb,
109 u64 no_addr)
110{
111 struct gfs2_skip_data data;
112 unsigned long hash = (unsigned long)no_addr;
113
114 data.no_addr = no_addr;
115 data.skipped = 0;
116 return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
117}
118
119/** 76/**
120 * GFS2 lookup code fills in vfs inode contents based on info obtained 77 * GFS2 lookup code fills in vfs inode contents based on info obtained
121 * from directory entry inside gfs2_inode_lookup(). This has caused issues 78 * from directory entry inside gfs2_inode_lookup(). This has caused issues
@@ -243,93 +200,54 @@ fail:
243 return ERR_PTR(error); 200 return ERR_PTR(error);
244} 201}
245 202
246/** 203struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
247 * gfs2_process_unlinked_inode - Lookup an unlinked inode for reclamation 204 u64 *no_formal_ino, unsigned int blktype)
248 * and try to reclaim it by doing iput.
249 *
250 * This function assumes no rgrp locks are currently held.
251 *
252 * @sb: The super block
253 * no_addr: The inode number
254 *
255 */
256
257void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
258{ 205{
259 struct gfs2_sbd *sdp; 206 struct super_block *sb = sdp->sd_vfs;
260 struct gfs2_inode *ip; 207 struct gfs2_holder i_gh;
261 struct gfs2_glock *io_gl = NULL;
262 int error;
263 struct gfs2_holder gh;
264 struct inode *inode; 208 struct inode *inode;
209 int error;
265 210
266 inode = gfs2_iget_skip(sb, no_addr); 211 error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
267 212 LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
268 if (!inode) 213 if (error)
269 return; 214 return ERR_PTR(error);
270
271 /* If it's not a new inode, someone's using it, so leave it alone. */
272 if (!(inode->i_state & I_NEW)) {
273 iput(inode);
274 return;
275 }
276
277 ip = GFS2_I(inode);
278 sdp = GFS2_SB(inode);
279 ip->i_no_formal_ino = -1;
280 215
281 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); 216 error = gfs2_check_blk_type(sdp, no_addr, blktype);
282 if (unlikely(error)) 217 if (error)
283 goto fail; 218 goto fail;
284 ip->i_gl->gl_object = ip;
285 219
286 error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); 220 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
287 if (unlikely(error)) 221 if (IS_ERR(inode))
288 goto fail_put; 222 goto fail;
289
290 set_bit(GIF_INVALID, &ip->i_flags);
291 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, LM_FLAG_TRY | GL_EXACT,
292 &ip->i_iopen_gh);
293 if (unlikely(error))
294 goto fail_iopen;
295 223
296 ip->i_iopen_gh.gh_gl->gl_object = ip; 224 error = gfs2_inode_refresh(GFS2_I(inode));
297 gfs2_glock_put(io_gl); 225 if (error)
298 io_gl = NULL; 226 goto fail_iput;
299 227
300 inode->i_mode = DT2IF(DT_UNKNOWN); 228 /* Pick up the works we bypass in gfs2_inode_lookup */
229 if (inode->i_state & I_NEW)
230 gfs2_set_iop(inode);
301 231
302 /* 232 /* Two extra checks for NFS only */
303 * We must read the inode in order to work out its type in 233 if (no_formal_ino) {
304 * this case. Note that this doesn't happen often as we normally 234 error = -ESTALE;
305 * know the type beforehand. This code path only occurs during 235 if (GFS2_I(inode)->i_no_formal_ino != *no_formal_ino)
306 * unlinked inode recovery (where it is safe to do this glock, 236 goto fail_iput;
307 * which is not true in the general case).
308 */
309 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY,
310 &gh);
311 if (unlikely(error))
312 goto fail_glock;
313 237
314 /* Inode is now uptodate */ 238 error = -EIO;
315 gfs2_glock_dq_uninit(&gh); 239 if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM)
316 gfs2_set_iop(inode); 240 goto fail_iput;
317 241
318 /* The iput will cause it to be deleted. */ 242 error = 0;
319 iput(inode); 243 }
320 return;
321 244
322fail_glock:
323 gfs2_glock_dq(&ip->i_iopen_gh);
324fail_iopen:
325 if (io_gl)
326 gfs2_glock_put(io_gl);
327fail_put:
328 ip->i_gl->gl_object = NULL;
329 gfs2_glock_put(ip->i_gl);
330fail: 245fail:
331 iget_failed(inode); 246 gfs2_glock_dq_uninit(&i_gh);
332 return; 247 return error ? ERR_PTR(error) : inode;
248fail_iput:
249 iput(inode);
250 goto fail;
333} 251}
334 252
335static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) 253static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 6720d7d5fbc..d8499fadcc5 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -99,7 +99,9 @@ err:
99extern void gfs2_set_iop(struct inode *inode); 99extern void gfs2_set_iop(struct inode *inode);
100extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 100extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
101 u64 no_addr, u64 no_formal_ino); 101 u64 no_addr, u64 no_formal_ino);
102extern void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr); 102extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
103 u64 *no_formal_ino,
104 unsigned int blktype);
103extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr); 105extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
104 106
105extern int gfs2_inode_refresh(struct gfs2_inode *ip); 107extern int gfs2_inode_refresh(struct gfs2_inode *ip);
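gfs2_lookup_by_inum() now centralizes the lock-inum-glock / verify-block-type / look-up-and-refresh sequence that gfs2_get_dentry() used to open-code. Callers choose the checks they need: the NFS path passes the expected no_formal_ino so the ESTALE and system-inode checks run, while the deferred-delete path in glock.c passes NULL to skip them. Both calls, taken from the hunks above:

/* NFS filehandle -> dentry: must be a dinode with a matching generation */
inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
			    GFS2_BLKST_DINODE);

/* deferred delete of an unlinked inode: no generation check wanted */
inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
if (inode && !IS_ERR(inode)) {
	d_prune_aliases(inode);
	iput(inode);
}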
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index bef3ab6cf5c..33c8407b876 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -963,17 +963,18 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
963 * The inode, if one has been found, in inode. 963 * The inode, if one has been found, in inode.
964 */ 964 */
965 965
966static u64 try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, 966static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
967 u64 skip)
968{ 967{
969 u32 goal = 0, block; 968 u32 goal = 0, block;
970 u64 no_addr; 969 u64 no_addr;
971 struct gfs2_sbd *sdp = rgd->rd_sbd; 970 struct gfs2_sbd *sdp = rgd->rd_sbd;
972 unsigned int n; 971 unsigned int n;
972 struct gfs2_glock *gl;
973 struct gfs2_inode *ip;
974 int error;
975 int found = 0;
973 976
974 for(;;) { 977 while (goal < rgd->rd_data) {
975 if (goal >= rgd->rd_data)
976 break;
977 down_write(&sdp->sd_log_flush_lock); 978 down_write(&sdp->sd_log_flush_lock);
978 n = 1; 979 n = 1;
979 block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, 980 block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
@@ -990,11 +991,32 @@ static u64 try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
990 if (no_addr == skip) 991 if (no_addr == skip)
991 continue; 992 continue;
992 *last_unlinked = no_addr; 993 *last_unlinked = no_addr;
993 return no_addr; 994
995 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
996 if (error)
997 continue;
998
999 /* If the inode is already in cache, we can ignore it here
1000 * because the existing inode disposal code will deal with
1001 * it when all refs have gone away. Accessing gl_object like
1002 * this is not safe in general. Here it is ok because we do
1003 * not dereference the pointer, and we only need an approx
1004 * answer to whether it is NULL or not.
1005 */
1006 ip = gl->gl_object;
1007
1008 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1009 gfs2_glock_put(gl);
1010 else
1011 found++;
1012
1013 /* Limit reclaim to sensible number of tasks */
1014 if (found > 2*NR_CPUS)
1015 return;
994 } 1016 }
995 1017
996 rgd->rd_flags &= ~GFS2_RDF_CHECK; 1018 rgd->rd_flags &= ~GFS2_RDF_CHECK;
997 return 0; 1019 return;
998} 1020}
999 1021
1000/** 1022/**
@@ -1075,11 +1097,9 @@ static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
1075 * Try to acquire rgrp in way which avoids contending with others. 1097 * Try to acquire rgrp in way which avoids contending with others.
1076 * 1098 *
1077 * Returns: errno 1099 * Returns: errno
1078 * unlinked: the block address of an unlinked block to be reclaimed
1079 */ 1100 */
1080 1101
1081static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, 1102static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
1082 u64 *last_unlinked)
1083{ 1103{
1084 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1104 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1085 struct gfs2_rgrpd *rgd, *begin = NULL; 1105 struct gfs2_rgrpd *rgd, *begin = NULL;
@@ -1089,7 +1109,6 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked,
1089 int loops = 0; 1109 int loops = 0;
1090 int error, rg_locked; 1110 int error, rg_locked;
1091 1111
1092 *unlinked = 0;
1093 rgd = gfs2_blk2rgrpd(sdp, ip->i_goal); 1112 rgd = gfs2_blk2rgrpd(sdp, ip->i_goal);
1094 1113
1095 while (rgd) { 1114 while (rgd) {
@@ -1106,17 +1125,10 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked,
1106 case 0: 1125 case 0:
1107 if (try_rgrp_fit(rgd, al)) 1126 if (try_rgrp_fit(rgd, al))
1108 goto out; 1127 goto out;
1109 /* If the rg came in already locked, there's no 1128 if (rgd->rd_flags & GFS2_RDF_CHECK)
1110 way we can recover from a failed try_rgrp_unlink 1129 try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
1111 because that would require an iput which can only
1112 happen after the rgrp is unlocked. */
1113 if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK)
1114 *unlinked = try_rgrp_unlink(rgd, last_unlinked,
1115 ip->i_no_addr);
1116 if (!rg_locked) 1130 if (!rg_locked)
1117 gfs2_glock_dq_uninit(&al->al_rgd_gh); 1131 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1118 if (*unlinked)
1119 return -EAGAIN;
1120 /* fall through */ 1132 /* fall through */
1121 case GLR_TRYFAILED: 1133 case GLR_TRYFAILED:
1122 rgd = recent_rgrp_next(rgd); 1134 rgd = recent_rgrp_next(rgd);
@@ -1145,13 +1157,10 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked,
1145 case 0: 1157 case 0:
1146 if (try_rgrp_fit(rgd, al)) 1158 if (try_rgrp_fit(rgd, al))
1147 goto out; 1159 goto out;
1148 if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK) 1160 if (rgd->rd_flags & GFS2_RDF_CHECK)
1149 *unlinked = try_rgrp_unlink(rgd, last_unlinked, 1161 try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
1150 ip->i_no_addr);
1151 if (!rg_locked) 1162 if (!rg_locked)
1152 gfs2_glock_dq_uninit(&al->al_rgd_gh); 1163 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1153 if (*unlinked)
1154 return -EAGAIN;
1155 break; 1164 break;
1156 1165
1157 case GLR_TRYFAILED: 1166 case GLR_TRYFAILED:
@@ -1204,12 +1213,12 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
1204 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1213 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1205 struct gfs2_alloc *al = ip->i_alloc; 1214 struct gfs2_alloc *al = ip->i_alloc;
1206 int error = 0; 1215 int error = 0;
1207 u64 last_unlinked = NO_BLOCK, unlinked; 1216 u64 last_unlinked = NO_BLOCK;
1217 int tries = 0;
1208 1218
1209 if (gfs2_assert_warn(sdp, al->al_requested)) 1219 if (gfs2_assert_warn(sdp, al->al_requested))
1210 return -EINVAL; 1220 return -EINVAL;
1211 1221
1212try_again:
1213 if (hold_rindex) { 1222 if (hold_rindex) {
1214 /* We need to hold the rindex unless the inode we're using is 1223 /* We need to hold the rindex unless the inode we're using is
1215 the rindex itself, in which case it's already held. */ 1224 the rindex itself, in which case it's already held. */
@@ -1218,31 +1227,23 @@ try_again:
1218 else if (!sdp->sd_rgrps) /* We may not have the rindex read 1227 else if (!sdp->sd_rgrps) /* We may not have the rindex read
1219 in, so: */ 1228 in, so: */
1220 error = gfs2_ri_update_special(ip); 1229 error = gfs2_ri_update_special(ip);
1230 if (error)
1231 return error;
1221 } 1232 }
1222 1233
1223 if (error) 1234 do {
1224 return error; 1235 error = get_local_rgrp(ip, &last_unlinked);
1236 /* If there is no space, flushing the log may release some */
1237 if (error)
1238 gfs2_log_flush(sdp, NULL);
1239 } while (error && tries++ < 3);
1225 1240
1226 /* Find an rgrp suitable for allocation. If it encounters any unlinked
1227 dinodes along the way, error will equal -EAGAIN and unlinked will
1228 contains it block address. We then need to look up that inode and
1229 try to free it, and try the allocation again. */
1230 error = get_local_rgrp(ip, &unlinked, &last_unlinked);
1231 if (error) { 1241 if (error) {
1232 if (hold_rindex && ip != GFS2_I(sdp->sd_rindex)) 1242 if (hold_rindex && ip != GFS2_I(sdp->sd_rindex))
1233 gfs2_glock_dq_uninit(&al->al_ri_gh); 1243 gfs2_glock_dq_uninit(&al->al_ri_gh);
1234 if (error != -EAGAIN) 1244 return error;
1235 return error;
1236
1237 gfs2_process_unlinked_inode(ip->i_inode.i_sb, unlinked);
1238 /* regardless of whether or not gfs2_process_unlinked_inode
1239 was successful, we don't want to repeat it again. */
1240 last_unlinked = unlinked;
1241 gfs2_log_flush(sdp, NULL);
1242 error = 0;
1243
1244 goto try_again;
1245 } 1245 }
1246
1246 /* no error, so we have the rgrp set in the inode's allocation. */ 1247 /* no error, so we have the rgrp set in the inode's allocation. */
1247 al->al_file = file; 1248 al->al_file = file;
1248 al->al_line = line; 1249 al->al_line = line;
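Reviewer note: the allocation path above replaces the goto-based retry with a bounded loop. A minimal sketch of that shape, where try_allocate() and flush_journal() are purely illustrative stand-ins for get_local_rgrp() and gfs2_log_flush():

/* Illustrative only: bounded retry with a journal flush between attempts. */
static int reserve_with_retry(void)
{
	int tries = 0;
	int error;

	do {
		error = try_allocate();
		/* If there is no space, flushing the log may release some. */
		if (error)
			flush_journal();
	} while (error && tries++ < 3);

	return error;
}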
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d6cfac1f0a4..a5fe68189ee 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -932,8 +932,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
932 if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { 932 if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
933 *user = current_user(); 933 *user = current_user();
934 if (user_shm_lock(size, *user)) { 934 if (user_shm_lock(size, *user)) {
935 WARN_ONCE(1, 935 printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n");
936 "Using mlock ulimits for SHM_HUGETLB deprecated\n");
937 } else { 936 } else {
938 *user = NULL; 937 *user = NULL;
939 return ERR_PTR(-EPERM); 938 return ERR_PTR(-EPERM);
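For reference, the printk_once() idiom that the hunk above switches to keeps per-call-site state, so the deprecation warning is emitted at most once. A minimal, hypothetical example (legacy_knob_set() is not from the patch):

/* Hypothetical example of the printk_once() pattern used above. */
static int legacy_knob_set(int val)
{
	printk_once(KERN_WARNING
		    "legacy_knob is deprecated, use new_knob instead\n");
	return val;
}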
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 748cfb92dcc..2f7d05c8992 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -111,12 +111,14 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
111 read_lock(&tasklist_lock); 111 read_lock(&tasklist_lock);
112 switch (which) { 112 switch (which) {
113 case IOPRIO_WHO_PROCESS: 113 case IOPRIO_WHO_PROCESS:
114 rcu_read_lock();
114 if (!who) 115 if (!who)
115 p = current; 116 p = current;
116 else 117 else
117 p = find_task_by_vpid(who); 118 p = find_task_by_vpid(who);
118 if (p) 119 if (p)
119 ret = set_task_ioprio(p, ioprio); 120 ret = set_task_ioprio(p, ioprio);
121 rcu_read_unlock();
120 break; 122 break;
121 case IOPRIO_WHO_PGRP: 123 case IOPRIO_WHO_PGRP:
122 if (!who) 124 if (!who)
@@ -139,7 +141,12 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
139 break; 141 break;
140 142
141 do_each_thread(g, p) { 143 do_each_thread(g, p) {
142 if (__task_cred(p)->uid != who) 144 int match;
145
146 rcu_read_lock();
147 match = __task_cred(p)->uid == who;
148 rcu_read_unlock();
149 if (!match)
143 continue; 150 continue;
144 ret = set_task_ioprio(p, ioprio); 151 ret = set_task_ioprio(p, ioprio);
145 if (ret) 152 if (ret)
@@ -200,12 +207,14 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
200 read_lock(&tasklist_lock); 207 read_lock(&tasklist_lock);
201 switch (which) { 208 switch (which) {
202 case IOPRIO_WHO_PROCESS: 209 case IOPRIO_WHO_PROCESS:
210 rcu_read_lock();
203 if (!who) 211 if (!who)
204 p = current; 212 p = current;
205 else 213 else
206 p = find_task_by_vpid(who); 214 p = find_task_by_vpid(who);
207 if (p) 215 if (p)
208 ret = get_task_ioprio(p); 216 ret = get_task_ioprio(p);
217 rcu_read_unlock();
209 break; 218 break;
210 case IOPRIO_WHO_PGRP: 219 case IOPRIO_WHO_PGRP:
211 if (!who) 220 if (!who)
@@ -232,7 +241,12 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
232 break; 241 break;
233 242
234 do_each_thread(g, p) { 243 do_each_thread(g, p) {
235 if (__task_cred(p)->uid != user->uid) 244 int match;
245
246 rcu_read_lock();
247 match = __task_cred(p)->uid == user->uid;
248 rcu_read_unlock();
249 if (!match)
236 continue; 250 continue;
237 tmpio = get_task_ioprio(p); 251 tmpio = get_task_ioprio(p);
238 if (tmpio < 0) 252 if (tmpio < 0)
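The pattern added above is the standard one for credential access: __task_cred() returns an RCU-protected pointer, so the uid comparison must sit inside an rcu_read_lock()/rcu_read_unlock() pair. A small sketch under that assumption; task_owned_by() is illustrative and not part of the patch:

/* Illustrative helper: compare a task's real uid under RCU protection. */
static int task_owned_by(struct task_struct *p, uid_t uid)
{
	int match;

	rcu_read_lock();
	match = __task_cred(p)->uid == uid;
	rcu_read_unlock();

	return match;
}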
diff --git a/fs/locks.c b/fs/locks.c
index 65765cb6afe..0e62dd35d08 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1504,9 +1504,8 @@ static int do_fcntl_delete_lease(struct file *filp)
1504 1504
1505static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) 1505static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1506{ 1506{
1507 struct file_lock *fl; 1507 struct file_lock *fl, *ret;
1508 struct fasync_struct *new; 1508 struct fasync_struct *new;
1509 struct inode *inode = filp->f_path.dentry->d_inode;
1510 int error; 1509 int error;
1511 1510
1512 fl = lease_alloc(filp, arg); 1511 fl = lease_alloc(filp, arg);
@@ -1518,13 +1517,16 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1518 locks_free_lock(fl); 1517 locks_free_lock(fl);
1519 return -ENOMEM; 1518 return -ENOMEM;
1520 } 1519 }
1520 ret = fl;
1521 lock_flocks(); 1521 lock_flocks();
1522 error = __vfs_setlease(filp, arg, &fl); 1522 error = __vfs_setlease(filp, arg, &ret);
1523 if (error) { 1523 if (error) {
1524 unlock_flocks(); 1524 unlock_flocks();
1525 locks_free_lock(fl); 1525 locks_free_lock(fl);
1526 goto out_free_fasync; 1526 goto out_free_fasync;
1527 } 1527 }
1528 if (ret != fl)
1529 locks_free_lock(fl);
1528 1530
1529 /* 1531 /*
1530 * fasync_insert_entry() returns the old entry if any. 1532 * fasync_insert_entry() returns the old entry if any.
@@ -1532,17 +1534,10 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1532 * inserted it into the fasync list. Clear new so that 1534 * inserted it into the fasync list. Clear new so that
1533 * we don't release it here. 1535 * we don't release it here.
1534 */ 1536 */
1535 if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new)) 1537 if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1536 new = NULL; 1538 new = NULL;
1537 1539
1538 if (error < 0) { 1540 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1539 /* remove lease just inserted by setlease */
1540 fl->fl_type = F_UNLCK | F_INPROGRESS;
1541 fl->fl_break_time = jiffies - 10;
1542 time_out_leases(inode);
1543 } else {
1544 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1545 }
1546 unlock_flocks(); 1541 unlock_flocks();
1547 1542
1548out_free_fasync: 1543out_free_fasync:
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index cd51a36b37f..57afd4a6fab 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -486,7 +486,7 @@ static inline int logfs_get_sb_bdev(struct logfs_super *s,
486 486
487/* dev_mtd.c */ 487/* dev_mtd.c */
488#ifdef CONFIG_MTD 488#ifdef CONFIG_MTD
489int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr) 489int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr);
490#else 490#else
491static inline int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr) 491static inline int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
492{ 492{
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f1e5ec6b510..ad2bfa68d53 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -673,16 +673,17 @@ static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
673 spin_unlock(&clp->cl_lock); 673 spin_unlock(&clp->cl_lock);
674} 674}
675 675
676static void nfsd4_register_conn(struct nfsd4_conn *conn) 676static int nfsd4_register_conn(struct nfsd4_conn *conn)
677{ 677{
678 conn->cn_xpt_user.callback = nfsd4_conn_lost; 678 conn->cn_xpt_user.callback = nfsd4_conn_lost;
679 register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); 679 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
680} 680}
681 681
682static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) 682static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
683{ 683{
684 struct nfsd4_conn *conn; 684 struct nfsd4_conn *conn;
685 u32 flags = NFS4_CDFC4_FORE; 685 u32 flags = NFS4_CDFC4_FORE;
686 int ret;
686 687
687 if (ses->se_flags & SESSION4_BACK_CHAN) 688 if (ses->se_flags & SESSION4_BACK_CHAN)
688 flags |= NFS4_CDFC4_BACK; 689 flags |= NFS4_CDFC4_BACK;
@@ -690,7 +691,10 @@ static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
690 if (!conn) 691 if (!conn)
691 return nfserr_jukebox; 692 return nfserr_jukebox;
692 nfsd4_hash_conn(conn, ses); 693 nfsd4_hash_conn(conn, ses);
693 nfsd4_register_conn(conn); 694 ret = nfsd4_register_conn(conn);
695 if (ret)
696 /* oops; xprt is already down: */
697 nfsd4_conn_lost(&conn->cn_xpt_user);
694 return nfs_ok; 698 return nfs_ok;
695} 699}
696 700
@@ -1644,6 +1648,7 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi
1644{ 1648{
1645 struct nfs4_client *clp = ses->se_client; 1649 struct nfs4_client *clp = ses->se_client;
1646 struct nfsd4_conn *c; 1650 struct nfsd4_conn *c;
1651 int ret;
1647 1652
1648 spin_lock(&clp->cl_lock); 1653 spin_lock(&clp->cl_lock);
1649 c = __nfsd4_find_conn(new->cn_xprt, ses); 1654 c = __nfsd4_find_conn(new->cn_xprt, ses);
@@ -1654,7 +1659,10 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi
1654 } 1659 }
1655 __nfsd4_hash_conn(new, ses); 1660 __nfsd4_hash_conn(new, ses);
1656 spin_unlock(&clp->cl_lock); 1661 spin_unlock(&clp->cl_lock);
1657 nfsd4_register_conn(new); 1662 ret = nfsd4_register_conn(new);
1663 if (ret)
1664 /* oops; xprt is already down: */
1665 nfsd4_conn_lost(&new->cn_xpt_user);
1658 return; 1666 return;
1659} 1667}
1660 1668
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index d8408217e3b..1efea361558 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -159,7 +159,9 @@ struct ocfs2_lock_res {
159 char l_name[OCFS2_LOCK_ID_MAX_LEN]; 159 char l_name[OCFS2_LOCK_ID_MAX_LEN];
160 unsigned int l_ro_holders; 160 unsigned int l_ro_holders;
161 unsigned int l_ex_holders; 161 unsigned int l_ex_holders;
162 unsigned char l_level; 162 char l_level;
163 char l_requested;
164 char l_blocking;
163 165
164 /* Data packed - type enum ocfs2_lock_type */ 166 /* Data packed - type enum ocfs2_lock_type */
165 unsigned char l_type; 167 unsigned char l_type;
@@ -169,8 +171,6 @@ struct ocfs2_lock_res {
169 unsigned char l_action; 171 unsigned char l_action;
170 /* Data packed - enum type ocfs2_unlock_action */ 172 /* Data packed - enum type ocfs2_unlock_action */
171 unsigned char l_unlock_action; 173 unsigned char l_unlock_action;
172 unsigned char l_requested;
173 unsigned char l_blocking;
174 unsigned int l_pending_gen; 174 unsigned int l_pending_gen;
175 175
176 spinlock_t l_lock; 176 spinlock_t l_lock;
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index ddb1f41376e..911e61f348f 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -418,7 +418,7 @@ out_no_root:
418static struct dentry *openprom_mount(struct file_system_type *fs_type, 418static struct dentry *openprom_mount(struct file_system_type *fs_type,
419 int flags, const char *dev_name, void *data) 419 int flags, const char *dev_name, void *data)
420{ 420{
421 return mount_single(fs_type, flags, data, openprom_fill_super) 421 return mount_single(fs_type, flags, data, openprom_fill_super);
422} 422}
423 423
424static struct file_system_type openprom_fs_type = { 424static struct file_system_type openprom_fs_type = {
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c9af48fffcd..7d287afccde 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1111,11 +1111,12 @@ xfs_vm_writepage(
1111 uptodate = 0; 1111 uptodate = 0;
1112 1112
1113 /* 1113 /*
1114 * A hole may still be marked uptodate because discard_buffer 1114 * set_page_dirty dirties all buffers in a page, independent
1115 * leaves the flag set. 1115 * of their state. The dirty state however is entirely
1116 * meaningless for holes (!mapped && uptodate), so skip
1117 * buffers covering holes here.
1116 */ 1118 */
1117 if (!buffer_mapped(bh) && buffer_uptodate(bh)) { 1119 if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1118 ASSERT(!buffer_dirty(bh));
1119 imap_valid = 0; 1120 imap_valid = 0;
1120 continue; 1121 continue;
1121 } 1122 }
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 63fd2c07cb5..aa1d353def2 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1781,7 +1781,6 @@ xfs_buf_delwri_split(
1781 INIT_LIST_HEAD(list); 1781 INIT_LIST_HEAD(list);
1782 spin_lock(dwlk); 1782 spin_lock(dwlk);
1783 list_for_each_entry_safe(bp, n, dwq, b_list) { 1783 list_for_each_entry_safe(bp, n, dwq, b_list) {
1784 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1785 ASSERT(bp->b_flags & XBF_DELWRI); 1784 ASSERT(bp->b_flags & XBF_DELWRI);
1786 1785
1787 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { 1786 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1795,6 +1794,7 @@ xfs_buf_delwri_split(
1795 _XBF_RUN_QUEUES); 1794 _XBF_RUN_QUEUES);
1796 bp->b_flags |= XBF_WRITE; 1795 bp->b_flags |= XBF_WRITE;
1797 list_move_tail(&bp->b_list, list); 1796 list_move_tail(&bp->b_list, list);
1797 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1798 } else 1798 } else
1799 skipped++; 1799 skipped++;
1800 } 1800 }
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 2ea238f6d38..ad442d9e392 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -416,7 +416,7 @@ xfs_attrlist_by_handle(
416 if (IS_ERR(dentry)) 416 if (IS_ERR(dentry))
417 return PTR_ERR(dentry); 417 return PTR_ERR(dentry);
418 418
419 kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); 419 kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
420 if (!kbuf) 420 if (!kbuf)
421 goto out_dput; 421 goto out_dput;
422 422
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 96107efc0c6..94d5fd6a297 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -762,7 +762,8 @@ xfs_setup_inode(
762 inode->i_state = I_NEW; 762 inode->i_state = I_NEW;
763 763
764 inode_sb_list_add(inode); 764 inode_sb_list_add(inode);
765 insert_inode_hash(inode); 765 /* make the inode look hashed for the writeback code */
766 hlist_add_fake(&inode->i_hash);
766 767
767 inode->i_mode = ip->i_d.di_mode; 768 inode->i_mode = ip->i_d.di_mode;
768 inode->i_nlink = ip->i_d.di_nlink; 769 inode->i_nlink = ip->i_d.di_nlink;
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9f3a78fe6ae..064f964d4f3 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -353,9 +353,6 @@ xfs_parseargs(
353 mp->m_qflags &= ~XFS_OQUOTA_ENFD; 353 mp->m_qflags &= ~XFS_OQUOTA_ENFD;
354 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { 354 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
355 mp->m_flags |= XFS_MOUNT_DELAYLOG; 355 mp->m_flags |= XFS_MOUNT_DELAYLOG;
356 cmn_err(CE_WARN,
357 "Enabling EXPERIMENTAL delayed logging feature "
358 "- use at your own risk.\n");
359 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { 356 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
360 mp->m_flags &= ~XFS_MOUNT_DELAYLOG; 357 mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
361 } else if (!strcmp(this_char, "ihashsize")) { 358 } else if (!strcmp(this_char, "ihashsize")) {
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 37d33254981..afb0d7cfad1 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -853,6 +853,7 @@ restart:
853 if (trylock) { 853 if (trylock) {
854 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { 854 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
855 skipped++; 855 skipped++;
856 xfs_perag_put(pag);
856 continue; 857 continue;
857 } 858 }
858 first_index = pag->pag_ici_reclaim_cursor; 859 first_index = pag->pag_ici_reclaim_cursor;
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 9b715dce569..9124425b7f2 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -744,9 +744,15 @@ xfs_filestream_new_ag(
744 * If the file's parent directory is known, take its iolock in exclusive 744 * If the file's parent directory is known, take its iolock in exclusive
745 * mode to prevent two sibling files from racing each other to migrate 745 * mode to prevent two sibling files from racing each other to migrate
746 * themselves and their parent to different AGs. 746 * themselves and their parent to different AGs.
747 *
748 * Note that we lock the parent directory iolock inside the child
749 * iolock here. That's fine as we never hold both parent and child
750 * iolock in any other place. This is different from the ilock,
751 * which requires locking of the child after the parent for namespace
752 * operations.
747 */ 753 */
748 if (pip) 754 if (pip)
749 xfs_ilock(pip, XFS_IOLOCK_EXCL); 755 xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
750 756
751 /* 757 /*
752 * A new AG needs to be found for the file. If the file's parent 758 * A new AG needs to be found for the file. If the file's parent
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b1498ab5a39..19e9dfa1c25 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -275,6 +275,7 @@ xfs_free_perag(
275 pag = radix_tree_delete(&mp->m_perag_tree, agno); 275 pag = radix_tree_delete(&mp->m_perag_tree, agno);
276 spin_unlock(&mp->m_perag_lock); 276 spin_unlock(&mp->m_perag_lock);
277 ASSERT(pag); 277 ASSERT(pag);
278 ASSERT(atomic_read(&pag->pag_ref) == 0);
278 call_rcu(&pag->rcu_head, __xfs_free_perag); 279 call_rcu(&pag->rcu_head, __xfs_free_perag);
279 } 280 }
280} 281}
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index e0e64b113bd..9bb6eda4cd2 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -346,8 +346,17 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
346#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta) 346#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
347#define xfs_trans_apply_dquot_deltas(tp) 347#define xfs_trans_apply_dquot_deltas(tp)
348#define xfs_trans_unreserve_and_mod_dquots(tp) 348#define xfs_trans_unreserve_and_mod_dquots(tp)
349#define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags) (0) 349static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
350#define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl) (0) 350 struct xfs_inode *ip, long nblks, long ninos, uint flags)
351{
352 return 0;
353}
354static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
355 struct xfs_mount *mp, struct xfs_dquot *udqp,
 356 struct xfs_dquot *gdqp, long nblks, long ninos, uint flags)
357{
358 return 0;
359}
351#define xfs_qm_vop_create_dqattach(tp, ip, u, g) 360#define xfs_qm_vop_create_dqattach(tp, ip, u, g)
352#define xfs_qm_vop_rename_dqattach(it) (0) 361#define xfs_qm_vop_rename_dqattach(it) (0)
353#define xfs_qm_vop_chown(tp, ip, old, new) (NULL) 362#define xfs_qm_vop_chown(tp, ip, old, new) (NULL)
@@ -357,11 +366,14 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
357#define xfs_qm_dqdetach(ip) 366#define xfs_qm_dqdetach(ip)
358#define xfs_qm_dqrele(d) 367#define xfs_qm_dqrele(d)
359#define xfs_qm_statvfs(ip, s) 368#define xfs_qm_statvfs(ip, s)
360#define xfs_qm_sync(mp, fl) (0) 369static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
370{
371 return 0;
372}
361#define xfs_qm_newmount(mp, a, b) (0) 373#define xfs_qm_newmount(mp, a, b) (0)
362#define xfs_qm_mount_quotas(mp) 374#define xfs_qm_mount_quotas(mp)
363#define xfs_qm_unmount(mp) 375#define xfs_qm_unmount(mp)
364#define xfs_qm_unmount_quotas(mp) (0) 376#define xfs_qm_unmount_quotas(mp)
365#endif /* CONFIG_XFS_QUOTA */ 377#endif /* CONFIG_XFS_QUOTA */
366 378
367#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \ 379#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
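The point of converting the "(0)" macros above into static inlines is that callers keep full argument type checking even when CONFIG_XFS_QUOTA is disabled. A generic, hypothetical illustration of the idiom; struct foo, foo_reserve() and CONFIG_FEATURE_FOO are made up:

/* Illustrative only: a typed no-op stub versus an untyped "(0)" macro. */
#ifdef CONFIG_FEATURE_FOO
int foo_reserve(struct foo *f, long nblks);
#else
static inline int foo_reserve(struct foo *f, long nblks)
{
	/* Same no-op result as "#define foo_reserve(f, nblks) (0)",
	 * but wrong argument types now fail to compile. */
	return 0;
}
#endif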
diff --git a/include/asm-generic/stat.h b/include/asm-generic/stat.h
index 47e64170305..bd8cad21998 100644
--- a/include/asm-generic/stat.h
+++ b/include/asm-generic/stat.h
@@ -33,18 +33,18 @@ struct stat {
33 int st_blksize; /* Optimal block size for I/O. */ 33 int st_blksize; /* Optimal block size for I/O. */
34 int __pad2; 34 int __pad2;
35 long st_blocks; /* Number 512-byte blocks allocated. */ 35 long st_blocks; /* Number 512-byte blocks allocated. */
36 int st_atime; /* Time of last access. */ 36 long st_atime; /* Time of last access. */
37 unsigned int st_atime_nsec; 37 unsigned long st_atime_nsec;
38 int st_mtime; /* Time of last modification. */ 38 long st_mtime; /* Time of last modification. */
39 unsigned int st_mtime_nsec; 39 unsigned long st_mtime_nsec;
40 int st_ctime; /* Time of last status change. */ 40 long st_ctime; /* Time of last status change. */
41 unsigned int st_ctime_nsec; 41 unsigned long st_ctime_nsec;
42 unsigned int __unused4; 42 unsigned int __unused4;
43 unsigned int __unused5; 43 unsigned int __unused5;
44}; 44};
45 45
46#if __BITS_PER_LONG != 64
47/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */ 46/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
47#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
48struct stat64 { 48struct stat64 {
49 unsigned long long st_dev; /* Device. */ 49 unsigned long long st_dev; /* Device. */
50 unsigned long long st_ino; /* File serial number. */ 50 unsigned long long st_ino; /* File serial number. */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 5afa5b52063..beafc156a53 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -432,6 +432,10 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
432 * together with the @destroy function, 432 * together with the @destroy function,
433 * enables driver-specific objects derived from a ttm_buffer_object. 433 * enables driver-specific objects derived from a ttm_buffer_object.
434 * On successful return, the object kref and list_kref are set to 1. 434 * On successful return, the object kref and list_kref are set to 1.
435 * If a failure occurs, the function will call the @destroy function, or
436 * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
437 * illegal and will likely cause memory corruption.
438 *
435 * Returns 439 * Returns
436 * -ENOMEM: Out of memory. 440 * -ENOMEM: Out of memory.
437 * -EINVAL: Invalid placement flags. 441 * -EINVAL: Invalid placement flags.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d01b4ddbdc5..8e0c848326b 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -206,14 +206,84 @@ struct ttm_tt {
206struct ttm_mem_type_manager; 206struct ttm_mem_type_manager;
207 207
208struct ttm_mem_type_manager_func { 208struct ttm_mem_type_manager_func {
209 /**
210 * struct ttm_mem_type_manager member init
211 *
212 * @man: Pointer to a memory type manager.
213 * @p_size: Implementation dependent, but typically the size of the
214 * range to be managed in pages.
215 *
216 * Called to initialize a private range manager. The function is
217 * expected to initialize the man::priv member.
218 * Returns 0 on success, negative error code on failure.
219 */
209 int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size); 220 int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
221
222 /**
223 * struct ttm_mem_type_manager member takedown
224 *
225 * @man: Pointer to a memory type manager.
226 *
227 * Called to undo the setup done in init. All allocated resources
228 * should be freed.
229 */
210 int (*takedown)(struct ttm_mem_type_manager *man); 230 int (*takedown)(struct ttm_mem_type_manager *man);
231
232 /**
233 * struct ttm_mem_type_manager member get_node
234 *
235 * @man: Pointer to a memory type manager.
236 * @bo: Pointer to the buffer object we're allocating space for.
237 * @placement: Placement details.
238 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
239 *
240 * This function should allocate space in the memory type managed
241 * by @man. Placement details if
242 * applicable are given by @placement. If successful,
243 * @mem::mm_node should be set to a non-null value, and
244 * @mem::start should be set to a value identifying the beginning
245 * of the range allocated, and the function should return zero.
 246 * If the memory region cannot accommodate the buffer object, @mem::mm_node
 247 * should be set to NULL, and the function should return 0.
 248 * If a system error occurred, preventing the request from being fulfilled,
249 * the function should return a negative error code.
250 *
251 * Note that @mem::mm_node will only be dereferenced by
252 * struct ttm_mem_type_manager functions and optionally by the driver,
253 * which has knowledge of the underlying type.
254 *
255 * This function may not be called from within atomic context, so
256 * an implementation can and must use either a mutex or a spinlock to
257 * protect any data structures managing the space.
258 */
211 int (*get_node)(struct ttm_mem_type_manager *man, 259 int (*get_node)(struct ttm_mem_type_manager *man,
212 struct ttm_buffer_object *bo, 260 struct ttm_buffer_object *bo,
213 struct ttm_placement *placement, 261 struct ttm_placement *placement,
214 struct ttm_mem_reg *mem); 262 struct ttm_mem_reg *mem);
263
264 /**
265 * struct ttm_mem_type_manager member put_node
266 *
267 * @man: Pointer to a memory type manager.
268 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
269 *
270 * This function frees memory type resources previously allocated
271 * and that are identified by @mem::mm_node and @mem::start. May not
272 * be called from within atomic context.
273 */
215 void (*put_node)(struct ttm_mem_type_manager *man, 274 void (*put_node)(struct ttm_mem_type_manager *man,
216 struct ttm_mem_reg *mem); 275 struct ttm_mem_reg *mem);
276
277 /**
278 * struct ttm_mem_type_manager member debug
279 *
280 * @man: Pointer to a memory type manager.
281 * @prefix: Prefix to be used in printout to identify the caller.
282 *
283 * This function is called to print out the state of the memory
284 * type manager to aid debugging of out-of-memory conditions.
285 * It may not be called from within atomic context.
286 */
217 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); 287 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
218}; 288};
219 289
@@ -231,14 +301,13 @@ struct ttm_mem_type_manager {
231 uint64_t size; 301 uint64_t size;
232 uint32_t available_caching; 302 uint32_t available_caching;
233 uint32_t default_caching; 303 uint32_t default_caching;
304 const struct ttm_mem_type_manager_func *func;
305 void *priv;
234 306
235 /* 307 /*
236 * Protected by the bdev->lru_lock. 308 * Protected by the global->lru_lock.
237 * TODO: Consider one lru_lock per ttm_mem_type_manager.
238 * Plays ill with list removal, though.
239 */ 309 */
240 const struct ttm_mem_type_manager_func *func; 310
241 void *priv;
242 struct list_head lru; 311 struct list_head lru;
243}; 312};
244 313
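To make the newly documented interface concrete, here is a minimal, hypothetical manager skeleton wired into ttm_mem_type_manager_func. The my_mgr_* names are illustrative only; a real driver would back get_node()/put_node() with an actual allocator such as drm_mm:

/* Illustrative skeleton of a driver-private memory type manager. */
static int my_mgr_init(struct ttm_mem_type_manager *man, unsigned long p_size)
{
	man->priv = NULL;	/* allocate and stash private allocator state here */
	return 0;
}

static int my_mgr_takedown(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;	/* free whatever init() allocated */
	return 0;
}

static int my_mgr_get_node(struct ttm_mem_type_manager *man,
			   struct ttm_buffer_object *bo,
			   struct ttm_placement *placement,
			   struct ttm_mem_reg *mem)
{
	/* "No space" answer: leave mm_node NULL and return 0. */
	mem->mm_node = NULL;
	return 0;
}

static void my_mgr_put_node(struct ttm_mem_type_manager *man,
			    struct ttm_mem_reg *mem)
{
	mem->mm_node = NULL;
}

static void my_mgr_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

static const struct ttm_mem_type_manager_func my_mgr_func = {
	.init		= my_mgr_init,
	.takedown	= my_mgr_takedown,
	.get_node	= my_mgr_get_node,
	.put_node	= my_mgr_put_node,
	.debug		= my_mgr_debug,
};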
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
new file mode 100644
index 00000000000..96c038e43d6
--- /dev/null
+++ b/include/linux/atomic.h
@@ -0,0 +1,37 @@
1#ifndef _LINUX_ATOMIC_H
2#define _LINUX_ATOMIC_H
3#include <asm/atomic.h>
4
5/**
6 * atomic_inc_not_zero_hint - increment if not null
7 * @v: pointer of type atomic_t
8 * @hint: probable value of the atomic before the increment
9 *
10 * This version of atomic_inc_not_zero() gives a hint of probable
11 * value of the atomic. This helps the processor avoid reading the memory
12 * before doing the atomic read/modify/write cycle, lowering the
13 * number of bus transactions on some arches.
14 *
15 * Returns: 0 if increment was not done, 1 otherwise.
16 */
17#ifndef atomic_inc_not_zero_hint
18static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
19{
20 int val, c = hint;
21
22 /* sanity test, should be removed by compiler if hint is a constant */
23 if (!hint)
24 return atomic_inc_not_zero(v);
25
26 do {
27 val = atomic_cmpxchg(v, c, c + 1);
28 if (val == c)
29 return 1;
30 c = val;
31 } while (c);
32
33 return 0;
34}
35#endif
36
37#endif /* _LINUX_ATOMIC_H */
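Usage sketch for the new helper: the hint is the refcount value the caller expects most of the time, so the cmpxchg loop starts there instead of performing a plain read first. struct obj and obj_tryget() below are hypothetical:

/* Hypothetical lookup-side refcount grab using the hinted helper. */
struct obj {
	atomic_t refcnt;	/* usually 1 while the object is live */
};

static struct obj *obj_tryget(struct obj *o)
{
	if (!atomic_inc_not_zero_hint(&o->refcnt, 1))
		return NULL;	/* refcount was zero: object is going away */
	return o;
}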
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ba679992d39..35dcdb3589b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -66,10 +66,6 @@
66#define bio_offset(bio) bio_iovec((bio))->bv_offset 66#define bio_offset(bio) bio_iovec((bio))->bv_offset
67#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) 67#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
68#define bio_sectors(bio) ((bio)->bi_size >> 9) 68#define bio_sectors(bio) ((bio)->bi_size >> 9)
69#define bio_empty_barrier(bio) \
70 ((bio->bi_rw & REQ_HARDBARRIER) && \
71 !bio_has_data(bio) && \
72 !(bio->bi_rw & REQ_DISCARD))
73 69
74static inline unsigned int bio_cur_bytes(struct bio *bio) 70static inline unsigned int bio_cur_bytes(struct bio *bio)
75{ 71{
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0437ab6bb54..46ad5197537 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -122,7 +122,6 @@ enum rq_flag_bits {
122 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ 122 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
123 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ 123 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
124 124
125 __REQ_HARDBARRIER, /* may not be passed by drive either */
126 __REQ_SYNC, /* request is sync (sync write or read) */ 125 __REQ_SYNC, /* request is sync (sync write or read) */
127 __REQ_META, /* metadata io request */ 126 __REQ_META, /* metadata io request */
128 __REQ_DISCARD, /* request to discard sectors */ 127 __REQ_DISCARD, /* request to discard sectors */
@@ -159,7 +158,6 @@ enum rq_flag_bits {
159#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) 158#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
160#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) 159#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
161#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) 160#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
162#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
163#define REQ_SYNC (1 << __REQ_SYNC) 161#define REQ_SYNC (1 << __REQ_SYNC)
164#define REQ_META (1 << __REQ_META) 162#define REQ_META (1 << __REQ_META)
165#define REQ_DISCARD (1 << __REQ_DISCARD) 163#define REQ_DISCARD (1 << __REQ_DISCARD)
@@ -168,8 +166,8 @@ enum rq_flag_bits {
168#define REQ_FAILFAST_MASK \ 166#define REQ_FAILFAST_MASK \
169 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 167 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
170#define REQ_COMMON_MASK \ 168#define REQ_COMMON_MASK \
171 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ 169 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
172 REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) 170 REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
173#define REQ_CLONE_MASK REQ_COMMON_MASK 171#define REQ_CLONE_MASK REQ_COMMON_MASK
174 172
175#define REQ_UNPLUG (1 << __REQ_UNPLUG) 173#define REQ_UNPLUG (1 << __REQ_UNPLUG)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5027a599077..aae86fd10c4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -552,8 +552,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
552 * it already be started by driver. 552 * it already be started by driver.
553 */ 553 */
554#define RQ_NOMERGE_FLAGS \ 554#define RQ_NOMERGE_FLAGS \
555 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \ 555 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
556 REQ_FLUSH | REQ_FUA)
557#define rq_mergeable(rq) \ 556#define rq_mergeable(rq) \
558 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ 557 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
559 (((rq)->cmd_flags & REQ_DISCARD) || \ 558 (((rq)->cmd_flags & REQ_DISCARD) || \
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 9b2a0158f39..ef44c7a0638 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
53 53
54 54
55extern const char *drbd_buildtag(void); 55extern const char *drbd_buildtag(void);
56#define REL_VERSION "8.3.9rc2" 56#define REL_VERSION "8.3.9"
57#define API_VERSION 88 57#define API_VERSION 88
58#define PRO_VERSION_MIN 86 58#define PRO_VERSION_MIN 86
59#define PRO_VERSION_MAX 95 59#define PRO_VERSION_MAX 95
diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h
index fc295d7ea46..781d4671415 100644
--- a/include/linux/fsl-diu-fb.h
+++ b/include/linux/fsl-diu-fb.h
@@ -54,7 +54,6 @@ struct aoi_display_offset {
54}; 54};
55 55
56#define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key) 56#define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key)
57#define MFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t)
58#define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8) 57#define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8)
59 58
60#define MFB_SET_ALPHA 0x80014d00 59#define MFB_SET_ALPHA 0x80014d00
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8a389b608ce..41cb31f14ee 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -96,11 +96,15 @@
96 */ 96 */
97#define in_nmi() (preempt_count() & NMI_MASK) 97#define in_nmi() (preempt_count() & NMI_MASK)
98 98
99#if defined(CONFIG_PREEMPT) 99#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
100# define PREEMPT_INATOMIC_BASE kernel_locked() 100# define PREEMPT_INATOMIC_BASE kernel_locked()
101# define PREEMPT_CHECK_OFFSET 1
102#else 101#else
103# define PREEMPT_INATOMIC_BASE 0 102# define PREEMPT_INATOMIC_BASE 0
103#endif
104
105#if defined(CONFIG_PREEMPT)
106# define PREEMPT_CHECK_OFFSET 1
107#else
104# define PREEMPT_CHECK_OFFSET 0 108# define PREEMPT_CHECK_OFFSET 0
105#endif 109#endif
106 110
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index e9138198e82..b676c585574 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,6 +5,7 @@
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/uaccess.h> 7#include <linux/uaccess.h>
8#include <linux/hardirq.h>
8 9
9#include <asm/cacheflush.h> 10#include <asm/cacheflush.h>
10 11
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index e844a0b1869..4bef5c55716 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -32,28 +32,6 @@
32 */ 32 */
33 33
34/* --- Bit algorithm adapters */ 34/* --- Bit algorithm adapters */
35#define I2C_HW_B_BT848 0x010005 /* BT848 video boards */
36#define I2C_HW_B_RIVA 0x010010 /* Riva based graphics cards */
37#define I2C_HW_B_ZR36067 0x010019 /* Zoran-36057/36067 based boards */
38#define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */ 35#define I2C_HW_B_CX2388x 0x01001b /* connexant 2388x based tv cards */
39#define I2C_HW_B_EM28XX 0x01001f /* em28xx video capture cards */
40#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */
41#define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */
42#define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */
43#define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */
44#define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */
45
46/* --- SGI adapters */
47#define I2C_HW_SGI_VINO 0x160000
48
49/* --- SMBus only adapters */
50#define I2C_HW_SMBUS_W9968CF 0x04000d
51#define I2C_HW_SMBUS_OV511 0x04000e /* OV511(+) USB 1.1 webcam ICs */
52#define I2C_HW_SMBUS_OV518 0x04000f /* OV518(+) USB 1.1 webcam ICs */
53#define I2C_HW_SMBUS_CAFE 0x040012 /* Marvell 88ALP01 "CAFE" cam */
54
55/* --- Miscellaneous adapters */
56#define I2C_HW_SAA7146 0x060000 /* SAA7146 video decoder bus */
57#define I2C_HW_SAA7134 0x090000 /* SAA7134 video decoder bus */
58 36
59#endif /* LINUX_I2C_ID_H */ 37#endif /* LINUX_I2C_ID_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 889b35abaed..56cfe23ffb3 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -353,7 +353,7 @@ struct i2c_algorithm {
353 */ 353 */
354struct i2c_adapter { 354struct i2c_adapter {
355 struct module *owner; 355 struct module *owner;
356 unsigned int id; 356 unsigned int id __deprecated;
357 unsigned int class; /* classes to allow probing for */ 357 unsigned int class; /* classes to allow probing for */
358 const struct i2c_algorithm *algo; /* the algorithm to access the bus */ 358 const struct i2c_algorithm *algo; /* the algorithm to access the bus */
359 void *algo_data; 359 void *algo_data;
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index 3c5d6b6e765..cec17cf6cac 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller 2 * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2010 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -77,13 +77,26 @@
77 /* Configuration Register1 */ 77 /* Configuration Register1 */
78#define ADP5588_AUTO_INC (1 << 7) 78#define ADP5588_AUTO_INC (1 << 7)
79#define ADP5588_GPIEM_CFG (1 << 6) 79#define ADP5588_GPIEM_CFG (1 << 6)
80#define ADP5588_OVR_FLOW_M (1 << 5)
80#define ADP5588_INT_CFG (1 << 4) 81#define ADP5588_INT_CFG (1 << 4)
82#define ADP5588_OVR_FLOW_IEN (1 << 3)
83#define ADP5588_K_LCK_IM (1 << 2)
81#define ADP5588_GPI_IEN (1 << 1) 84#define ADP5588_GPI_IEN (1 << 1)
85#define ADP5588_KE_IEN (1 << 0)
82 86
83/* Interrupt Status Register */ 87/* Interrupt Status Register */
88#define ADP5588_CMP2_INT (1 << 5)
89#define ADP5588_CMP1_INT (1 << 4)
90#define ADP5588_OVR_FLOW_INT (1 << 3)
91#define ADP5588_K_LCK_INT (1 << 2)
84#define ADP5588_GPI_INT (1 << 1) 92#define ADP5588_GPI_INT (1 << 1)
85#define ADP5588_KE_INT (1 << 0) 93#define ADP5588_KE_INT (1 << 0)
86 94
95/* Key Lock and Event Counter Register */
96#define ADP5588_K_LCK_EN (1 << 6)
97#define ADP5588_LCK21 0x30
98#define ADP5588_KEC 0xF
99
87#define ADP5588_MAXGPIO 18 100#define ADP5588_MAXGPIO 18
88#define ADP5588_BANK(offs) ((offs) >> 3) 101#define ADP5588_BANK(offs) ((offs) >> 3)
89#define ADP5588_BIT(offs) (1u << ((offs) & 0x7)) 102#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index c2f3a72712c..635e1faec41 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -339,6 +339,31 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
339 } 339 }
340} 340}
341 341
342/**
343 * vlan_get_protocol - get protocol EtherType.
344 * @skb: skbuff to query
345 *
346 * Returns the EtherType of the packet, regardless of whether it is
347 * vlan encapsulated (normal or hardware accelerated) or not.
348 */
349static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
350{
351 __be16 protocol = 0;
352
353 if (vlan_tx_tag_present(skb) ||
354 skb->protocol != cpu_to_be16(ETH_P_8021Q))
355 protocol = skb->protocol;
356 else {
357 __be16 proto, *protop;
358 protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
359 h_vlan_encapsulated_proto),
360 sizeof(proto), &proto);
361 if (likely(protop))
362 protocol = *protop;
363 }
364
365 return protocol;
366}
342#endif /* __KERNEL__ */ 367#endif /* __KERNEL__ */
343 368
344/* VLAN IOCTLs are found in sockios.h */ 369/* VLAN IOCTLs are found in sockios.h */
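A possible caller of the new helper, sketched for illustration (my_can_checksum() is not part of the patch): a driver that only offloads IPv4/IPv6 can key the decision off the encapsulated EtherType whether or not the frame carries a VLAN tag.

/* Illustrative TX-path check using vlan_get_protocol(). */
static bool my_can_checksum(const struct sk_buff *skb)
{
	__be16 proto = vlan_get_protocol(skb);

	return proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6);
}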
diff --git a/include/linux/input.h b/include/linux/input.h
index 51af441f3a2..6ef44465db8 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1406,6 +1406,8 @@ static inline void input_set_drvdata(struct input_dev *dev, void *data)
1406int __must_check input_register_device(struct input_dev *); 1406int __must_check input_register_device(struct input_dev *);
1407void input_unregister_device(struct input_dev *); 1407void input_unregister_device(struct input_dev *);
1408 1408
1409void input_reset_device(struct input_dev *);
1410
1409int __must_check input_register_handler(struct input_handler *); 1411int __must_check input_register_handler(struct input_handler *);
1410void input_unregister_handler(struct input_handler *); 1412void input_unregister_handler(struct input_handler *);
1411 1413
@@ -1421,7 +1423,7 @@ void input_release_device(struct input_handle *);
1421int input_open_device(struct input_handle *); 1423int input_open_device(struct input_handle *);
1422void input_close_device(struct input_handle *); 1424void input_close_device(struct input_handle *);
1423 1425
1424int input_flush_device(struct input_handle* handle, struct file* file); 1426int input_flush_device(struct input_handle *handle, struct file *file);
1425 1427
1426void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); 1428void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
1427void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); 1429void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 3e70b21884a..b2eee896dcb 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -76,7 +76,6 @@ int put_io_context(struct io_context *ioc);
76void exit_io_context(struct task_struct *task); 76void exit_io_context(struct task_struct *task);
77struct io_context *get_io_context(gfp_t gfp_flags, int node); 77struct io_context *get_io_context(gfp_t gfp_flags, int node);
78struct io_context *alloc_io_context(gfp_t gfp_flags, int node); 78struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
79void copy_io_context(struct io_context **pdst, struct io_context **psrc);
80#else 79#else
81static inline void exit_io_context(struct task_struct *task) 80static inline void exit_io_context(struct task_struct *task)
82{ 81{
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 450092c1e35..b6de9a6f701 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -17,13 +17,11 @@
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18#include <linux/log2.h> 18#include <linux/log2.h>
19#include <linux/typecheck.h> 19#include <linux/typecheck.h>
20#include <linux/printk.h>
20#include <linux/dynamic_debug.h> 21#include <linux/dynamic_debug.h>
21#include <asm/byteorder.h> 22#include <asm/byteorder.h>
22#include <asm/bug.h> 23#include <asm/bug.h>
23 24
24extern const char linux_banner[];
25extern const char linux_proc_banner[];
26
27#define USHRT_MAX ((u16)(~0U)) 25#define USHRT_MAX ((u16)(~0U))
28#define SHRT_MAX ((s16)(USHRT_MAX>>1)) 26#define SHRT_MAX ((s16)(USHRT_MAX>>1))
29#define SHRT_MIN ((s16)(-SHRT_MAX - 1)) 27#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
@@ -60,7 +58,7 @@ extern const char linux_proc_banner[];
60#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) 58#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
61#define roundup(x, y) ( \ 59#define roundup(x, y) ( \
62{ \ 60{ \
63 typeof(y) __y = y; \ 61 const typeof(y) __y = y; \
64 (((x) + (__y - 1)) / __y) * __y; \ 62 (((x) + (__y - 1)) / __y) * __y; \
65} \ 63} \
66) 64)
@@ -110,31 +108,6 @@ extern const char linux_proc_banner[];
110 */ 108 */
111#define lower_32_bits(n) ((u32)(n)) 109#define lower_32_bits(n) ((u32)(n))
112 110
113#define KERN_EMERG "<0>" /* system is unusable */
114#define KERN_ALERT "<1>" /* action must be taken immediately */
115#define KERN_CRIT "<2>" /* critical conditions */
116#define KERN_ERR "<3>" /* error conditions */
117#define KERN_WARNING "<4>" /* warning conditions */
118#define KERN_NOTICE "<5>" /* normal but significant condition */
119#define KERN_INFO "<6>" /* informational */
120#define KERN_DEBUG "<7>" /* debug-level messages */
121
122/* Use the default kernel loglevel */
123#define KERN_DEFAULT "<d>"
124/*
125 * Annotation for a "continued" line of log printout (only done after a
126 * line that had no enclosing \n). Only to be used by core/arch code
127 * during early bootup (a continued line is not SMP-safe otherwise).
128 */
129#define KERN_CONT "<c>"
130
131extern int console_printk[];
132
133#define console_loglevel (console_printk[0])
134#define default_message_loglevel (console_printk[1])
135#define minimum_console_loglevel (console_printk[2])
136#define default_console_loglevel (console_printk[3])
137
138struct completion; 111struct completion;
139struct pt_regs; 112struct pt_regs;
140struct user; 113struct user;
@@ -187,11 +160,6 @@ static inline void might_fault(void)
187} 160}
188#endif 161#endif
189 162
190struct va_format {
191 const char *fmt;
192 va_list *va;
193};
194
195extern struct atomic_notifier_head panic_notifier_list; 163extern struct atomic_notifier_head panic_notifier_list;
196extern long (*panic_blink)(int state); 164extern long (*panic_blink)(int state);
197NORET_TYPE void panic(const char * fmt, ...) 165NORET_TYPE void panic(const char * fmt, ...)
@@ -245,114 +213,8 @@ extern int func_ptr_is_kernel_text(void *ptr);
245struct pid; 213struct pid;
246extern struct pid *session_of_pgrp(struct pid *pgrp); 214extern struct pid *session_of_pgrp(struct pid *pgrp);
247 215
248/*
249 * FW_BUG
250 * Add this to a message where you are sure the firmware is buggy or behaves
251 * really stupid or out of spec. Be aware that the responsible BIOS developer
252 * should be able to fix this issue or at least get a concrete idea of the
253 * problem by reading your message without the need of looking at the kernel
254 * code.
255 *
256 * Use it for definite and high priority BIOS bugs.
257 *
258 * FW_WARN
259 * Use it for not that clear (e.g. could the kernel messed up things already?)
260 * and medium priority BIOS bugs.
261 *
262 * FW_INFO
263 * Use this one if you want to tell the user or vendor about something
264 * suspicious, but generally harmless related to the firmware.
265 *
266 * Use it for information or very low priority BIOS bugs.
267 */
268#define FW_BUG "[Firmware Bug]: "
269#define FW_WARN "[Firmware Warn]: "
270#define FW_INFO "[Firmware Info]: "
271
272/*
273 * HW_ERR
274 * Add this to a message for hardware errors, so that user can report
275 * it to hardware vendor instead of LKML or software vendor.
276 */
277#define HW_ERR "[Hardware Error]: "
278
279#ifdef CONFIG_PRINTK
280asmlinkage int vprintk(const char *fmt, va_list args)
281 __attribute__ ((format (printf, 1, 0)));
282asmlinkage int printk(const char * fmt, ...)
283 __attribute__ ((format (printf, 1, 2))) __cold;
284
285/*
286 * Please don't use printk_ratelimit(), because it shares ratelimiting state
287 * with all other unrelated printk_ratelimit() callsites. Instead use
288 * printk_ratelimited() or plain old __ratelimit().
289 */
290extern int __printk_ratelimit(const char *func);
291#define printk_ratelimit() __printk_ratelimit(__func__)
292extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
293 unsigned int interval_msec);
294
295extern int printk_delay_msec;
296
297/*
298 * Print a one-time message (analogous to WARN_ONCE() et al):
299 */
300#define printk_once(x...) ({ \
301 static bool __print_once; \
302 \
303 if (!__print_once) { \
304 __print_once = true; \
305 printk(x); \
306 } \
307})
308
309void log_buf_kexec_setup(void);
310#else
311static inline int vprintk(const char *s, va_list args)
312 __attribute__ ((format (printf, 1, 0)));
313static inline int vprintk(const char *s, va_list args) { return 0; }
314static inline int printk(const char *s, ...)
315 __attribute__ ((format (printf, 1, 2)));
316static inline int __cold printk(const char *s, ...) { return 0; }
317static inline int printk_ratelimit(void) { return 0; }
318static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
319 unsigned int interval_msec) \
320 { return false; }
321
322/* No effect, but we still get type checking even in the !PRINTK case: */
323#define printk_once(x...) printk(x)
324
325static inline void log_buf_kexec_setup(void)
326{
327}
328#endif
329
330/*
331 * Dummy printk for disabled debugging statements to use whilst maintaining
332 * gcc's format and side-effect checking.
333 */
334static inline __attribute__ ((format (printf, 1, 2)))
335int no_printk(const char *s, ...) { return 0; }
336
337extern int printk_needs_cpu(int cpu);
338extern void printk_tick(void);
339
340extern void asmlinkage __attribute__((format(printf, 1, 2)))
341 early_printk(const char *fmt, ...);
342
343unsigned long int_sqrt(unsigned long); 216unsigned long int_sqrt(unsigned long);
344 217
345static inline void console_silent(void)
346{
347 console_loglevel = 0;
348}
349
350static inline void console_verbose(void)
351{
352 if (console_loglevel)
353 console_loglevel = 15;
354}
355
356extern void bust_spinlocks(int yes); 218extern void bust_spinlocks(int yes);
357extern void wake_up_klogd(void); 219extern void wake_up_klogd(void);
358extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ 220extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
@@ -389,22 +251,6 @@ extern enum system_states {
389#define TAINT_CRAP 10 251#define TAINT_CRAP 10
390#define TAINT_FIRMWARE_WORKAROUND 11 252#define TAINT_FIRMWARE_WORKAROUND 11
391 253
392extern void dump_stack(void) __cold;
393
394enum {
395 DUMP_PREFIX_NONE,
396 DUMP_PREFIX_ADDRESS,
397 DUMP_PREFIX_OFFSET
398};
399extern void hex_dump_to_buffer(const void *buf, size_t len,
400 int rowsize, int groupsize,
401 char *linebuf, size_t linebuflen, bool ascii);
402extern void print_hex_dump(const char *level, const char *prefix_str,
403 int prefix_type, int rowsize, int groupsize,
404 const void *buf, size_t len, bool ascii);
405extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
406 const void *buf, size_t len);
407
408extern const char hex_asc[]; 254extern const char hex_asc[];
409#define hex_asc_lo(x) hex_asc[((x) & 0x0f)] 255#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
410#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] 256#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
@@ -418,94 +264,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
418 264
419extern int hex_to_bin(char ch); 265extern int hex_to_bin(char ch);
420 266
421#ifndef pr_fmt
422#define pr_fmt(fmt) fmt
423#endif
424
425#define pr_emerg(fmt, ...) \
426 printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
427#define pr_alert(fmt, ...) \
428 printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
429#define pr_crit(fmt, ...) \
430 printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
431#define pr_err(fmt, ...) \
432 printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
433#define pr_warning(fmt, ...) \
434 printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
435#define pr_warn pr_warning
436#define pr_notice(fmt, ...) \
437 printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
438#define pr_info(fmt, ...) \
439 printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
440#define pr_cont(fmt, ...) \
441 printk(KERN_CONT fmt, ##__VA_ARGS__)
442
443/* pr_devel() should produce zero code unless DEBUG is defined */
444#ifdef DEBUG
445#define pr_devel(fmt, ...) \
446 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
447#else
448#define pr_devel(fmt, ...) \
449 ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
450#endif
451
452/* If you are writing a driver, please use dev_dbg instead */
453#if defined(DEBUG)
454#define pr_debug(fmt, ...) \
455 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
456#elif defined(CONFIG_DYNAMIC_DEBUG)
457/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
458#define pr_debug(fmt, ...) \
459 dynamic_pr_debug(fmt, ##__VA_ARGS__)
460#else
461#define pr_debug(fmt, ...) \
462 ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
463#endif
464
465/*
466 * ratelimited messages with local ratelimit_state,
467 * no local ratelimit_state used in the !PRINTK case
468 */
469#ifdef CONFIG_PRINTK
470#define printk_ratelimited(fmt, ...) ({ \
471 static DEFINE_RATELIMIT_STATE(_rs, \
472 DEFAULT_RATELIMIT_INTERVAL, \
473 DEFAULT_RATELIMIT_BURST); \
474 \
475 if (__ratelimit(&_rs)) \
476 printk(fmt, ##__VA_ARGS__); \
477})
478#else
479/* No effect, but we still get type checking even in the !PRINTK case: */
480#define printk_ratelimited printk
481#endif
482
483#define pr_emerg_ratelimited(fmt, ...) \
484 printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
485#define pr_alert_ratelimited(fmt, ...) \
486 printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
487#define pr_crit_ratelimited(fmt, ...) \
488 printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
489#define pr_err_ratelimited(fmt, ...) \
490 printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
491#define pr_warning_ratelimited(fmt, ...) \
492 printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
493#define pr_warn_ratelimited pr_warning_ratelimited
494#define pr_notice_ratelimited(fmt, ...) \
495 printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
496#define pr_info_ratelimited(fmt, ...) \
497 printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
498/* no pr_cont_ratelimited, don't do that... */
499/* If you are writing a driver, please use dev_dbg instead */
500#if defined(DEBUG)
501#define pr_debug_ratelimited(fmt, ...) \
502 printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
503#else
504#define pr_debug_ratelimited(fmt, ...) \
505 ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
506 ##__VA_ARGS__); 0; })
507#endif
508
509/* 267/*
510 * General tracing related utility functions - trace_printk(), 268 * General tracing related utility functions - trace_printk(),
511 * tracing_on/tracing_off and tracing_start()/tracing_stop 269 * tracing_on/tracing_off and tracing_start()/tracing_stop
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h
new file mode 100644
index 00000000000..38368d785f0
--- /dev/null
+++ b/include/linux/leds-lp5521.h
@@ -0,0 +1,47 @@
1/*
2 * LP5521 LED chip driver.
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef __LINUX_LP5521_H
24#define __LINUX_LP5521_H
25
26/* See Documentation/leds/leds-lp5521.txt */
27
28struct lp5521_led_config {
29 u8 chan_nr;
30 u8 led_current; /* mA x10, 0 if led is not connected */
31 u8 max_current;
32};
33
34#define LP5521_CLOCK_AUTO 0
35#define LP5521_CLOCK_INT 1
36#define LP5521_CLOCK_EXT 2
37
38struct lp5521_platform_data {
39 struct lp5521_led_config *led_config;
40 u8 num_channels;
41 u8 clock_mode;
42 int (*setup_resources)(void);
43 void (*release_resources)(void);
44 void (*enable)(bool state);
45};
46
47#endif /* __LINUX_LP5521_H */
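
As an illustration only (not part of this patch), a board file could wire up this platform data roughly as follows; the channel count, current values and variable names are hypothetical:

#include <linux/kernel.h>
#include <linux/leds-lp5521.h>

/* Hypothetical board wiring: three channels at 5.0 mA (led_current is mA x10). */
static struct lp5521_led_config example_lp5521_leds[] = {
	{ .chan_nr = 0, .led_current = 50, .max_current = 130 },
	{ .chan_nr = 1, .led_current = 50, .max_current = 130 },
	{ .chan_nr = 2, .led_current = 50, .max_current = 130 },
};

static struct lp5521_platform_data example_lp5521_pdata = {
	.led_config	= example_lp5521_leds,
	.num_channels	= ARRAY_SIZE(example_lp5521_leds),
	.clock_mode	= LP5521_CLOCK_AUTO,
	/* setup_resources/release_resources/enable hooks are optional and left unset here */
};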
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h
new file mode 100644
index 00000000000..796747637b8
--- /dev/null
+++ b/include/linux/leds-lp5523.h
@@ -0,0 +1,47 @@
1/*
2 * LP5523 LED Driver
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef __LINUX_LP5523_H
24#define __LINUX_LP5523_H
25
26/* See Documentation/leds/leds-lp5523.txt */
27
28struct lp5523_led_config {
29 u8 chan_nr;
30 u8 led_current; /* mA x10, 0 if led is not connected */
31 u8 max_current;
32};
33
34#define LP5523_CLOCK_AUTO 0
35#define LP5523_CLOCK_INT 1
36#define LP5523_CLOCK_EXT 2
37
38struct lp5523_platform_data {
39 struct lp5523_led_config *led_config;
40 u8 num_channels;
41 u8 clock_mode;
42 int (*setup_resources)(void);
43 void (*release_resources)(void);
44 void (*enable)(bool state);
45};
46
47#endif /* __LINUX_LP5523_H */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index ba6986a1166..0f19df9e37b 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -15,6 +15,7 @@
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <linux/rwsem.h> 17#include <linux/rwsem.h>
18#include <linux/timer.h>
18 19
19struct device; 20struct device;
20/* 21/*
@@ -45,10 +46,14 @@ struct led_classdev {
45 /* Get LED brightness level */ 46 /* Get LED brightness level */
46 enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); 47 enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
47 48
48 /* Activate hardware accelerated blink, delays are in 49 /*
49 * miliseconds and if none is provided then a sensible default 50 * Activate hardware accelerated blink, delays are in milliseconds
50 * should be chosen. The call can adjust the timings if it can't 51 * and if both are zero then a sensible default should be chosen.
51 * match the values specified exactly. */ 52 * The call should adjust the timings in that case and if it can't
53 * match the values specified exactly.
54 * Deactivate blinking again when the brightness is set to a fixed
55 * value via the brightness_set() callback.
56 */
52 int (*blink_set)(struct led_classdev *led_cdev, 57 int (*blink_set)(struct led_classdev *led_cdev,
53 unsigned long *delay_on, 58 unsigned long *delay_on,
54 unsigned long *delay_off); 59 unsigned long *delay_off);
@@ -57,6 +62,10 @@ struct led_classdev {
57 struct list_head node; /* LED Device list */ 62 struct list_head node; /* LED Device list */
58 const char *default_trigger; /* Trigger to use */ 63 const char *default_trigger; /* Trigger to use */
59 64
65 unsigned long blink_delay_on, blink_delay_off;
66 struct timer_list blink_timer;
67 int blink_brightness;
68
60#ifdef CONFIG_LEDS_TRIGGERS 69#ifdef CONFIG_LEDS_TRIGGERS
61 /* Protects the trigger data below */ 70 /* Protects the trigger data below */
62 struct rw_semaphore trigger_lock; 71 struct rw_semaphore trigger_lock;
@@ -73,6 +82,36 @@ extern void led_classdev_unregister(struct led_classdev *led_cdev);
73extern void led_classdev_suspend(struct led_classdev *led_cdev); 82extern void led_classdev_suspend(struct led_classdev *led_cdev);
74extern void led_classdev_resume(struct led_classdev *led_cdev); 83extern void led_classdev_resume(struct led_classdev *led_cdev);
75 84
85/**
86 * led_blink_set - set blinking with software fallback
87 * @led_cdev: the LED to start blinking
88 * @delay_on: the time it should be on (in ms)
 89 * @delay_off: the time it should be off (in ms)
90 *
91 * This function makes the LED blink, attempting to use the
92 * hardware acceleration if possible, but falling back to
93 * software blinking if there is no hardware blinking or if
94 * the LED refuses the passed values.
95 *
96 * Note that if software blinking is active, simply calling
 97 * led_cdev->brightness_set() will not stop the blinking;
 98 * use led_brightness_set() instead.
99 */
100extern void led_blink_set(struct led_classdev *led_cdev,
101 unsigned long *delay_on,
102 unsigned long *delay_off);
103/**
104 * led_brightness_set - set LED brightness
105 * @led_cdev: the LED to set
106 * @brightness: the brightness to set it to
107 *
108 * Set an LED's brightness, and, if necessary, cancel the
109 * software blink timer that implements blinking when the
110 * hardware doesn't.
111 */
112extern void led_brightness_set(struct led_classdev *led_cdev,
113 enum led_brightness brightness);
114
76/* 115/*
77 * LED Triggers 116 * LED Triggers
78 */ 117 */
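
A minimal usage sketch of the two new helpers, assuming a trigger or driver already holds a struct led_classdev pointer (the 500 ms values are arbitrary):

#include <linux/leds.h>

static void example_start_blink(struct led_classdev *led_cdev)
{
	unsigned long delay_on = 500, delay_off = 500;	/* ms, arbitrary values */

	/* Prefers the hardware blink_set() hook, falls back to the software timer. */
	led_blink_set(led_cdev, &delay_on, &delay_off);
}

static void example_stop_blink(struct led_classdev *led_cdev)
{
	/* Unlike calling ->brightness_set() directly, this also cancels software blinking. */
	led_brightness_set(led_cdev, LED_OFF);
}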
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index d19e2114fd8..5c99da1078a 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -59,19 +59,19 @@ struct sh_mmcif_plat_data {
59#define MMCIF_CE_HOST_STS2 0x0000004C 59#define MMCIF_CE_HOST_STS2 0x0000004C
60#define MMCIF_CE_VERSION 0x0000007C 60#define MMCIF_CE_VERSION 0x0000007C
61 61
62extern inline u32 sh_mmcif_readl(void __iomem *addr, int reg) 62static inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
63{ 63{
64 return readl(addr + reg); 64 return readl(addr + reg);
65} 65}
66 66
67extern inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val) 67static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
68{ 68{
69 writel(val, addr + reg); 69 writel(val, addr + reg);
70} 70}
71 71
72#define SH_MMCIF_BBS 512 /* boot block size */ 72#define SH_MMCIF_BBS 512 /* boot block size */
73 73
74extern inline void sh_mmcif_boot_cmd_send(void __iomem *base, 74static inline void sh_mmcif_boot_cmd_send(void __iomem *base,
75 unsigned long cmd, unsigned long arg) 75 unsigned long cmd, unsigned long arg)
76{ 76{
77 sh_mmcif_writel(base, MMCIF_CE_INT, 0); 77 sh_mmcif_writel(base, MMCIF_CE_INT, 0);
@@ -79,7 +79,7 @@ extern inline void sh_mmcif_boot_cmd_send(void __iomem *base,
79 sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd); 79 sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd);
80} 80}
81 81
82extern inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask) 82static inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
83{ 83{
84 unsigned long tmp; 84 unsigned long tmp;
85 int cnt; 85 int cnt;
@@ -95,14 +95,14 @@ extern inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
95 return -1; 95 return -1;
96} 96}
97 97
98extern inline int sh_mmcif_boot_cmd(void __iomem *base, 98static inline int sh_mmcif_boot_cmd(void __iomem *base,
99 unsigned long cmd, unsigned long arg) 99 unsigned long cmd, unsigned long arg)
100{ 100{
101 sh_mmcif_boot_cmd_send(base, cmd, arg); 101 sh_mmcif_boot_cmd_send(base, cmd, arg);
102 return sh_mmcif_boot_cmd_poll(base, 0x00010000); 102 return sh_mmcif_boot_cmd_poll(base, 0x00010000);
103} 103}
104 104
105extern inline int sh_mmcif_boot_do_read_single(void __iomem *base, 105static inline int sh_mmcif_boot_do_read_single(void __iomem *base,
106 unsigned int block_nr, 106 unsigned int block_nr,
107 unsigned long *buf) 107 unsigned long *buf)
108{ 108{
@@ -125,7 +125,7 @@ extern inline int sh_mmcif_boot_do_read_single(void __iomem *base,
125 return 0; 125 return 0;
126} 126}
127 127
128extern inline int sh_mmcif_boot_do_read(void __iomem *base, 128static inline int sh_mmcif_boot_do_read(void __iomem *base,
129 unsigned long first_block, 129 unsigned long first_block,
130 unsigned long nr_blocks, 130 unsigned long nr_blocks,
131 void *buf) 131 void *buf)
@@ -143,7 +143,7 @@ extern inline int sh_mmcif_boot_do_read(void __iomem *base,
143 return ret; 143 return ret;
144} 144}
145 145
146extern inline void sh_mmcif_boot_init(void __iomem *base) 146static inline void sh_mmcif_boot_init(void __iomem *base)
147{ 147{
148 unsigned long tmp; 148 unsigned long tmp;
149 149
@@ -177,7 +177,7 @@ extern inline void sh_mmcif_boot_init(void __iomem *base)
177 sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000); 177 sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000);
178} 178}
179 179
180extern inline void sh_mmcif_boot_slurp(void __iomem *base, 180static inline void sh_mmcif_boot_slurp(void __iomem *base,
181 unsigned char *buf, 181 unsigned char *buf,
182 unsigned long no_bytes) 182 unsigned long no_bytes)
183{ 183{
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 072652d94d9..d8fd2c23a1b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1554,6 +1554,11 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
1554 1554
1555static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 1555static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
1556{ 1556{
1557 if (WARN_ON(!dev_queue)) {
1558 printk(KERN_INFO "netif_stop_queue() cannot be called before "
1559 "register_netdev()");
1560 return;
1561 }
1557 set_bit(__QUEUE_STATE_XOFF, &dev_queue->state); 1562 set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
1558} 1563}
1559 1564
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 89341c32631..03317c8d407 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -215,7 +215,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
215 int ret; 215 int ret;
216 216
217 if (!cond || 217 if (!cond ||
218 (ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1)) 218 ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
219 ret = okfn(skb); 219 ret = okfn(skb);
220 return ret; 220 return ret;
221} 221}
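
The NF_HOOK_COND change above is an operator precedence fix: '==' binds tighter than '=', so the old form assigned the 0/1 result of the comparison to ret instead of the verdict returned by nf_hook_thresh(). A standalone illustration (not kernel code):

/* Standalone illustration of the precedence pitfall fixed above. */
static int demo(int (*thresh)(void), int (*okfn)(void))
{
	int ret;

	/* Buggy form: equivalent to ret = (thresh() == 1), which loses the verdict. */
	/* if ((ret = thresh() == 1)) ... */

	/* Fixed form: ret holds thresh()'s return value, which is then compared. */
	if ((ret = thresh()) == 1)
		ret = okfn();

	return ret;
}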
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 057bf22a832..40150f34598 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -747,6 +747,16 @@ struct perf_event {
747 u64 tstamp_running; 747 u64 tstamp_running;
748 u64 tstamp_stopped; 748 u64 tstamp_stopped;
749 749
750 /*
751 * timestamp shadows the actual context timing but it can
752 * be safely used in NMI interrupt context. It reflects the
753 * context time as it was when the event was last scheduled in.
754 *
755 * ctx_time already accounts for ctx->timestamp. Therefore to
756 * compute ctx_time for a sample, simply add perf_clock().
757 */
758 u64 shadow_ctx_time;
759
750 struct perf_event_attr attr; 760 struct perf_event_attr attr;
751 struct hw_perf_event hw; 761 struct hw_perf_event hw;
752 762
diff --git a/include/linux/printk.h b/include/linux/printk.h
new file mode 100644
index 00000000000..b772ca5fbdf
--- /dev/null
+++ b/include/linux/printk.h
@@ -0,0 +1,248 @@
1#ifndef __KERNEL_PRINTK__
2#define __KERNEL_PRINTK__
3
4extern const char linux_banner[];
5extern const char linux_proc_banner[];
6
7#define KERN_EMERG "<0>" /* system is unusable */
8#define KERN_ALERT "<1>" /* action must be taken immediately */
9#define KERN_CRIT "<2>" /* critical conditions */
10#define KERN_ERR "<3>" /* error conditions */
11#define KERN_WARNING "<4>" /* warning conditions */
12#define KERN_NOTICE "<5>" /* normal but significant condition */
13#define KERN_INFO "<6>" /* informational */
14#define KERN_DEBUG "<7>" /* debug-level messages */
15
16/* Use the default kernel loglevel */
17#define KERN_DEFAULT "<d>"
18/*
19 * Annotation for a "continued" line of log printout (only done after a
20 * line that had no enclosing \n). Only to be used by core/arch code
21 * during early bootup (a continued line is not SMP-safe otherwise).
22 */
23#define KERN_CONT "<c>"
24
25extern int console_printk[];
26
27#define console_loglevel (console_printk[0])
28#define default_message_loglevel (console_printk[1])
29#define minimum_console_loglevel (console_printk[2])
30#define default_console_loglevel (console_printk[3])
31
32struct va_format {
33 const char *fmt;
34 va_list *va;
35};
36
37/*
38 * FW_BUG
39 * Add this to a message where you are sure the firmware is buggy or behaves
40 * really stupid or out of spec. Be aware that the responsible BIOS developer
41 * should be able to fix this issue or at least get a concrete idea of the
42 * problem by reading your message without the need of looking at the kernel
43 * code.
44 *
45 * Use it for definite and high priority BIOS bugs.
46 *
47 * FW_WARN
 48 * Use it for cases that are not as clear-cut (e.g. the kernel may already
 49 * have messed things up) and for medium priority BIOS bugs.
50 *
51 * FW_INFO
52 * Use this one if you want to tell the user or vendor about something
53 * suspicious, but generally harmless related to the firmware.
54 *
55 * Use it for information or very low priority BIOS bugs.
56 */
57#define FW_BUG "[Firmware Bug]: "
58#define FW_WARN "[Firmware Warn]: "
59#define FW_INFO "[Firmware Info]: "
60
61/*
62 * HW_ERR
63 * Add this to a message for hardware errors, so that user can report
64 * it to hardware vendor instead of LKML or software vendor.
65 */
66#define HW_ERR "[Hardware Error]: "
67
68#ifdef CONFIG_PRINTK
69asmlinkage int vprintk(const char *fmt, va_list args)
70 __attribute__ ((format (printf, 1, 0)));
71asmlinkage int printk(const char * fmt, ...)
72 __attribute__ ((format (printf, 1, 2))) __cold;
73
74/*
75 * Please don't use printk_ratelimit(), because it shares ratelimiting state
76 * with all other unrelated printk_ratelimit() callsites. Instead use
77 * printk_ratelimited() or plain old __ratelimit().
78 */
79extern int __printk_ratelimit(const char *func);
80#define printk_ratelimit() __printk_ratelimit(__func__)
81extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
82 unsigned int interval_msec);
83
84extern int printk_delay_msec;
85extern int dmesg_restrict;
86
87/*
88 * Print a one-time message (analogous to WARN_ONCE() et al):
89 */
90#define printk_once(x...) ({ \
91 static bool __print_once; \
92 \
93 if (!__print_once) { \
94 __print_once = true; \
95 printk(x); \
96 } \
97})
98
99void log_buf_kexec_setup(void);
100#else
101static inline int vprintk(const char *s, va_list args)
102 __attribute__ ((format (printf, 1, 0)));
103static inline int vprintk(const char *s, va_list args) { return 0; }
104static inline int printk(const char *s, ...)
105 __attribute__ ((format (printf, 1, 2)));
106static inline int __cold printk(const char *s, ...) { return 0; }
107static inline int printk_ratelimit(void) { return 0; }
108static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
109 unsigned int interval_msec) \
110 { return false; }
111
112/* No effect, but we still get type checking even in the !PRINTK case: */
113#define printk_once(x...) printk(x)
114
115static inline void log_buf_kexec_setup(void)
116{
117}
118#endif
119
120/*
121 * Dummy printk for disabled debugging statements to use whilst maintaining
122 * gcc's format and side-effect checking.
123 */
124static inline __attribute__ ((format (printf, 1, 2)))
125int no_printk(const char *s, ...) { return 0; }
126
127extern int printk_needs_cpu(int cpu);
128extern void printk_tick(void);
129
130extern void asmlinkage __attribute__((format(printf, 1, 2)))
131 early_printk(const char *fmt, ...);
132
133static inline void console_silent(void)
134{
135 console_loglevel = 0;
136}
137
138static inline void console_verbose(void)
139{
140 if (console_loglevel)
141 console_loglevel = 15;
142}
143
144extern void dump_stack(void) __cold;
145
146enum {
147 DUMP_PREFIX_NONE,
148 DUMP_PREFIX_ADDRESS,
149 DUMP_PREFIX_OFFSET
150};
151extern void hex_dump_to_buffer(const void *buf, size_t len,
152 int rowsize, int groupsize,
153 char *linebuf, size_t linebuflen, bool ascii);
154extern void print_hex_dump(const char *level, const char *prefix_str,
155 int prefix_type, int rowsize, int groupsize,
156 const void *buf, size_t len, bool ascii);
157extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
158 const void *buf, size_t len);
159
160#ifndef pr_fmt
161#define pr_fmt(fmt) fmt
162#endif
163
164#define pr_emerg(fmt, ...) \
165 printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
166#define pr_alert(fmt, ...) \
167 printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
168#define pr_crit(fmt, ...) \
169 printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
170#define pr_err(fmt, ...) \
171 printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
172#define pr_warning(fmt, ...) \
173 printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
174#define pr_warn pr_warning
175#define pr_notice(fmt, ...) \
176 printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
177#define pr_info(fmt, ...) \
178 printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
179#define pr_cont(fmt, ...) \
180 printk(KERN_CONT fmt, ##__VA_ARGS__)
181
182/* pr_devel() should produce zero code unless DEBUG is defined */
183#ifdef DEBUG
184#define pr_devel(fmt, ...) \
185 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
186#else
187#define pr_devel(fmt, ...) \
188 ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
189#endif
190
191/* If you are writing a driver, please use dev_dbg instead */
192#if defined(DEBUG)
193#define pr_debug(fmt, ...) \
194 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
195#elif defined(CONFIG_DYNAMIC_DEBUG)
196/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
197#define pr_debug(fmt, ...) \
198 dynamic_pr_debug(fmt, ##__VA_ARGS__)
199#else
200#define pr_debug(fmt, ...) \
201 ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
202#endif
203
204/*
205 * ratelimited messages with local ratelimit_state,
206 * no local ratelimit_state used in the !PRINTK case
207 */
208#ifdef CONFIG_PRINTK
209#define printk_ratelimited(fmt, ...) ({ \
210 static DEFINE_RATELIMIT_STATE(_rs, \
211 DEFAULT_RATELIMIT_INTERVAL, \
212 DEFAULT_RATELIMIT_BURST); \
213 \
214 if (__ratelimit(&_rs)) \
215 printk(fmt, ##__VA_ARGS__); \
216})
217#else
218/* No effect, but we still get type checking even in the !PRINTK case: */
219#define printk_ratelimited printk
220#endif
221
222#define pr_emerg_ratelimited(fmt, ...) \
223 printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
224#define pr_alert_ratelimited(fmt, ...) \
225 printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
226#define pr_crit_ratelimited(fmt, ...) \
227 printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
228#define pr_err_ratelimited(fmt, ...) \
229 printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
230#define pr_warning_ratelimited(fmt, ...) \
231 printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
232#define pr_warn_ratelimited pr_warning_ratelimited
233#define pr_notice_ratelimited(fmt, ...) \
234 printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
235#define pr_info_ratelimited(fmt, ...) \
236 printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
237/* no pr_cont_ratelimited, don't do that... */
238/* If you are writing a driver, please use dev_dbg instead */
239#if defined(DEBUG)
240#define pr_debug_ratelimited(fmt, ...) \
241 printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
242#else
243#define pr_debug_ratelimited(fmt, ...) \
244 ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
245 ##__VA_ARGS__); 0; })
246#endif
247
248#endif
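
A hedged example of how a source file might consume this header; the "mydrv" prefix and the function below are made up. Note that pr_fmt() has to be defined before the header is pulled in for the prefix to take effect:

/* Hypothetical usage; "mydrv" and the function are illustrative only. */
#define pr_fmt(fmt) "mydrv: " fmt

#include <linux/kernel.h>

static int mydrv_do_step(int err)
{
	pr_info("starting step\n");	/* emitted as "mydrv: starting step" at KERN_INFO */

	if (err)
		pr_err_ratelimited("step failed: %d\n", err);	/* per-callsite ratelimit state */

	pr_debug("step done\n");	/* compiled out unless DEBUG or dynamic debug is enabled */
	return err;
}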
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 01b3d759f1f..e031e1a486d 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -8,6 +8,7 @@ struct platform_pwm_backlight_data {
8 int pwm_id; 8 int pwm_id;
9 unsigned int max_brightness; 9 unsigned int max_brightness;
10 unsigned int dft_brightness; 10 unsigned int dft_brightness;
11 unsigned int lth_brightness;
11 unsigned int pwm_period_ns; 12 unsigned int pwm_period_ns;
12 int (*init)(struct device *dev); 13 int (*init)(struct device *dev);
13 int (*notify)(struct device *dev, int brightness); 14 int (*notify)(struct device *dev, int brightness);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index a39cbed9ee1..ab2baa5c488 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -34,19 +34,13 @@
34 * needed for RCU lookups (because root->height is unreliable). The only 34 * needed for RCU lookups (because root->height is unreliable). The only
35 * time callers need worry about this is when doing a lookup_slot under 35 * time callers need worry about this is when doing a lookup_slot under
36 * RCU. 36 * RCU.
37 *
38 * Indirect pointer in fact is also used to tag the last pointer of a node
39 * when it is shrunk, before we rcu free the node. See shrink code for
40 * details.
37 */ 41 */
38#define RADIX_TREE_INDIRECT_PTR 1 42#define RADIX_TREE_INDIRECT_PTR 1
39#define RADIX_TREE_RETRY ((void *)-1UL)
40
41static inline void *radix_tree_ptr_to_indirect(void *ptr)
42{
43 return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
44}
45 43
46static inline void *radix_tree_indirect_to_ptr(void *ptr)
47{
48 return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
49}
50#define radix_tree_indirect_to_ptr(ptr) \ 44#define radix_tree_indirect_to_ptr(ptr) \
51 radix_tree_indirect_to_ptr((void __force *)(ptr)) 45 radix_tree_indirect_to_ptr((void __force *)(ptr))
52 46
@@ -140,16 +134,29 @@ do { \
140 * removed. 134 * removed.
141 * 135 *
142 * For use with radix_tree_lookup_slot(). Caller must hold tree at least read 136 * For use with radix_tree_lookup_slot(). Caller must hold tree at least read
143 * locked across slot lookup and dereference. More likely, will be used with 137 * locked across slot lookup and dereference. Not required if write lock is
144 * radix_tree_replace_slot(), as well, so caller will hold tree write locked. 138 * held (ie. items cannot be concurrently inserted).
139 *
140 * radix_tree_deref_retry must be used to confirm validity of the pointer if
141 * only the read lock is held.
145 */ 142 */
146static inline void *radix_tree_deref_slot(void **pslot) 143static inline void *radix_tree_deref_slot(void **pslot)
147{ 144{
148 void *ret = rcu_dereference(*pslot); 145 return rcu_dereference(*pslot);
149 if (unlikely(radix_tree_is_indirect_ptr(ret)))
150 ret = RADIX_TREE_RETRY;
151 return ret;
152} 146}
147
148/**
149 * radix_tree_deref_retry - check radix_tree_deref_slot
150 * @arg: pointer returned by radix_tree_deref_slot
151 * Returns: 0 if retry is not required, otherwise retry is required
152 *
153 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
154 */
155static inline int radix_tree_deref_retry(void *arg)
156{
157 return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
158}
159
153/** 160/**
154 * radix_tree_replace_slot - replace item in a slot 161 * radix_tree_replace_slot - replace item in a slot
155 * @pslot: pointer to slot, returned by radix_tree_lookup_slot 162 * @pslot: pointer to slot, returned by radix_tree_lookup_slot
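
The new helper implies the following lookup pattern under RCU; a minimal sketch (function and label names are illustrative, and the returned item must still be pinned by the caller's own rules before rcu_read_unlock()):

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/* Illustrative RCU lookup using radix_tree_deref_retry(). */
static void *example_lookup(struct radix_tree_root *root, unsigned long index)
{
	void **slot;
	void *item;

	rcu_read_lock();
repeat:
	item = NULL;
	slot = radix_tree_lookup_slot(root, index);
	if (slot) {
		item = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(item))
			goto repeat;	/* raced with a tree shrink; retry the lookup */
	}
	rcu_read_unlock();

	return item;
}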
diff --git a/include/linux/resource.h b/include/linux/resource.h
index 88d36f9145b..d01c96c1966 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -2,6 +2,7 @@
2#define _LINUX_RESOURCE_H 2#define _LINUX_RESOURCE_H
3 3
4#include <linux/time.h> 4#include <linux/time.h>
5#include <linux/types.h>
5 6
6/* 7/*
7 * Resource control/accounting header file for linux 8 * Resource control/accounting header file for linux
diff --git a/include/linux/security.h b/include/linux/security.h
index b8246a8df7d..fd4d55fb884 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -77,7 +77,6 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
77extern int cap_task_setscheduler(struct task_struct *p); 77extern int cap_task_setscheduler(struct task_struct *p);
78extern int cap_task_setioprio(struct task_struct *p, int ioprio); 78extern int cap_task_setioprio(struct task_struct *p, int ioprio);
79extern int cap_task_setnice(struct task_struct *p, int nice); 79extern int cap_task_setnice(struct task_struct *p, int nice);
80extern int cap_syslog(int type, bool from_file);
81extern int cap_vm_enough_memory(struct mm_struct *mm, long pages); 80extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
82 81
83struct msghdr; 82struct msghdr;
@@ -1388,7 +1387,7 @@ struct security_operations {
1388 int (*sysctl) (struct ctl_table *table, int op); 1387 int (*sysctl) (struct ctl_table *table, int op);
1389 int (*quotactl) (int cmds, int type, int id, struct super_block *sb); 1388 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
1390 int (*quota_on) (struct dentry *dentry); 1389 int (*quota_on) (struct dentry *dentry);
1391 int (*syslog) (int type, bool from_file); 1390 int (*syslog) (int type);
1392 int (*settime) (struct timespec *ts, struct timezone *tz); 1391 int (*settime) (struct timespec *ts, struct timezone *tz);
1393 int (*vm_enough_memory) (struct mm_struct *mm, long pages); 1392 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
1394 1393
@@ -1671,7 +1670,7 @@ int security_real_capable_noaudit(struct task_struct *tsk, int cap);
1671int security_sysctl(struct ctl_table *table, int op); 1670int security_sysctl(struct ctl_table *table, int op);
1672int security_quotactl(int cmds, int type, int id, struct super_block *sb); 1671int security_quotactl(int cmds, int type, int id, struct super_block *sb);
1673int security_quota_on(struct dentry *dentry); 1672int security_quota_on(struct dentry *dentry);
1674int security_syslog(int type, bool from_file); 1673int security_syslog(int type);
1675int security_settime(struct timespec *ts, struct timezone *tz); 1674int security_settime(struct timespec *ts, struct timezone *tz);
1676int security_vm_enough_memory(long pages); 1675int security_vm_enough_memory(long pages);
1677int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); 1676int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
@@ -1901,9 +1900,9 @@ static inline int security_quota_on(struct dentry *dentry)
1901 return 0; 1900 return 0;
1902} 1901}
1903 1902
1904static inline int security_syslog(int type, bool from_file) 1903static inline int security_syslog(int type)
1905{ 1904{
1906 return cap_syslog(type, from_file); 1905 return 0;
1907} 1906}
1908 1907
1909static inline int security_settime(struct timespec *ts, struct timezone *tz) 1908static inline int security_settime(struct timespec *ts, struct timezone *tz)
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
index 4dca992f309..cea0c38e7a6 100644
--- a/include/linux/sh_clk.h
+++ b/include/linux/sh_clk.h
@@ -122,6 +122,10 @@ int clk_rate_table_find(struct clk *clk,
122long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, 122long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
123 unsigned int div_max, unsigned long rate); 123 unsigned int div_max, unsigned long rate);
124 124
125long clk_round_parent(struct clk *clk, unsigned long target,
126 unsigned long *best_freq, unsigned long *parent_freq,
127 unsigned int div_min, unsigned int div_max);
128
125#define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags) \ 129#define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags) \
126{ \ 130{ \
127 .parent = _parent, \ 131 .parent = _parent, \
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index f656d1a43dc..5812fefbced 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -79,7 +79,7 @@ struct intc_hw_desc {
79 unsigned int nr_subgroups; 79 unsigned int nr_subgroups;
80}; 80};
81 81
82#define _INTC_ARRAY(a) a, a == NULL ? 0 : sizeof(a)/sizeof(*a) 82#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a)
83 83
84#define INTC_HW_DESC(vectors, groups, mask_regs, \ 84#define INTC_HW_DESC(vectors, groups, mask_regs, \
85 prio_regs, sense_regs, ack_regs) \ 85 prio_regs, sense_regs, ack_regs) \
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
index 864bd56bd3b..4d9dcd13831 100644
--- a/include/linux/sh_timer.h
+++ b/include/linux/sh_timer.h
@@ -5,7 +5,6 @@ struct sh_timer_config {
5 char *name; 5 char *name;
6 long channel_offset; 6 long channel_offset;
7 int timer_bit; 7 int timer_bit;
8 char *clk;
9 unsigned long clockevent_rating; 8 unsigned long clockevent_rating;
10 unsigned long clocksource_rating; 9 unsigned long clocksource_rating;
11}; 10};
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index bbdb680ffbe..aea0d438e3c 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -82,18 +82,28 @@ struct svc_xprt {
82 struct net *xpt_net; 82 struct net *xpt_net;
83}; 83};
84 84
85static inline void register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) 85static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
86{ 86{
87 spin_lock(&xpt->xpt_lock); 87 spin_lock(&xpt->xpt_lock);
88 list_add(&u->list, &xpt->xpt_users); 88 list_del_init(&u->list);
89 spin_unlock(&xpt->xpt_lock); 89 spin_unlock(&xpt->xpt_lock);
90} 90}
91 91
92static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) 92static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
93{ 93{
94 spin_lock(&xpt->xpt_lock); 94 spin_lock(&xpt->xpt_lock);
95 list_del_init(&u->list); 95 if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) {
96 /*
97 * The connection is about to be deleted soon (or,
98 * worse, may already be deleted--in which case we've
99 * already notified the xpt_users).
100 */
101 spin_unlock(&xpt->xpt_lock);
102 return -ENOTCONN;
103 }
104 list_add(&u->list, &xpt->xpt_users);
96 spin_unlock(&xpt->xpt_lock); 105 spin_unlock(&xpt->xpt_lock);
106 return 0;
97} 107}
98 108
99int svc_reg_xprt_class(struct svc_xprt_class *); 109int svc_reg_xprt_class(struct svc_xprt_class *);
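
Since register_xpt_user() can now fail, callers are expected to check its return value; a hedged sketch:

/* Hedged caller sketch: adapt to register_xpt_user() now returning int. */
static int example_watch_xprt(struct svc_xprt *xpt, struct svc_xpt_user *u)
{
	int err = register_xpt_user(xpt, u);

	if (err)	/* -ENOTCONN: the transport is already closing or closed */
		return err;

	/* ... later, balance with unregister_xpt_user(xpt, u) on teardown ... */
	return 0;
}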
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2a754748dd5..c7ea9bc8897 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -50,7 +50,7 @@
50#define N_V253 19 /* Codec control over voice modem */ 50#define N_V253 19 /* Codec control over voice modem */
51#define N_CAIF 20 /* CAIF protocol for talking to modems */ 51#define N_CAIF 20 /* CAIF protocol for talking to modems */
52#define N_GSM0710 21 /* GSM 0710 Mux */ 52#define N_GSM0710 21 /* GSM 0710 Mux */
53#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */ 53#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
54 54
55/* 55/*
56 * This character is the same as _POSIX_VDISABLE: it cannot be used as 56 * This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 35fe6ab222b..24300d8a1bc 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -797,7 +797,7 @@ struct usbdrv_wrap {
797 * @disconnect: Called when the interface is no longer accessible, usually 797 * @disconnect: Called when the interface is no longer accessible, usually
798 * because its device has been (or is being) disconnected or the 798 * because its device has been (or is being) disconnected or the
799 * driver module is being unloaded. 799 * driver module is being unloaded.
800 * @ioctl: Used for drivers that want to talk to userspace through 800 * @unlocked_ioctl: Used for drivers that want to talk to userspace through
801 * the "usbfs" filesystem. This lets devices provide ways to 801 * the "usbfs" filesystem. This lets devices provide ways to
802 * expose information to user space regardless of where they 802 * expose information to user space regardless of where they
803 * do (or don't) show up otherwise in the filesystem. 803 * do (or don't) show up otherwise in the filesystem.
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index ee2dd1d506e..2387f9fc813 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -89,6 +89,8 @@ struct musb_hdrc_config {
89 /* A GPIO controlling VRSEL in Blackfin */ 89 /* A GPIO controlling VRSEL in Blackfin */
90 unsigned int gpio_vrsel; 90 unsigned int gpio_vrsel;
91 unsigned int gpio_vrsel_active; 91 unsigned int gpio_vrsel_active;
92 /* musb CLKIN in Blackfin in MHZ */
93 unsigned char clkin;
92#endif 94#endif
93 95
94}; 96};
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 6da573c75d5..8eff83b9536 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -28,7 +28,7 @@ struct caif_param {
28 * @sockaddr: Socket address to connect. 28 * @sockaddr: Socket address to connect.
29 * @priority: Priority of the connection. 29 * @priority: Priority of the connection.
30 * @link_selector: Link selector (high bandwidth or low latency) 30 * @link_selector: Link selector (high bandwidth or low latency)
31 * @link_name: Name of the CAIF Link Layer to use. 31 * @ifindex: kernel index of the interface.
32 * @param: Connect Request parameters (CAIF_SO_REQ_PARAM). 32 * @param: Connect Request parameters (CAIF_SO_REQ_PARAM).
33 * 33 *
34 * This struct is used when connecting a CAIF channel. 34 * This struct is used when connecting a CAIF channel.
@@ -39,7 +39,7 @@ struct caif_connect_request {
39 struct sockaddr_caif sockaddr; 39 struct sockaddr_caif sockaddr;
40 enum caif_channel_priority priority; 40 enum caif_channel_priority priority;
41 enum caif_link_selector link_selector; 41 enum caif_link_selector link_selector;
42 char link_name[16]; 42 int ifindex;
43 struct caif_param param; 43 struct caif_param param;
44}; 44};
45 45
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
index ce4570dff02..87c3d11b8e5 100644
--- a/include/net/caif/caif_spi.h
+++ b/include/net/caif/caif_spi.h
@@ -121,6 +121,8 @@ struct cfspi {
121 wait_queue_head_t wait; 121 wait_queue_head_t wait;
122 spinlock_t lock; 122 spinlock_t lock;
123 bool flow_stop; 123 bool flow_stop;
124 bool slave;
125 bool slave_talked;
124#ifdef CONFIG_DEBUG_FS 126#ifdef CONFIG_DEBUG_FS
125 enum cfspi_state dbg_state; 127 enum cfspi_state dbg_state;
126 u16 pcmd; 128 u16 pcmd;
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index bd646faffa4..f688478bfb8 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -139,10 +139,10 @@ struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
139 enum cfcnfg_phy_preference phy_pref); 139 enum cfcnfg_phy_preference phy_pref);
140 140
141/** 141/**
142 * cfcnfg_get_named() - Get the Physical Identifier of CAIF Link Layer 142 * cfcnfg_get_id_from_ifi() - Get the Physical Identifier of ifindex,
143 * it matches caif physical id with the kernel interface id.
143 * @cnfg: Configuration object 144 * @cnfg: Configuration object
144 * @name: Name of the Physical Layer (Caif Link Layer) 145 * @ifi: ifindex obtained from socket.c bindtodevice.
145 */ 146 */
146int cfcnfg_get_named(struct cfcnfg *cnfg, char *name); 147int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi);
147
148#endif /* CFCNFG_H_ */ 148#endif /* CFCNFG_H_ */
diff --git a/include/net/dn.h b/include/net/dn.h
index e5469f7b67a..a514a3cf457 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -225,7 +225,7 @@ extern int decnet_di_count;
225extern int decnet_dr_count; 225extern int decnet_dr_count;
226extern int decnet_no_fc_max_cwnd; 226extern int decnet_no_fc_max_cwnd;
227 227
228extern int sysctl_decnet_mem[3]; 228extern long sysctl_decnet_mem[3];
229extern int sysctl_decnet_wmem[3]; 229extern int sysctl_decnet_wmem[3];
230extern int sysctl_decnet_rmem[3]; 230extern int sysctl_decnet_rmem[3];
231 231
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 1fa5306e3e2..51665b3461b 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -2,6 +2,7 @@
2#define _NET_DST_OPS_H 2#define _NET_DST_OPS_H
3#include <linux/types.h> 3#include <linux/types.h>
4#include <linux/percpu_counter.h> 4#include <linux/percpu_counter.h>
5#include <linux/cache.h>
5 6
6struct dst_entry; 7struct dst_entry;
7struct kmem_cachep; 8struct kmem_cachep;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index f3b201d335b..9801c55de5d 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -384,7 +384,7 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
384 * 384 *
385 * Returns the first attribute which matches the specified type. 385 * Returns the first attribute which matches the specified type.
386 */ 386 */
387static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh, 387static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
388 int hdrlen, int attrtype) 388 int hdrlen, int attrtype)
389{ 389{
390 return nla_find(nlmsg_attrdata(nlh, hdrlen), 390 return nla_find(nlmsg_attrdata(nlh, hdrlen),
diff --git a/include/net/sock.h b/include/net/sock.h
index c7a736228ca..a6338d03985 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -762,7 +762,7 @@ struct proto {
762 762
763 /* Memory pressure */ 763 /* Memory pressure */
764 void (*enter_memory_pressure)(struct sock *sk); 764 void (*enter_memory_pressure)(struct sock *sk);
765 atomic_t *memory_allocated; /* Current allocated memory. */ 765 atomic_long_t *memory_allocated; /* Current allocated memory. */
766 struct percpu_counter *sockets_allocated; /* Current number of sockets. */ 766 struct percpu_counter *sockets_allocated; /* Current number of sockets. */
767 /* 767 /*
768 * Pressure flag: try to collapse. 768 * Pressure flag: try to collapse.
@@ -771,7 +771,7 @@ struct proto {
771 * is strict, actions are advisory and have some latency. 771 * is strict, actions are advisory and have some latency.
772 */ 772 */
773 int *memory_pressure; 773 int *memory_pressure;
774 int *sysctl_mem; 774 long *sysctl_mem;
775 int *sysctl_wmem; 775 int *sysctl_wmem;
776 int *sysctl_rmem; 776 int *sysctl_rmem;
777 int max_header; 777 int max_header;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4fee0424af7..e36c874c7fb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -224,7 +224,7 @@ extern int sysctl_tcp_fack;
224extern int sysctl_tcp_reordering; 224extern int sysctl_tcp_reordering;
225extern int sysctl_tcp_ecn; 225extern int sysctl_tcp_ecn;
226extern int sysctl_tcp_dsack; 226extern int sysctl_tcp_dsack;
227extern int sysctl_tcp_mem[3]; 227extern long sysctl_tcp_mem[3];
228extern int sysctl_tcp_wmem[3]; 228extern int sysctl_tcp_wmem[3];
229extern int sysctl_tcp_rmem[3]; 229extern int sysctl_tcp_rmem[3];
230extern int sysctl_tcp_app_win; 230extern int sysctl_tcp_app_win;
@@ -247,7 +247,7 @@ extern int sysctl_tcp_cookie_size;
247extern int sysctl_tcp_thin_linear_timeouts; 247extern int sysctl_tcp_thin_linear_timeouts;
248extern int sysctl_tcp_thin_dupack; 248extern int sysctl_tcp_thin_dupack;
249 249
250extern atomic_t tcp_memory_allocated; 250extern atomic_long_t tcp_memory_allocated;
251extern struct percpu_counter tcp_sockets_allocated; 251extern struct percpu_counter tcp_sockets_allocated;
252extern int tcp_memory_pressure; 252extern int tcp_memory_pressure;
253 253
@@ -280,7 +280,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
280 } 280 }
281 281
282 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 282 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
283 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) 283 atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
284 return true; 284 return true;
285 return false; 285 return false;
286} 286}
diff --git a/include/net/udp.h b/include/net/udp.h
index 200b82848c9..bb967dd59bf 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -105,10 +105,10 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
105 105
106extern struct proto udp_prot; 106extern struct proto udp_prot;
107 107
108extern atomic_t udp_memory_allocated; 108extern atomic_long_t udp_memory_allocated;
109 109
110/* sysctl variables for udp */ 110/* sysctl variables for udp */
111extern int sysctl_udp_mem[3]; 111extern long sysctl_udp_mem[3];
112extern int sysctl_udp_rmem_min; 112extern int sysctl_udp_rmem_min;
113extern int sysctl_udp_wmem_min; 113extern int sysctl_udp_wmem_min;
114 114
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 289010d3270..e5e345fb2a5 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -98,6 +98,103 @@ TRACE_EVENT(ext4_allocate_inode,
98 (unsigned long) __entry->dir, __entry->mode) 98 (unsigned long) __entry->dir, __entry->mode)
99); 99);
100 100
101TRACE_EVENT(ext4_evict_inode,
102 TP_PROTO(struct inode *inode),
103
104 TP_ARGS(inode),
105
106 TP_STRUCT__entry(
107 __field( int, dev_major )
108 __field( int, dev_minor )
109 __field( ino_t, ino )
110 __field( int, nlink )
111 ),
112
113 TP_fast_assign(
114 __entry->dev_major = MAJOR(inode->i_sb->s_dev);
115 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
116 __entry->ino = inode->i_ino;
117 __entry->nlink = inode->i_nlink;
118 ),
119
120 TP_printk("dev %d,%d ino %lu nlink %d",
121 __entry->dev_major, __entry->dev_minor,
122 (unsigned long) __entry->ino, __entry->nlink)
123);
124
125TRACE_EVENT(ext4_drop_inode,
126 TP_PROTO(struct inode *inode, int drop),
127
128 TP_ARGS(inode, drop),
129
130 TP_STRUCT__entry(
131 __field( int, dev_major )
132 __field( int, dev_minor )
133 __field( ino_t, ino )
134 __field( int, drop )
135 ),
136
137 TP_fast_assign(
138 __entry->dev_major = MAJOR(inode->i_sb->s_dev);
139 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
140 __entry->ino = inode->i_ino;
141 __entry->drop = drop;
142 ),
143
144 TP_printk("dev %d,%d ino %lu drop %d",
145 __entry->dev_major, __entry->dev_minor,
146 (unsigned long) __entry->ino, __entry->drop)
147);
148
149TRACE_EVENT(ext4_mark_inode_dirty,
150 TP_PROTO(struct inode *inode, unsigned long IP),
151
152 TP_ARGS(inode, IP),
153
154 TP_STRUCT__entry(
155 __field( int, dev_major )
156 __field( int, dev_minor )
157 __field( ino_t, ino )
158 __field(unsigned long, ip )
159 ),
160
161 TP_fast_assign(
162 __entry->dev_major = MAJOR(inode->i_sb->s_dev);
163 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
164 __entry->ino = inode->i_ino;
165 __entry->ip = IP;
166 ),
167
168 TP_printk("dev %d,%d ino %lu caller %pF",
169 __entry->dev_major, __entry->dev_minor,
170 (unsigned long) __entry->ino, (void *)__entry->ip)
171);
172
173TRACE_EVENT(ext4_begin_ordered_truncate,
174 TP_PROTO(struct inode *inode, loff_t new_size),
175
176 TP_ARGS(inode, new_size),
177
178 TP_STRUCT__entry(
179 __field( int, dev_major )
180 __field( int, dev_minor )
181 __field( ino_t, ino )
182 __field( loff_t, new_size )
183 ),
184
185 TP_fast_assign(
186 __entry->dev_major = MAJOR(inode->i_sb->s_dev);
187 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
188 __entry->ino = inode->i_ino;
189 __entry->new_size = new_size;
190 ),
191
192 TP_printk("dev %d,%d ino %lu new_size %lld",
193 __entry->dev_major, __entry->dev_minor,
194 (unsigned long) __entry->ino,
195 (long long) __entry->new_size)
196);
197
101DECLARE_EVENT_CLASS(ext4__write_begin, 198DECLARE_EVENT_CLASS(ext4__write_begin,
102 199
103 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, 200 TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
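
Each TRACE_EVENT() above generates a trace_<name>() hook that ext4 calls at the matching point; a hedged sketch of what such call sites look like (the surrounding functions and includes are indicative only):

/* Indicative call sites for the tracepoints defined above. */
static void example_evict(struct inode *inode)
{
	trace_ext4_evict_inode(inode);
	/* ... actual inode eviction work ... */
}

static int example_drop(struct inode *inode)
{
	int drop = generic_drop_inode(inode);

	trace_ext4_drop_inode(inode, drop);
	return drop;
}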
diff --git a/kernel/exit.c b/kernel/exit.c
index b194febf579..21aa7b3001f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -96,6 +96,14 @@ static void __exit_signal(struct task_struct *tsk)
96 sig->tty = NULL; 96 sig->tty = NULL;
97 } else { 97 } else {
98 /* 98 /*
99 * This can only happen if the caller is de_thread().
 100 * FIXME: this is a temporary hack; we should teach
101 * posix-cpu-timers to handle this case correctly.
102 */
103 if (unlikely(has_group_leader_pid(tsk)))
104 posix_cpu_timers_exit_group(tsk);
105
106 /*
99 * If there is any task waiting for the group exit 107 * If there is any task waiting for the group exit
100 * then notify it: 108 * then notify it:
101 */ 109 */
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 877fb306d41..17110a4a4fc 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -194,14 +194,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
194 194
195 account_global_scheduler_latency(tsk, &lat); 195 account_global_scheduler_latency(tsk, &lat);
196 196
197 /* 197 for (i = 0; i < tsk->latency_record_count; i++) {
198 * short term hack; if we're > 32 we stop; future we recycle:
199 */
200 tsk->latency_record_count++;
201 if (tsk->latency_record_count >= LT_SAVECOUNT)
202 goto out_unlock;
203
204 for (i = 0; i < LT_SAVECOUNT; i++) {
205 struct latency_record *mylat; 198 struct latency_record *mylat;
206 int same = 1; 199 int same = 1;
207 200
@@ -227,8 +220,14 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
227 } 220 }
228 } 221 }
229 222
223 /*
224 * short term hack; if we're > 32 we stop; future we recycle:
225 */
226 if (tsk->latency_record_count >= LT_SAVECOUNT)
227 goto out_unlock;
228
230 /* Allocated a new one: */ 229 /* Allocated a new one: */
231 i = tsk->latency_record_count; 230 i = tsk->latency_record_count++;
232 memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); 231 memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
233 232
234out_unlock: 233out_unlock:
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 517d827f498..cb6c0d2af68 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -674,6 +674,8 @@ event_sched_in(struct perf_event *event,
674 674
675 event->tstamp_running += ctx->time - event->tstamp_stopped; 675 event->tstamp_running += ctx->time - event->tstamp_stopped;
676 676
677 event->shadow_ctx_time = ctx->time - ctx->timestamp;
678
677 if (!is_software_event(event)) 679 if (!is_software_event(event))
678 cpuctx->active_oncpu++; 680 cpuctx->active_oncpu++;
679 ctx->nr_active++; 681 ctx->nr_active++;
@@ -3396,7 +3398,8 @@ static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3396} 3398}
3397 3399
3398static void perf_output_read_one(struct perf_output_handle *handle, 3400static void perf_output_read_one(struct perf_output_handle *handle,
3399 struct perf_event *event) 3401 struct perf_event *event,
3402 u64 enabled, u64 running)
3400{ 3403{
3401 u64 read_format = event->attr.read_format; 3404 u64 read_format = event->attr.read_format;
3402 u64 values[4]; 3405 u64 values[4];
@@ -3404,11 +3407,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
3404 3407
3405 values[n++] = perf_event_count(event); 3408 values[n++] = perf_event_count(event);
3406 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 3409 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3407 values[n++] = event->total_time_enabled + 3410 values[n++] = enabled +
3408 atomic64_read(&event->child_total_time_enabled); 3411 atomic64_read(&event->child_total_time_enabled);
3409 } 3412 }
3410 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 3413 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3411 values[n++] = event->total_time_running + 3414 values[n++] = running +
3412 atomic64_read(&event->child_total_time_running); 3415 atomic64_read(&event->child_total_time_running);
3413 } 3416 }
3414 if (read_format & PERF_FORMAT_ID) 3417 if (read_format & PERF_FORMAT_ID)
@@ -3421,7 +3424,8 @@ static void perf_output_read_one(struct perf_output_handle *handle,
3421 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 3424 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3422 */ 3425 */
3423static void perf_output_read_group(struct perf_output_handle *handle, 3426static void perf_output_read_group(struct perf_output_handle *handle,
3424 struct perf_event *event) 3427 struct perf_event *event,
3428 u64 enabled, u64 running)
3425{ 3429{
3426 struct perf_event *leader = event->group_leader, *sub; 3430 struct perf_event *leader = event->group_leader, *sub;
3427 u64 read_format = event->attr.read_format; 3431 u64 read_format = event->attr.read_format;
@@ -3431,10 +3435,10 @@ static void perf_output_read_group(struct perf_output_handle *handle,
3431 values[n++] = 1 + leader->nr_siblings; 3435 values[n++] = 1 + leader->nr_siblings;
3432 3436
3433 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3437 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3434 values[n++] = leader->total_time_enabled; 3438 values[n++] = enabled;
3435 3439
3436 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3440 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3437 values[n++] = leader->total_time_running; 3441 values[n++] = running;
3438 3442
3439 if (leader != event) 3443 if (leader != event)
3440 leader->pmu->read(leader); 3444 leader->pmu->read(leader);
@@ -3459,13 +3463,35 @@ static void perf_output_read_group(struct perf_output_handle *handle,
3459 } 3463 }
3460} 3464}
3461 3465
3466#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3467 PERF_FORMAT_TOTAL_TIME_RUNNING)
3468
3462static void perf_output_read(struct perf_output_handle *handle, 3469static void perf_output_read(struct perf_output_handle *handle,
3463 struct perf_event *event) 3470 struct perf_event *event)
3464{ 3471{
3472 u64 enabled = 0, running = 0, now, ctx_time;
3473 u64 read_format = event->attr.read_format;
3474
3475 /*
3476 * compute total_time_enabled, total_time_running
3477 * based on snapshot values taken when the event
3478 * was last scheduled in.
3479 *
 3480 * we cannot simply call update_context_time()
 3481 * because of locking issues, as we are called in
3482 * NMI context
3483 */
3484 if (read_format & PERF_FORMAT_TOTAL_TIMES) {
3485 now = perf_clock();
3486 ctx_time = event->shadow_ctx_time + now;
3487 enabled = ctx_time - event->tstamp_enabled;
3488 running = ctx_time - event->tstamp_running;
3489 }
3490
3465 if (event->attr.read_format & PERF_FORMAT_GROUP) 3491 if (event->attr.read_format & PERF_FORMAT_GROUP)
3466 perf_output_read_group(handle, event); 3492 perf_output_read_group(handle, event, enabled, running);
3467 else 3493 else
3468 perf_output_read_one(handle, event); 3494 perf_output_read_one(handle, event, enabled, running);
3469} 3495}
3470 3496
3471void perf_output_sample(struct perf_output_handle *handle, 3497void perf_output_sample(struct perf_output_handle *handle,
diff --git a/kernel/printk.c b/kernel/printk.c
index b2ebaee8c37..9a2264fc42c 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -261,6 +261,12 @@ static inline void boot_delay_msec(void)
261} 261}
262#endif 262#endif
263 263
264#ifdef CONFIG_SECURITY_DMESG_RESTRICT
265int dmesg_restrict = 1;
266#else
267int dmesg_restrict;
268#endif
269
264int do_syslog(int type, char __user *buf, int len, bool from_file) 270int do_syslog(int type, char __user *buf, int len, bool from_file)
265{ 271{
266 unsigned i, j, limit, count; 272 unsigned i, j, limit, count;
@@ -268,7 +274,20 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
268 char c; 274 char c;
269 int error = 0; 275 int error = 0;
270 276
271 error = security_syslog(type, from_file); 277 /*
278 * If this is from /proc/kmsg we only do the capabilities checks
279 * at open time.
280 */
281 if (type == SYSLOG_ACTION_OPEN || !from_file) {
282 if (dmesg_restrict && !capable(CAP_SYS_ADMIN))
283 return -EPERM;
284 if ((type != SYSLOG_ACTION_READ_ALL &&
285 type != SYSLOG_ACTION_SIZE_BUFFER) &&
286 !capable(CAP_SYS_ADMIN))
287 return -EPERM;
288 }
289
290 error = security_syslog(type);
272 if (error) 291 if (error)
273 return error; 292 return error;
274 293
diff --git a/kernel/range.c b/kernel/range.c
index 471b66acabb..37fa9b99ad5 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -119,7 +119,7 @@ static int cmp_range(const void *x1, const void *x2)
119 119
120int clean_sort_range(struct range *range, int az) 120int clean_sort_range(struct range *range, int az)
121{ 121{
122 int i, j, k = az - 1, nr_range = 0; 122 int i, j, k = az - 1, nr_range = az;
123 123
124 for (i = 0; i < k; i++) { 124 for (i = 0; i < k; i++) {
125 if (range[i].end) 125 if (range[i].end)
diff --git a/kernel/relay.c b/kernel/relay.c
index c7cf397fb92..859ea5a9605 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -70,17 +70,10 @@ static const struct vm_operations_struct relay_file_mmap_ops = {
70 */ 70 */
71static struct page **relay_alloc_page_array(unsigned int n_pages) 71static struct page **relay_alloc_page_array(unsigned int n_pages)
72{ 72{
73 struct page **array; 73 const size_t pa_size = n_pages * sizeof(struct page *);
74 size_t pa_size = n_pages * sizeof(struct page *); 74 if (pa_size > PAGE_SIZE)
75 75 return vzalloc(pa_size);
76 if (pa_size > PAGE_SIZE) { 76 return kzalloc(pa_size, GFP_KERNEL);
77 array = vmalloc(pa_size);
78 if (array)
79 memset(array, 0, pa_size);
80 } else {
81 array = kzalloc(pa_size, GFP_KERNEL);
82 }
83 return array;
84} 77}
85 78
86/* 79/*
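
The rewritten relay_alloc_page_array() above is the usual "zeroed allocation: slab below a page, vmalloc above it" pattern, shortened by the then-new vzalloc(). A kernel-context sketch of the pattern as a pair of helpers (alloc_ptr_array/free_ptr_array are illustrative names, not kernel APIs; this builds in module context, not as a userspace program):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Zeroed array of n pointers: slab for small requests, vmalloc beyond a page. */
static void **alloc_ptr_array(unsigned int n)
{
	const size_t size = n * sizeof(void *);

	if (size > PAGE_SIZE)
		return vzalloc(size);
	return kzalloc(size, GFP_KERNEL);
}

/* The free side must remember which allocator was used. */
static void free_ptr_array(void **array, unsigned int n)
{
	if (n * sizeof(void *) > PAGE_SIZE)
		vfree(array);
	else
		kfree(array);
}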
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c33a1edb799..b65bf634035 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -704,6 +704,15 @@ static struct ctl_table kern_table[] = {
704 }, 704 },
705#endif 705#endif
706 { 706 {
707 .procname = "dmesg_restrict",
708 .data = &dmesg_restrict,
709 .maxlen = sizeof(int),
710 .mode = 0644,
711 .proc_handler = proc_dointvec_minmax,
712 .extra1 = &zero,
713 .extra2 = &one,
714 },
715 {
707 .procname = "ngroups_max", 716 .procname = "ngroups_max",
708 .data = &ngroups_max, 717 .data = &ngroups_max,
709 .maxlen = sizeof (int), 718 .maxlen = sizeof (int),
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bc251ed6672..7b8ec028154 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -168,7 +168,6 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
168static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), 168static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
169 BLK_TC_ACT(BLK_TC_WRITE) }; 169 BLK_TC_ACT(BLK_TC_WRITE) };
170 170
171#define BLK_TC_HARDBARRIER BLK_TC_BARRIER
172#define BLK_TC_RAHEAD BLK_TC_AHEAD 171#define BLK_TC_RAHEAD BLK_TC_AHEAD
173 172
174/* The ilog2() calls fall out because they're constant */ 173/* The ilog2() calls fall out because they're constant */
@@ -196,7 +195,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
196 return; 195 return;
197 196
198 what |= ddir_act[rw & WRITE]; 197 what |= ddir_act[rw & WRITE];
199 what |= MASK_TC_BIT(rw, HARDBARRIER);
200 what |= MASK_TC_BIT(rw, SYNC); 198 what |= MASK_TC_BIT(rw, SYNC);
201 what |= MASK_TC_BIT(rw, RAHEAD); 199 what |= MASK_TC_BIT(rw, RAHEAD);
202 what |= MASK_TC_BIT(rw, META); 200 what |= MASK_TC_BIT(rw, META);
@@ -1807,8 +1805,6 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1807 1805
1808 if (rw & REQ_RAHEAD) 1806 if (rw & REQ_RAHEAD)
1809 rwbs[i++] = 'A'; 1807 rwbs[i++] = 'A';
1810 if (rw & REQ_HARDBARRIER)
1811 rwbs[i++] = 'B';
1812 if (rw & REQ_SYNC) 1808 if (rw & REQ_SYNC)
1813 rwbs[i++] = 'S'; 1809 rwbs[i++] = 'S';
1814 if (rw & REQ_META) 1810 if (rw & REQ_META)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index bafba687a6d..6e3c41a4024 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -43,7 +43,7 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
43static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); 43static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
44#endif 44#endif
45 45
46static int __initdata no_watchdog; 46static int no_watchdog;
47 47
48 48
49/* boot commands */ 49/* boot commands */
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f412ab4c24..5086bb962b4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -82,6 +82,16 @@ struct radix_tree_preload {
82}; 82};
83static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; 83static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
84 84
85static inline void *ptr_to_indirect(void *ptr)
86{
87 return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
88}
89
90static inline void *indirect_to_ptr(void *ptr)
91{
92 return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
93}
94
85static inline gfp_t root_gfp_mask(struct radix_tree_root *root) 95static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
86{ 96{
87 return root->gfp_mask & __GFP_BITS_MASK; 97 return root->gfp_mask & __GFP_BITS_MASK;
@@ -265,7 +275,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
265 return -ENOMEM; 275 return -ENOMEM;
266 276
267 /* Increase the height. */ 277 /* Increase the height. */
268 node->slots[0] = radix_tree_indirect_to_ptr(root->rnode); 278 node->slots[0] = indirect_to_ptr(root->rnode);
269 279
270 /* Propagate the aggregated tag info into the new root */ 280 /* Propagate the aggregated tag info into the new root */
271 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { 281 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -276,7 +286,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
276 newheight = root->height+1; 286 newheight = root->height+1;
277 node->height = newheight; 287 node->height = newheight;
278 node->count = 1; 288 node->count = 1;
279 node = radix_tree_ptr_to_indirect(node); 289 node = ptr_to_indirect(node);
280 rcu_assign_pointer(root->rnode, node); 290 rcu_assign_pointer(root->rnode, node);
281 root->height = newheight; 291 root->height = newheight;
282 } while (height > root->height); 292 } while (height > root->height);
@@ -309,7 +319,7 @@ int radix_tree_insert(struct radix_tree_root *root,
309 return error; 319 return error;
310 } 320 }
311 321
312 slot = radix_tree_indirect_to_ptr(root->rnode); 322 slot = indirect_to_ptr(root->rnode);
313 323
314 height = root->height; 324 height = root->height;
315 shift = (height-1) * RADIX_TREE_MAP_SHIFT; 325 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
@@ -325,8 +335,7 @@ int radix_tree_insert(struct radix_tree_root *root,
325 rcu_assign_pointer(node->slots[offset], slot); 335 rcu_assign_pointer(node->slots[offset], slot);
326 node->count++; 336 node->count++;
327 } else 337 } else
328 rcu_assign_pointer(root->rnode, 338 rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
329 radix_tree_ptr_to_indirect(slot));
330 } 339 }
331 340
332 /* Go a level down */ 341 /* Go a level down */
@@ -374,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
374 return NULL; 383 return NULL;
375 return is_slot ? (void *)&root->rnode : node; 384 return is_slot ? (void *)&root->rnode : node;
376 } 385 }
377 node = radix_tree_indirect_to_ptr(node); 386 node = indirect_to_ptr(node);
378 387
379 height = node->height; 388 height = node->height;
380 if (index > radix_tree_maxindex(height)) 389 if (index > radix_tree_maxindex(height))
@@ -393,7 +402,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
393 height--; 402 height--;
394 } while (height > 0); 403 } while (height > 0);
395 404
396 return is_slot ? (void *)slot:node; 405 return is_slot ? (void *)slot : indirect_to_ptr(node);
397} 406}
398 407
399/** 408/**
@@ -455,7 +464,7 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
455 height = root->height; 464 height = root->height;
456 BUG_ON(index > radix_tree_maxindex(height)); 465 BUG_ON(index > radix_tree_maxindex(height));
457 466
458 slot = radix_tree_indirect_to_ptr(root->rnode); 467 slot = indirect_to_ptr(root->rnode);
459 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 468 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
460 469
461 while (height > 0) { 470 while (height > 0) {
@@ -509,7 +518,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
509 518
510 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 519 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
511 pathp->node = NULL; 520 pathp->node = NULL;
512 slot = radix_tree_indirect_to_ptr(root->rnode); 521 slot = indirect_to_ptr(root->rnode);
513 522
514 while (height > 0) { 523 while (height > 0) {
515 int offset; 524 int offset;
@@ -579,7 +588,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
579 588
580 if (!radix_tree_is_indirect_ptr(node)) 589 if (!radix_tree_is_indirect_ptr(node))
581 return (index == 0); 590 return (index == 0);
582 node = radix_tree_indirect_to_ptr(node); 591 node = indirect_to_ptr(node);
583 592
584 height = node->height; 593 height = node->height;
585 if (index > radix_tree_maxindex(height)) 594 if (index > radix_tree_maxindex(height))
@@ -666,7 +675,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
666 } 675 }
667 676
668 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 677 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
669 slot = radix_tree_indirect_to_ptr(root->rnode); 678 slot = indirect_to_ptr(root->rnode);
670 679
671 /* 680 /*
672 * we fill the path from (root->height - 2) to 0, leaving the index at 681 * we fill the path from (root->height - 2) to 0, leaving the index at
@@ -897,7 +906,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
897 results[0] = node; 906 results[0] = node;
898 return 1; 907 return 1;
899 } 908 }
900 node = radix_tree_indirect_to_ptr(node); 909 node = indirect_to_ptr(node);
901 910
902 max_index = radix_tree_maxindex(node->height); 911 max_index = radix_tree_maxindex(node->height);
903 912
@@ -916,7 +925,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
916 slot = *(((void ***)results)[ret + i]); 925 slot = *(((void ***)results)[ret + i]);
917 if (!slot) 926 if (!slot)
918 continue; 927 continue;
919 results[ret + nr_found] = rcu_dereference_raw(slot); 928 results[ret + nr_found] =
929 indirect_to_ptr(rcu_dereference_raw(slot));
920 nr_found++; 930 nr_found++;
921 } 931 }
922 ret += nr_found; 932 ret += nr_found;
@@ -965,7 +975,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
965 results[0] = (void **)&root->rnode; 975 results[0] = (void **)&root->rnode;
966 return 1; 976 return 1;
967 } 977 }
968 node = radix_tree_indirect_to_ptr(node); 978 node = indirect_to_ptr(node);
969 979
970 max_index = radix_tree_maxindex(node->height); 980 max_index = radix_tree_maxindex(node->height);
971 981
@@ -1090,7 +1100,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
1090 results[0] = node; 1100 results[0] = node;
1091 return 1; 1101 return 1;
1092 } 1102 }
1093 node = radix_tree_indirect_to_ptr(node); 1103 node = indirect_to_ptr(node);
1094 1104
1095 max_index = radix_tree_maxindex(node->height); 1105 max_index = radix_tree_maxindex(node->height);
1096 1106
@@ -1109,7 +1119,8 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
1109 slot = *(((void ***)results)[ret + i]); 1119 slot = *(((void ***)results)[ret + i]);
1110 if (!slot) 1120 if (!slot)
1111 continue; 1121 continue;
1112 results[ret + nr_found] = rcu_dereference_raw(slot); 1122 results[ret + nr_found] =
1123 indirect_to_ptr(rcu_dereference_raw(slot));
1113 nr_found++; 1124 nr_found++;
1114 } 1125 }
1115 ret += nr_found; 1126 ret += nr_found;
@@ -1159,7 +1170,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
1159 results[0] = (void **)&root->rnode; 1170 results[0] = (void **)&root->rnode;
1160 return 1; 1171 return 1;
1161 } 1172 }
1162 node = radix_tree_indirect_to_ptr(node); 1173 node = indirect_to_ptr(node);
1163 1174
1164 max_index = radix_tree_maxindex(node->height); 1175 max_index = radix_tree_maxindex(node->height);
1165 1176
@@ -1195,7 +1206,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
1195 void *newptr; 1206 void *newptr;
1196 1207
1197 BUG_ON(!radix_tree_is_indirect_ptr(to_free)); 1208 BUG_ON(!radix_tree_is_indirect_ptr(to_free));
1198 to_free = radix_tree_indirect_to_ptr(to_free); 1209 to_free = indirect_to_ptr(to_free);
1199 1210
1200 /* 1211 /*
1201 * The candidate node has more than one child, or its child 1212 * The candidate node has more than one child, or its child
@@ -1208,16 +1219,39 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
1208 1219
1209 /* 1220 /*
1210 * We don't need rcu_assign_pointer(), since we are simply 1221 * We don't need rcu_assign_pointer(), since we are simply
1211 * moving the node from one part of the tree to another. If 1222 * moving the node from one part of the tree to another: if it
1212 * it was safe to dereference the old pointer to it 1223 * was safe to dereference the old pointer to it
1213 * (to_free->slots[0]), it will be safe to dereference the new 1224 * (to_free->slots[0]), it will be safe to dereference the new
1214 * one (root->rnode). 1225 * one (root->rnode) as far as dependent read barriers go.
1215 */ 1226 */
1216 newptr = to_free->slots[0]; 1227 newptr = to_free->slots[0];
1217 if (root->height > 1) 1228 if (root->height > 1)
1218 newptr = radix_tree_ptr_to_indirect(newptr); 1229 newptr = ptr_to_indirect(newptr);
1219 root->rnode = newptr; 1230 root->rnode = newptr;
1220 root->height--; 1231 root->height--;
1232
1233 /*
1234 * We have a dilemma here. The node's slot[0] must not be
1235 * NULLed in case there are concurrent lookups expecting to
1236 * find the item. However if this was a bottom-level node,
1237 * then it may be subject to the slot pointer being visible
 1238		 * to callers dereferencing it. If the item corresponding to
1239 * slot[0] is subsequently deleted, these callers would expect
1240 * their slot to become empty sooner or later.
1241 *
1242 * For example, lockless pagecache will look up a slot, deref
1243 * the page pointer, and if the page is 0 refcount it means it
1244 * was concurrently deleted from pagecache so try the deref
1245 * again. Fortunately there is already a requirement for logic
1246 * to retry the entire slot lookup -- the indirect pointer
1247 * problem (replacing direct root node with an indirect pointer
1248 * also results in a stale slot). So tag the slot as indirect
1249 * to force callers to retry.
1250 */
1251 if (root->height == 0)
1252 *((unsigned long *)&to_free->slots[0]) |=
1253 RADIX_TREE_INDIRECT_PTR;
1254
1221 radix_tree_node_free(to_free); 1255 radix_tree_node_free(to_free);
1222 } 1256 }
1223} 1257}
@@ -1254,7 +1288,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1254 root->rnode = NULL; 1288 root->rnode = NULL;
1255 goto out; 1289 goto out;
1256 } 1290 }
1257 slot = radix_tree_indirect_to_ptr(slot); 1291 slot = indirect_to_ptr(slot);
1258 1292
1259 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 1293 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
1260 pathp->node = NULL; 1294 pathp->node = NULL;
@@ -1296,8 +1330,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1296 radix_tree_node_free(to_free); 1330 radix_tree_node_free(to_free);
1297 1331
1298 if (pathp->node->count) { 1332 if (pathp->node->count) {
1299 if (pathp->node == 1333 if (pathp->node == indirect_to_ptr(root->rnode))
1300 radix_tree_indirect_to_ptr(root->rnode))
1301 radix_tree_shrink(root); 1334 radix_tree_shrink(root);
1302 goto out; 1335 goto out;
1303 } 1336 }
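
The new ptr_to_indirect()/indirect_to_ptr() helpers above are low-bit pointer tagging: radix_tree_node pointers are at least word-aligned, so bit 0 is free to mark "this is an indirect (internal) pointer, not a data item", and the shrink path now also sets that bit in the stale slot to force lockless lookups to retry. A standalone sketch of the tagging trick; INDIRECT_PTR here stands in for the kernel's RADIX_TREE_INDIRECT_PTR:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INDIRECT_PTR 1UL	/* bit 0 is unused in aligned pointers */

static inline void *ptr_to_indirect(void *ptr)
{
	return (void *)((uintptr_t)ptr | INDIRECT_PTR);
}

static inline void *indirect_to_ptr(void *ptr)
{
	return (void *)((uintptr_t)ptr & ~INDIRECT_PTR);
}

static inline int is_indirect_ptr(void *ptr)
{
	return (uintptr_t)ptr & INDIRECT_PTR;
}

int main(void)
{
	long item = 42;			/* stands in for a radix_tree_node */
	void *tagged = ptr_to_indirect(&item);

	assert(is_indirect_ptr(tagged));
	assert(indirect_to_ptr(tagged) == (void *)&item);
	printf("tag survives round trip: %ld\n", *(long *)indirect_to_ptr(tagged));
	return 0;
}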
diff --git a/mm/filemap.c b/mm/filemap.c
index 75572b5f237..ea89840fc65 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -644,7 +644,9 @@ repeat:
644 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); 644 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
645 if (pagep) { 645 if (pagep) {
646 page = radix_tree_deref_slot(pagep); 646 page = radix_tree_deref_slot(pagep);
647 if (unlikely(!page || page == RADIX_TREE_RETRY)) 647 if (unlikely(!page))
648 goto out;
649 if (radix_tree_deref_retry(page))
648 goto repeat; 650 goto repeat;
649 651
650 if (!page_cache_get_speculative(page)) 652 if (!page_cache_get_speculative(page))
@@ -660,6 +662,7 @@ repeat:
660 goto repeat; 662 goto repeat;
661 } 663 }
662 } 664 }
665out:
663 rcu_read_unlock(); 666 rcu_read_unlock();
664 667
665 return page; 668 return page;
@@ -777,12 +780,11 @@ repeat:
777 page = radix_tree_deref_slot((void **)pages[i]); 780 page = radix_tree_deref_slot((void **)pages[i]);
778 if (unlikely(!page)) 781 if (unlikely(!page))
779 continue; 782 continue;
780 /* 783 if (radix_tree_deref_retry(page)) {
781 * this can only trigger if nr_found == 1, making livelock 784 if (ret)
782 * a non issue. 785 start = pages[ret-1]->index;
783 */
784 if (unlikely(page == RADIX_TREE_RETRY))
785 goto restart; 786 goto restart;
787 }
786 788
787 if (!page_cache_get_speculative(page)) 789 if (!page_cache_get_speculative(page))
788 goto repeat; 790 goto repeat;
@@ -830,11 +832,7 @@ repeat:
830 page = radix_tree_deref_slot((void **)pages[i]); 832 page = radix_tree_deref_slot((void **)pages[i]);
831 if (unlikely(!page)) 833 if (unlikely(!page))
832 continue; 834 continue;
833 /* 835 if (radix_tree_deref_retry(page))
834 * this can only trigger if nr_found == 1, making livelock
835 * a non issue.
836 */
837 if (unlikely(page == RADIX_TREE_RETRY))
838 goto restart; 836 goto restart;
839 837
840 if (page->mapping == NULL || page->index != index) 838 if (page->mapping == NULL || page->index != index)
@@ -887,11 +885,7 @@ repeat:
887 page = radix_tree_deref_slot((void **)pages[i]); 885 page = radix_tree_deref_slot((void **)pages[i]);
888 if (unlikely(!page)) 886 if (unlikely(!page))
889 continue; 887 continue;
890 /* 888 if (radix_tree_deref_retry(page))
891 * this can only trigger if nr_found == 1, making livelock
892 * a non issue.
893 */
894 if (unlikely(page == RADIX_TREE_RETRY))
895 goto restart; 889 goto restart;
896 890
897 if (!page_cache_get_speculative(page)) 891 if (!page_cache_get_speculative(page))
@@ -1029,6 +1023,9 @@ find_page:
1029 goto page_not_up_to_date; 1023 goto page_not_up_to_date;
1030 if (!trylock_page(page)) 1024 if (!trylock_page(page))
1031 goto page_not_up_to_date; 1025 goto page_not_up_to_date;
1026 /* Did it get truncated before we got the lock? */
1027 if (!page->mapping)
1028 goto page_not_up_to_date_locked;
1032 if (!mapping->a_ops->is_partially_uptodate(page, 1029 if (!mapping->a_ops->is_partially_uptodate(page,
1033 desc, offset)) 1030 desc, offset))
1034 goto page_not_up_to_date_locked; 1031 goto page_not_up_to_date_locked;
@@ -1563,8 +1560,10 @@ retry_find:
1563 goto no_cached_page; 1560 goto no_cached_page;
1564 } 1561 }
1565 1562
1566 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) 1563 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
1564 page_cache_release(page);
1567 return ret | VM_FAULT_RETRY; 1565 return ret | VM_FAULT_RETRY;
1566 }
1568 1567
1569 /* Did it get truncated? */ 1568 /* Did it get truncated? */
1570 if (unlikely(page->mapping != mapping)) { 1569 if (unlikely(page->mapping != mapping)) {
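
The filemap hunks adopt radix_tree_deref_retry() on the lockless lookup path: a slot can briefly hold a tagged (indirect) value while the tree is being shrunk, and the only safe response is to restart the lookup. A kernel-context condensation of that retry discipline (lookup_page_sketch is an illustrative name; error handling beyond the retry cases is omitted):

#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static struct page *lookup_page_sketch(struct address_space *mapping,
				       pgoff_t index)
{
	struct page *page;
	void **slot;

	rcu_read_lock();
repeat:
	page = NULL;
	slot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (slot) {
		page = radix_tree_deref_slot(slot);
		if (!page)
			goto out;
		/* Stale/indirect value seen during a shrink: start over. */
		if (radix_tree_deref_retry(page))
			goto repeat;
		if (!page_cache_get_speculative(page))
			goto repeat;
		/* Did the slot change under us while taking the reference? */
		if (page != *slot) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();
	return page;
}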
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9a99cfaf0a1..2efa8ea07ff 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4208,15 +4208,17 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
4208 4208
4209 memset(mem, 0, size); 4209 memset(mem, 0, size);
4210 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4210 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4211 if (!mem->stat) { 4211 if (!mem->stat)
4212 if (size < PAGE_SIZE) 4212 goto out_free;
4213 kfree(mem);
4214 else
4215 vfree(mem);
4216 mem = NULL;
4217 }
4218 spin_lock_init(&mem->pcp_counter_lock); 4213 spin_lock_init(&mem->pcp_counter_lock);
4219 return mem; 4214 return mem;
4215
4216out_free:
4217 if (size < PAGE_SIZE)
4218 kfree(mem);
4219 else
4220 vfree(mem);
4221 return NULL;
4220} 4222}
4221 4223
4222/* 4224/*
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2d1bf7cf885..4c513387309 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -211,6 +211,7 @@ success:
211 mmu_notifier_invalidate_range_end(mm, start, end); 211 mmu_notifier_invalidate_range_end(mm, start, end);
212 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); 212 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
213 vm_stat_account(mm, newflags, vma->vm_file, nrpages); 213 vm_stat_account(mm, newflags, vma->vm_file, nrpages);
214 perf_event_mmap(vma);
214 return 0; 215 return 0;
215 216
216fail: 217fail:
@@ -299,7 +300,6 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
299 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); 300 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
300 if (error) 301 if (error)
301 goto out; 302 goto out;
302 perf_event_mmap(vma);
303 nstart = tmp; 303 nstart = tmp;
304 304
305 if (nstart < prev->vm_end) 305 if (nstart < prev->vm_end)
diff --git a/mm/slub.c b/mm/slub.c
index 8fd5401bb07..981fb730aa0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3273,9 +3273,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3273 kfree(n); 3273 kfree(n);
3274 kfree(s); 3274 kfree(s);
3275 } 3275 }
3276err:
3276 up_write(&slub_lock); 3277 up_write(&slub_lock);
3277 3278
3278err:
3279 if (flags & SLAB_PANIC) 3279 if (flags & SLAB_PANIC)
3280 panic("Cannot create slabcache %s\n", name); 3280 panic("Cannot create slabcache %s\n", name);
3281 else 3281 else
@@ -3862,6 +3862,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
3862 x += sprintf(buf + x, " N%d=%lu", 3862 x += sprintf(buf + x, " N%d=%lu",
3863 node, nodes[node]); 3863 node, nodes[node]);
3864#endif 3864#endif
3865 up_read(&slub_lock);
3865 kfree(nodes); 3866 kfree(nodes);
3866 return x + sprintf(buf + x, "\n"); 3867 return x + sprintf(buf + x, "\n");
3867} 3868}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b8a6fdc2131..d31d7ce52c0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -913,7 +913,7 @@ keep_lumpy:
913 * back off and wait for congestion to clear because further reclaim 913 * back off and wait for congestion to clear because further reclaim
914 * will encounter the same problem 914 * will encounter the same problem
915 */ 915 */
916 if (nr_dirty == nr_congested) 916 if (nr_dirty == nr_congested && nr_dirty != 0)
917 zone_set_flag(zone, ZONE_CONGESTED); 917 zone_set_flag(zone, ZONE_CONGESTED);
918 918
919 free_page_list(&free_pages); 919 free_page_list(&free_pages);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index cd2e42be7b6..42eac4d3321 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -949,7 +949,7 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
949 v[PGPGIN] /= 2; /* sectors -> kbytes */ 949 v[PGPGIN] /= 2; /* sectors -> kbytes */
950 v[PGPGOUT] /= 2; 950 v[PGPGOUT] /= 2;
951#endif 951#endif
952 return m->private + *pos; 952 return (unsigned long *)m->private + *pos;
953} 953}
954 954
955static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) 955static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
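
The vmstat_start() fix above is a pointer-arithmetic scaling bug: m->private points at an array of unsigned long counters, but arithmetic on a void * (a GCC extension) steps one byte at a time, so the *pos offset was not scaled by sizeof(unsigned long). A standalone illustration of the difference (v, private and pos are illustrative names):

#include <stdio.h>

int main(void)
{
	unsigned long v[4] = { 10, 20, 30, 40 };
	void *private = v;
	long pos = 2;

	/* GCC extension: void * arithmetic steps in bytes -> wrong element. */
	void *wrong = private + pos;
	/* Cast first so the offset is scaled by sizeof(unsigned long). */
	unsigned long *right = (unsigned long *)private + pos;

	printf("byte-stepped offset: %td bytes\n", (char *)wrong - (char *)private);
	printf("element-stepped value: %lu\n", *right);	/* prints 30 */
	return 0;
}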
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 26eaebf4aaa..bb86d293239 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,6 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
1392 ax25_cb *ax25; 1392 ax25_cb *ax25;
1393 int err = 0; 1393 int err = 0;
1394 1394
1395 memset(fsa, 0, sizeof(fsa));
1395 lock_sock(sk); 1396 lock_sock(sk);
1396 ax25 = ax25_sk(sk); 1397 ax25 = ax25_sk(sk);
1397 1398
@@ -1403,7 +1404,6 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
1403 1404
1404 fsa->fsa_ax25.sax25_family = AF_AX25; 1405 fsa->fsa_ax25.sax25_family = AF_AX25;
1405 fsa->fsa_ax25.sax25_call = ax25->dest_addr; 1406 fsa->fsa_ax25.sax25_call = ax25->dest_addr;
1406 fsa->fsa_ax25.sax25_ndigis = 0;
1407 1407
1408 if (ax25->digipeat != NULL) { 1408 if (ax25->digipeat != NULL) {
1409 ndigi = ax25->digipeat->ndigi; 1409 ndigi = ax25->digipeat->ndigi;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index bfef5bae0b3..84093b0000b 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1175,6 +1175,12 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
1175 hci_send_cmd(hdev, 1175 hci_send_cmd(hdev,
1176 HCI_OP_READ_REMOTE_EXT_FEATURES, 1176 HCI_OP_READ_REMOTE_EXT_FEATURES,
1177 sizeof(cp), &cp); 1177 sizeof(cp), &cp);
1178 } else if (!ev->status && conn->out &&
1179 conn->sec_level == BT_SECURITY_HIGH) {
1180 struct hci_cp_auth_requested cp;
1181 cp.handle = ev->handle;
1182 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1183 sizeof(cp), &cp);
1178 } else { 1184 } else {
1179 conn->state = BT_CONNECTED; 1185 conn->state = BT_CONNECTED;
1180 hci_proto_connect_cfm(conn, ev->status); 1186 hci_proto_connect_cfm(conn, ev->status);
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 98fdfa1fbdd..86a91543172 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
1config BT_HIDP 1config BT_HIDP
2 tristate "HIDP protocol support" 2 tristate "HIDP protocol support"
3 depends on BT && BT_L2CAP && INPUT 3 depends on BT && BT_L2CAP && INPUT && HID_SUPPORT
4 select HID 4 select HID
5 help 5 help
6 HIDP (Human Interface Device Protocol) is a transport layer 6 HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index daa7a988d9a..cd8f6ea0384 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2421,11 +2421,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
2421 break; 2421 break;
2422 2422
2423 case 2: 2423 case 2:
2424 *val = __le16_to_cpu(*((__le16 *) opt->val)); 2424 *val = get_unaligned_le16(opt->val);
2425 break; 2425 break;
2426 2426
2427 case 4: 2427 case 4:
2428 *val = __le32_to_cpu(*((__le32 *) opt->val)); 2428 *val = get_unaligned_le32(opt->val);
2429 break; 2429 break;
2430 2430
2431 default: 2431 default:
@@ -2452,11 +2452,11 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2452 break; 2452 break;
2453 2453
2454 case 2: 2454 case 2:
2455 *((__le16 *) opt->val) = cpu_to_le16(val); 2455 put_unaligned_le16(val, opt->val);
2456 break; 2456 break;
2457 2457
2458 case 4: 2458 case 4:
2459 *((__le32 *) opt->val) = cpu_to_le32(val); 2459 put_unaligned_le32(val, opt->val);
2460 break; 2460 break;
2461 2461
2462 default: 2462 default:
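
The l2cap hunks swap direct __le16/__le32 casts of opt->val for the unaligned accessors, since an option value inside a received configuration frame can start at any byte offset. A standalone sketch of what get_unaligned_le16()/put_unaligned_le16() do (the kernel versions come from <asm/unaligned.h>; these byte-wise copies are for illustration only):

#include <stdint.h>
#include <stdio.h>

/* Read/write little-endian values at an arbitrary byte offset. */
static uint16_t get_unaligned_le16(const void *p)
{
	const uint8_t *b = p;
	return (uint16_t)(b[0] | (b[1] << 8));
}

static void put_unaligned_le16(uint16_t val, void *p)
{
	uint8_t *b = p;
	b[0] = val & 0xff;
	b[1] = val >> 8;
}

int main(void)
{
	/* Option payload starting at an odd offset, as in an L2CAP config frame. */
	uint8_t frame[8] = { 0 };

	put_unaligned_le16(0x02a0, frame + 3);	/* e.g. an MTU value */
	printf("mtu = 0x%04x\n", get_unaligned_le16(frame + 3));
	return 0;
}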
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 39a5d87e33b..fa642aa652b 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -79,7 +79,10 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr);
79 79
80static void rfcomm_process_connect(struct rfcomm_session *s); 80static void rfcomm_process_connect(struct rfcomm_session *s);
81 81
82static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err); 82static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
83 bdaddr_t *dst,
84 u8 sec_level,
85 int *err);
83static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); 86static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst);
84static void rfcomm_session_del(struct rfcomm_session *s); 87static void rfcomm_session_del(struct rfcomm_session *s);
85 88
@@ -401,7 +404,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
401 404
402 s = rfcomm_session_get(src, dst); 405 s = rfcomm_session_get(src, dst);
403 if (!s) { 406 if (!s) {
404 s = rfcomm_session_create(src, dst, &err); 407 s = rfcomm_session_create(src, dst, d->sec_level, &err);
405 if (!s) 408 if (!s)
406 return err; 409 return err;
407 } 410 }
@@ -679,7 +682,10 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err)
679 rfcomm_session_put(s); 682 rfcomm_session_put(s);
680} 683}
681 684
682static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err) 685static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
686 bdaddr_t *dst,
687 u8 sec_level,
688 int *err)
683{ 689{
684 struct rfcomm_session *s = NULL; 690 struct rfcomm_session *s = NULL;
685 struct sockaddr_l2 addr; 691 struct sockaddr_l2 addr;
@@ -704,6 +710,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
704 sk = sock->sk; 710 sk = sock->sk;
705 lock_sock(sk); 711 lock_sock(sk);
706 l2cap_pi(sk)->imtu = l2cap_mtu; 712 l2cap_pi(sk)->imtu = l2cap_mtu;
713 l2cap_pi(sk)->sec_level = sec_level;
707 if (l2cap_ertm) 714 if (l2cap_ertm)
708 l2cap_pi(sk)->mode = L2CAP_MODE_ERTM; 715 l2cap_pi(sk)->mode = L2CAP_MODE_ERTM;
709 release_sock(sk); 716 release_sock(sk);
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 76ae68303d3..d522d8c1703 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -16,11 +16,18 @@ int connect_req_to_link_param(struct cfcnfg *cnfg,
16{ 16{
17 struct dev_info *dev_info; 17 struct dev_info *dev_info;
18 enum cfcnfg_phy_preference pref; 18 enum cfcnfg_phy_preference pref;
19 int res;
20
19 memset(l, 0, sizeof(*l)); 21 memset(l, 0, sizeof(*l));
20 l->priority = s->priority; 22 /* In caif protocol low value is high priority */
23 l->priority = CAIF_PRIO_MAX - s->priority + 1;
21 24
22 if (s->link_name[0] != '\0') 25 if (s->ifindex != 0){
23 l->phyid = cfcnfg_get_named(cnfg, s->link_name); 26 res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
27 if (res < 0)
28 return res;
29 l->phyid = res;
30 }
24 else { 31 else {
25 switch (s->link_selector) { 32 switch (s->link_selector) {
26 case CAIF_LINK_HIGH_BANDW: 33 case CAIF_LINK_HIGH_BANDW:
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index b99369a055d..a42a408306e 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -307,6 +307,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
307 307
308 case NETDEV_UNREGISTER: 308 case NETDEV_UNREGISTER:
309 caifd = caif_get(dev); 309 caifd = caif_get(dev);
310 if (caifd == NULL)
311 break;
310 netdev_info(dev, "unregister\n"); 312 netdev_info(dev, "unregister\n");
311 atomic_set(&caifd->state, what); 313 atomic_set(&caifd->state, what);
312 caif_device_destroy(dev); 314 caif_device_destroy(dev);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 2eca2dd0000..1bf0cf50379 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -716,8 +716,7 @@ static int setsockopt(struct socket *sock,
716{ 716{
717 struct sock *sk = sock->sk; 717 struct sock *sk = sock->sk;
718 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 718 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
719 int prio, linksel; 719 int linksel;
720 struct ifreq ifreq;
721 720
722 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED) 721 if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
723 return -ENOPROTOOPT; 722 return -ENOPROTOOPT;
@@ -735,33 +734,6 @@ static int setsockopt(struct socket *sock,
735 release_sock(&cf_sk->sk); 734 release_sock(&cf_sk->sk);
736 return 0; 735 return 0;
737 736
738 case SO_PRIORITY:
739 if (lvl != SOL_SOCKET)
740 goto bad_sol;
741 if (ol < sizeof(int))
742 return -EINVAL;
743 if (copy_from_user(&prio, ov, sizeof(int)))
744 return -EINVAL;
745 lock_sock(&(cf_sk->sk));
746 cf_sk->conn_req.priority = prio;
747 release_sock(&cf_sk->sk);
748 return 0;
749
750 case SO_BINDTODEVICE:
751 if (lvl != SOL_SOCKET)
752 goto bad_sol;
753 if (ol < sizeof(struct ifreq))
754 return -EINVAL;
755 if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
756 return -EFAULT;
757 lock_sock(&(cf_sk->sk));
758 strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
759 sizeof(cf_sk->conn_req.link_name));
760 cf_sk->conn_req.link_name
761 [sizeof(cf_sk->conn_req.link_name)-1] = 0;
762 release_sock(&cf_sk->sk);
763 return 0;
764
765 case CAIFSO_REQ_PARAM: 737 case CAIFSO_REQ_PARAM:
766 if (lvl != SOL_CAIF) 738 if (lvl != SOL_CAIF)
767 goto bad_sol; 739 goto bad_sol;
@@ -880,6 +852,18 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
880 sock->state = SS_CONNECTING; 852 sock->state = SS_CONNECTING;
881 sk->sk_state = CAIF_CONNECTING; 853 sk->sk_state = CAIF_CONNECTING;
882 854
 855	/* Check priority value coming from socket */
 856	/* if priority value is out of range it will be adjusted */
857 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
858 cf_sk->conn_req.priority = CAIF_PRIO_MAX;
859 else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
860 cf_sk->conn_req.priority = CAIF_PRIO_MIN;
861 else
862 cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
863
864 /*ifindex = id of the interface.*/
865 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
866
883 dbfs_atomic_inc(&cnt.num_connect_req); 867 dbfs_atomic_inc(&cnt.num_connect_req);
884 cf_sk->layer.receive = caif_sktrecv_cb; 868 cf_sk->layer.receive = caif_sktrecv_cb;
885 err = caif_connect_client(&cf_sk->conn_req, 869 err = caif_connect_client(&cf_sk->conn_req,
@@ -905,6 +889,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
905 cf_sk->maxframe = mtu - (headroom + tailroom); 889 cf_sk->maxframe = mtu - (headroom + tailroom);
906 if (cf_sk->maxframe < 1) { 890 if (cf_sk->maxframe < 1) {
907 pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu); 891 pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
892 err = -ENODEV;
908 goto out; 893 goto out;
909 } 894 }
910 895
@@ -1142,7 +1127,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1142 set_rx_flow_on(cf_sk); 1127 set_rx_flow_on(cf_sk);
1143 1128
1144 /* Set default options on configuration */ 1129 /* Set default options on configuration */
1145 cf_sk->conn_req.priority = CAIF_PRIO_NORMAL; 1130 cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL;
1146 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; 1131 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
1147 cf_sk->conn_req.protocol = protocol; 1132 cf_sk->conn_req.protocol = protocol;
1148 /* Increase the number of sockets created. */ 1133 /* Increase the number of sockets created. */
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 41adafd1891..21ede141018 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -173,18 +173,15 @@ static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
173 return NULL; 173 return NULL;
174} 174}
175 175
176int cfcnfg_get_named(struct cfcnfg *cnfg, char *name) 176
177int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
177{ 178{
178 int i; 179 int i;
179 180 for (i = 0; i < MAX_PHY_LAYERS; i++)
180 /* Try to match with specified name */ 181 if (cnfg->phy_layers[i].frm_layer != NULL &&
181 for (i = 0; i < MAX_PHY_LAYERS; i++) { 182 cnfg->phy_layers[i].ifindex == ifi)
182 if (cnfg->phy_layers[i].frm_layer != NULL 183 return i;
183 && strcmp(cnfg->phy_layers[i].phy_layer->name, 184 return -ENODEV;
184 name) == 0)
185 return cnfg->phy_layers[i].frm_layer->id;
186 }
187 return 0;
188} 185}
189 186
190int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) 187int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 08f267a109a..3cd8f978e30 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -361,11 +361,10 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
361 struct cfctrl_request_info *p, *tmp; 361 struct cfctrl_request_info *p, *tmp;
362 struct cfctrl *ctrl = container_obj(layr); 362 struct cfctrl *ctrl = container_obj(layr);
363 spin_lock(&ctrl->info_list_lock); 363 spin_lock(&ctrl->info_list_lock);
364 pr_warn("enter\n");
365 364
366 list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 365 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
367 if (p->client_layer == adap_layer) { 366 if (p->client_layer == adap_layer) {
368 pr_warn("cancel req :%d\n", p->sequence_no); 367 pr_debug("cancel req :%d\n", p->sequence_no);
369 list_del(&p->list); 368 list_del(&p->list);
370 kfree(p); 369 kfree(p);
371 } 370 }
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 496fda9ac66..11a2af4c162 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -12,6 +12,8 @@
12#include <net/caif/cfsrvl.h> 12#include <net/caif/cfsrvl.h>
13#include <net/caif/cfpkt.h> 13#include <net/caif/cfpkt.h>
14 14
15#define container_obj(layr) ((struct cfsrvl *) layr)
16
15static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt); 17static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
16static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt); 18static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
17 19
@@ -38,5 +40,17 @@ static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
38 40
39static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt) 41static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
40{ 42{
43 struct cfsrvl *service = container_obj(layr);
44 struct caif_payload_info *info;
45 int ret;
46
47 if (!cfsrvl_ready(service, &ret))
48 return ret;
49
50 /* Add info for MUX-layer to route the packet out */
51 info = cfpkt_info(pkt);
52 info->channel_id = service->layer.id;
53 info->dev_info = &service->dev_info;
54
41 return layr->dn->transmit(layr->dn, pkt); 55 return layr->dn->transmit(layr->dn, pkt);
42} 56}
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index bde8481e8d2..e2fb5fa7579 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -193,7 +193,7 @@ out:
193 193
194static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt) 194static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
195{ 195{
196 caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size); 196 caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
197 197
198 /* Add info for MUX-layer to route the packet out. */ 198 /* Add info for MUX-layer to route the packet out. */
199 cfpkt_info(pkt)->channel_id = rfml->serv.layer.id; 199 cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 08ffe9e4be2..6faa8256e10 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@ struct bcm_sock {
125 struct list_head tx_ops; 125 struct list_head tx_ops;
126 unsigned long dropped_usr_msgs; 126 unsigned long dropped_usr_msgs;
127 struct proc_dir_entry *bcm_proc_read; 127 struct proc_dir_entry *bcm_proc_read;
128 char procname [9]; /* pointer printed in ASCII with \0 */ 128 char procname [20]; /* pointer printed in ASCII with \0 */
129}; 129};
130 130
131static inline struct bcm_sock *bcm_sk(const struct sock *sk) 131static inline struct bcm_sock *bcm_sk(const struct sock *sk)
diff --git a/net/core/dev.c b/net/core/dev.c
index 35dfb831848..0dd54a69dac 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2131,7 +2131,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2131 } else { 2131 } else {
2132 struct sock *sk = skb->sk; 2132 struct sock *sk = skb->sk;
2133 queue_index = sk_tx_queue_get(sk); 2133 queue_index = sk_tx_queue_get(sk);
2134 if (queue_index < 0) { 2134 if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
2135 2135
2136 queue_index = 0; 2136 queue_index = 0;
2137 if (dev->real_num_tx_queues > 1) 2137 if (dev->real_num_tx_queues > 1)
diff --git a/net/core/dst.c b/net/core/dst.c
index 8abe628b79f..b99c7c7ffce 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -370,6 +370,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
370 370
371static struct notifier_block dst_dev_notifier = { 371static struct notifier_block dst_dev_notifier = {
372 .notifier_call = dst_dev_event, 372 .notifier_call = dst_dev_event,
373 .priority = -10, /* must be called after other network notifiers */
373}; 374};
374 375
375void __init dst_init(void) 376void __init dst_init(void)
diff --git a/net/core/filter.c b/net/core/filter.c
index 7beaec36b54..23e9b2a6b4c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
112 */ 112 */
113unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) 113unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
114{ 114{
115 struct sock_filter *fentry; /* We walk down these */
116 void *ptr; 115 void *ptr;
117 u32 A = 0; /* Accumulator */ 116 u32 A = 0; /* Accumulator */
118 u32 X = 0; /* Index Register */ 117 u32 X = 0; /* Index Register */
119 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ 118 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
119 unsigned long memvalid = 0;
120 u32 tmp; 120 u32 tmp;
121 int k; 121 int k;
122 int pc; 122 int pc;
123 123
124 BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
124 /* 125 /*
125 * Process array of filter instructions. 126 * Process array of filter instructions.
126 */ 127 */
127 for (pc = 0; pc < flen; pc++) { 128 for (pc = 0; pc < flen; pc++) {
128 fentry = &filter[pc]; 129 const struct sock_filter *fentry = &filter[pc];
130 u32 f_k = fentry->k;
129 131
130 switch (fentry->code) { 132 switch (fentry->code) {
131 case BPF_S_ALU_ADD_X: 133 case BPF_S_ALU_ADD_X:
132 A += X; 134 A += X;
133 continue; 135 continue;
134 case BPF_S_ALU_ADD_K: 136 case BPF_S_ALU_ADD_K:
135 A += fentry->k; 137 A += f_k;
136 continue; 138 continue;
137 case BPF_S_ALU_SUB_X: 139 case BPF_S_ALU_SUB_X:
138 A -= X; 140 A -= X;
139 continue; 141 continue;
140 case BPF_S_ALU_SUB_K: 142 case BPF_S_ALU_SUB_K:
141 A -= fentry->k; 143 A -= f_k;
142 continue; 144 continue;
143 case BPF_S_ALU_MUL_X: 145 case BPF_S_ALU_MUL_X:
144 A *= X; 146 A *= X;
145 continue; 147 continue;
146 case BPF_S_ALU_MUL_K: 148 case BPF_S_ALU_MUL_K:
147 A *= fentry->k; 149 A *= f_k;
148 continue; 150 continue;
149 case BPF_S_ALU_DIV_X: 151 case BPF_S_ALU_DIV_X:
150 if (X == 0) 152 if (X == 0)
@@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
152 A /= X; 154 A /= X;
153 continue; 155 continue;
154 case BPF_S_ALU_DIV_K: 156 case BPF_S_ALU_DIV_K:
155 A /= fentry->k; 157 A /= f_k;
156 continue; 158 continue;
157 case BPF_S_ALU_AND_X: 159 case BPF_S_ALU_AND_X:
158 A &= X; 160 A &= X;
159 continue; 161 continue;
160 case BPF_S_ALU_AND_K: 162 case BPF_S_ALU_AND_K:
161 A &= fentry->k; 163 A &= f_k;
162 continue; 164 continue;
163 case BPF_S_ALU_OR_X: 165 case BPF_S_ALU_OR_X:
164 A |= X; 166 A |= X;
165 continue; 167 continue;
166 case BPF_S_ALU_OR_K: 168 case BPF_S_ALU_OR_K:
167 A |= fentry->k; 169 A |= f_k;
168 continue; 170 continue;
169 case BPF_S_ALU_LSH_X: 171 case BPF_S_ALU_LSH_X:
170 A <<= X; 172 A <<= X;
171 continue; 173 continue;
172 case BPF_S_ALU_LSH_K: 174 case BPF_S_ALU_LSH_K:
173 A <<= fentry->k; 175 A <<= f_k;
174 continue; 176 continue;
175 case BPF_S_ALU_RSH_X: 177 case BPF_S_ALU_RSH_X:
176 A >>= X; 178 A >>= X;
177 continue; 179 continue;
178 case BPF_S_ALU_RSH_K: 180 case BPF_S_ALU_RSH_K:
179 A >>= fentry->k; 181 A >>= f_k;
180 continue; 182 continue;
181 case BPF_S_ALU_NEG: 183 case BPF_S_ALU_NEG:
182 A = -A; 184 A = -A;
183 continue; 185 continue;
184 case BPF_S_JMP_JA: 186 case BPF_S_JMP_JA:
185 pc += fentry->k; 187 pc += f_k;
186 continue; 188 continue;
187 case BPF_S_JMP_JGT_K: 189 case BPF_S_JMP_JGT_K:
188 pc += (A > fentry->k) ? fentry->jt : fentry->jf; 190 pc += (A > f_k) ? fentry->jt : fentry->jf;
189 continue; 191 continue;
190 case BPF_S_JMP_JGE_K: 192 case BPF_S_JMP_JGE_K:
191 pc += (A >= fentry->k) ? fentry->jt : fentry->jf; 193 pc += (A >= f_k) ? fentry->jt : fentry->jf;
192 continue; 194 continue;
193 case BPF_S_JMP_JEQ_K: 195 case BPF_S_JMP_JEQ_K:
194 pc += (A == fentry->k) ? fentry->jt : fentry->jf; 196 pc += (A == f_k) ? fentry->jt : fentry->jf;
195 continue; 197 continue;
196 case BPF_S_JMP_JSET_K: 198 case BPF_S_JMP_JSET_K:
197 pc += (A & fentry->k) ? fentry->jt : fentry->jf; 199 pc += (A & f_k) ? fentry->jt : fentry->jf;
198 continue; 200 continue;
199 case BPF_S_JMP_JGT_X: 201 case BPF_S_JMP_JGT_X:
200 pc += (A > X) ? fentry->jt : fentry->jf; 202 pc += (A > X) ? fentry->jt : fentry->jf;
@@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
209 pc += (A & X) ? fentry->jt : fentry->jf; 211 pc += (A & X) ? fentry->jt : fentry->jf;
210 continue; 212 continue;
211 case BPF_S_LD_W_ABS: 213 case BPF_S_LD_W_ABS:
212 k = fentry->k; 214 k = f_k;
213load_w: 215load_w:
214 ptr = load_pointer(skb, k, 4, &tmp); 216 ptr = load_pointer(skb, k, 4, &tmp);
215 if (ptr != NULL) { 217 if (ptr != NULL) {
@@ -218,7 +220,7 @@ load_w:
218 } 220 }
219 break; 221 break;
220 case BPF_S_LD_H_ABS: 222 case BPF_S_LD_H_ABS:
221 k = fentry->k; 223 k = f_k;
222load_h: 224load_h:
223 ptr = load_pointer(skb, k, 2, &tmp); 225 ptr = load_pointer(skb, k, 2, &tmp);
224 if (ptr != NULL) { 226 if (ptr != NULL) {
@@ -227,7 +229,7 @@ load_h:
227 } 229 }
228 break; 230 break;
229 case BPF_S_LD_B_ABS: 231 case BPF_S_LD_B_ABS:
230 k = fentry->k; 232 k = f_k;
231load_b: 233load_b:
232 ptr = load_pointer(skb, k, 1, &tmp); 234 ptr = load_pointer(skb, k, 1, &tmp);
233 if (ptr != NULL) { 235 if (ptr != NULL) {
@@ -242,32 +244,34 @@ load_b:
242 X = skb->len; 244 X = skb->len;
243 continue; 245 continue;
244 case BPF_S_LD_W_IND: 246 case BPF_S_LD_W_IND:
245 k = X + fentry->k; 247 k = X + f_k;
246 goto load_w; 248 goto load_w;
247 case BPF_S_LD_H_IND: 249 case BPF_S_LD_H_IND:
248 k = X + fentry->k; 250 k = X + f_k;
249 goto load_h; 251 goto load_h;
250 case BPF_S_LD_B_IND: 252 case BPF_S_LD_B_IND:
251 k = X + fentry->k; 253 k = X + f_k;
252 goto load_b; 254 goto load_b;
253 case BPF_S_LDX_B_MSH: 255 case BPF_S_LDX_B_MSH:
254 ptr = load_pointer(skb, fentry->k, 1, &tmp); 256 ptr = load_pointer(skb, f_k, 1, &tmp);
255 if (ptr != NULL) { 257 if (ptr != NULL) {
256 X = (*(u8 *)ptr & 0xf) << 2; 258 X = (*(u8 *)ptr & 0xf) << 2;
257 continue; 259 continue;
258 } 260 }
259 return 0; 261 return 0;
260 case BPF_S_LD_IMM: 262 case BPF_S_LD_IMM:
261 A = fentry->k; 263 A = f_k;
262 continue; 264 continue;
263 case BPF_S_LDX_IMM: 265 case BPF_S_LDX_IMM:
264 X = fentry->k; 266 X = f_k;
265 continue; 267 continue;
266 case BPF_S_LD_MEM: 268 case BPF_S_LD_MEM:
267 A = mem[fentry->k]; 269 A = (memvalid & (1UL << f_k)) ?
270 mem[f_k] : 0;
268 continue; 271 continue;
269 case BPF_S_LDX_MEM: 272 case BPF_S_LDX_MEM:
270 X = mem[fentry->k]; 273 X = (memvalid & (1UL << f_k)) ?
274 mem[f_k] : 0;
271 continue; 275 continue;
272 case BPF_S_MISC_TAX: 276 case BPF_S_MISC_TAX:
273 X = A; 277 X = A;
@@ -276,14 +280,16 @@ load_b:
276 A = X; 280 A = X;
277 continue; 281 continue;
278 case BPF_S_RET_K: 282 case BPF_S_RET_K:
279 return fentry->k; 283 return f_k;
280 case BPF_S_RET_A: 284 case BPF_S_RET_A:
281 return A; 285 return A;
282 case BPF_S_ST: 286 case BPF_S_ST:
283 mem[fentry->k] = A; 287 memvalid |= 1UL << f_k;
288 mem[f_k] = A;
284 continue; 289 continue;
285 case BPF_S_STX: 290 case BPF_S_STX:
286 mem[fentry->k] = X; 291 memvalid |= 1UL << f_k;
292 mem[f_k] = X;
287 continue; 293 continue;
288 default: 294 default:
289 WARN_ON(1); 295 WARN_ON(1);
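
The sk_run_filter() change above adds a memvalid bitmap so that BPF loads from scratch slots the program never stored to return 0 instead of uninitialized stack contents (the new BUILD_BUG_ON guarantees the bitmap fits in one long). A standalone sketch of that bookkeeping; struct scratch and the helper names are illustrative:

#include <assert.h>
#include <stdint.h>

#define BPF_MEMWORDS 16

struct scratch {
	uint32_t mem[BPF_MEMWORDS];
	unsigned long memvalid;		/* bit k set iff mem[k] was stored to */
};

static void scratch_store(struct scratch *s, unsigned int k, uint32_t val)
{
	s->memvalid |= 1UL << k;
	s->mem[k] = val;
}

static uint32_t scratch_load(const struct scratch *s, unsigned int k)
{
	/* Unwritten slots read as zero rather than stack garbage. */
	return (s->memvalid & (1UL << k)) ? s->mem[k] : 0;
}

int main(void)
{
	struct scratch s;

	s.memvalid = 0;			/* mem[] intentionally left uninitialized */
	scratch_store(&s, 3, 0xdeadbeef);
	assert(scratch_load(&s, 3) == 0xdeadbeef);
	assert(scratch_load(&s, 7) == 0);	/* never stored: reads as zero */
	return 0;
}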
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index fbce4b05a53..33bc3823ac6 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -887,7 +887,7 @@ static ssize_t pktgen_if_write(struct file *file,
887 i += len; 887 i += len;
888 888
889 if (debug) { 889 if (debug) {
890 size_t copy = min(count, 1023); 890 size_t copy = min_t(size_t, count, 1023);
891 char tb[copy + 1]; 891 char tb[copy + 1];
892 if (copy_from_user(tb, user_buffer, copy)) 892 if (copy_from_user(tb, user_buffer, copy))
893 return -EFAULT; 893 return -EFAULT;
@@ -2612,8 +2612,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2612 /* Update any of the values, used when we're incrementing various 2612 /* Update any of the values, used when we're incrementing various
2613 * fields. 2613 * fields.
2614 */ 2614 */
2615 queue_map = pkt_dev->cur_queue_map;
2616 mod_cur_headers(pkt_dev); 2615 mod_cur_headers(pkt_dev);
2616 queue_map = pkt_dev->cur_queue_map;
2617 2617
2618 datalen = (odev->hard_header_len + 16) & ~0xf; 2618 datalen = (odev->hard_header_len + 16) & ~0xf;
2619 2619
@@ -2976,8 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2976 /* Update any of the values, used when we're incrementing various 2976 /* Update any of the values, used when we're incrementing various
2977 * fields. 2977 * fields.
2978 */ 2978 */
2979 queue_map = pkt_dev->cur_queue_map;
2980 mod_cur_headers(pkt_dev); 2979 mod_cur_headers(pkt_dev);
2980 queue_map = pkt_dev->cur_queue_map;
2981 2981
2982 skb = __netdev_alloc_skb(odev, 2982 skb = __netdev_alloc_skb(odev,
2983 pkt_dev->cur_pkt_size + 64 2983 pkt_dev->cur_pkt_size + 64
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8121268ddbd..841c287ef40 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -347,16 +347,17 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
347 if (!ops) 347 if (!ops)
348 return 0; 348 return 0;
349 349
350 size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ 350 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
351 nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ 351 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
352 352
353 if (ops->get_size) 353 if (ops->get_size)
354 /* IFLA_INFO_DATA + nested data */ 354 /* IFLA_INFO_DATA + nested data */
355 size += nlmsg_total_size(sizeof(struct nlattr)) + 355 size += nla_total_size(sizeof(struct nlattr)) +
356 ops->get_size(dev); 356 ops->get_size(dev);
357 357
358 if (ops->get_xstats_size) 358 if (ops->get_xstats_size)
359 size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */ 359 /* IFLA_INFO_XSTATS */
360 size += nla_total_size(ops->get_xstats_size(dev));
360 361
361 return size; 362 return size;
362} 363}
diff --git a/net/core/sock.c b/net/core/sock.c
index 3eed5424e65..fb608011146 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1653,10 +1653,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
1653{ 1653{
1654 struct proto *prot = sk->sk_prot; 1654 struct proto *prot = sk->sk_prot;
1655 int amt = sk_mem_pages(size); 1655 int amt = sk_mem_pages(size);
1656 int allocated; 1656 long allocated;
1657 1657
1658 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; 1658 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1659 allocated = atomic_add_return(amt, prot->memory_allocated); 1659 allocated = atomic_long_add_return(amt, prot->memory_allocated);
1660 1660
1661 /* Under limit. */ 1661 /* Under limit. */
1662 if (allocated <= prot->sysctl_mem[0]) { 1662 if (allocated <= prot->sysctl_mem[0]) {
@@ -1714,7 +1714,7 @@ suppress_allocation:
1714 1714
1715 /* Alas. Undo changes. */ 1715 /* Alas. Undo changes. */
1716 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; 1716 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1717 atomic_sub(amt, prot->memory_allocated); 1717 atomic_long_sub(amt, prot->memory_allocated);
1718 return 0; 1718 return 0;
1719} 1719}
1720EXPORT_SYMBOL(__sk_mem_schedule); 1720EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1727,12 +1727,12 @@ void __sk_mem_reclaim(struct sock *sk)
1727{ 1727{
1728 struct proto *prot = sk->sk_prot; 1728 struct proto *prot = sk->sk_prot;
1729 1729
1730 atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 1730 atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
1731 prot->memory_allocated); 1731 prot->memory_allocated);
1732 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; 1732 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1733 1733
1734 if (prot->memory_pressure && *prot->memory_pressure && 1734 if (prot->memory_pressure && *prot->memory_pressure &&
1735 (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) 1735 (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1736 *prot->memory_pressure = 0; 1736 *prot->memory_pressure = 0;
1737} 1737}
1738EXPORT_SYMBOL(__sk_mem_reclaim); 1738EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -2452,12 +2452,12 @@ static char proto_method_implemented(const void *method)
2452 2452
2453static void proto_seq_printf(struct seq_file *seq, struct proto *proto) 2453static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2454{ 2454{
2455 seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s " 2455 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
2456 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", 2456 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2457 proto->name, 2457 proto->name,
2458 proto->obj_size, 2458 proto->obj_size,
2459 sock_prot_inuse_get(seq_file_net(seq), proto), 2459 sock_prot_inuse_get(seq_file_net(seq), proto),
2460 proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1, 2460 proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
2461 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", 2461 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2462 proto->max_header, 2462 proto->max_header,
2463 proto->slab == NULL ? "no" : "yes", 2463 proto->slab == NULL ? "no" : "yes",
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index d6b93d19790..a76b78de679 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -155,7 +155,7 @@ static const struct proto_ops dn_proto_ops;
155static DEFINE_RWLOCK(dn_hash_lock); 155static DEFINE_RWLOCK(dn_hash_lock);
156static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; 156static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
157static struct hlist_head dn_wild_sk; 157static struct hlist_head dn_wild_sk;
158static atomic_t decnet_memory_allocated; 158static atomic_long_t decnet_memory_allocated;
159 159
160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); 160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); 161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index be3eb8e2328..28f8b5e5f73 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -38,7 +38,7 @@ int decnet_log_martians = 1;
38int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; 38int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;
39 39
40/* Reasonable defaults, I hope, based on tcp's defaults */ 40/* Reasonable defaults, I hope, based on tcp's defaults */
41int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; 41long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
42int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; 42int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
43int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; 43int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
44 44
@@ -324,7 +324,7 @@ static ctl_table dn_table[] = {
324 .data = &sysctl_decnet_mem, 324 .data = &sysctl_decnet_mem,
325 .maxlen = sizeof(sysctl_decnet_mem), 325 .maxlen = sizeof(sysctl_decnet_mem),
326 .mode = 0644, 326 .mode = 0644,
327 .proc_handler = proc_dointvec, 327 .proc_handler = proc_doulongvec_minmax
328 }, 328 },
329 { 329 {
330 .procname = "decnet_rmem", 330 .procname = "decnet_rmem",
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index a29edf2219c..c079cc0ec65 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -47,11 +47,8 @@ extern int fib_detect_death(struct fib_info *fi, int order,
47static inline void fib_result_assign(struct fib_result *res, 47static inline void fib_result_assign(struct fib_result *res,
48 struct fib_info *fi) 48 struct fib_info *fi)
49{ 49{
50 if (res->fi != NULL) 50 /* we used to play games with refcounts, but we now use RCU */
51 fib_info_put(res->fi);
52 res->fi = fi; 51 res->fi = fi;
53 if (fi != NULL)
54 atomic_inc(&fi->fib_clntref);
55} 52}
56 53
57#endif /* _FIB_LOOKUP_H */ 54#endif /* _FIB_LOOKUP_H */
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c8877c6c721..3c53c2d89e3 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2306,10 +2306,8 @@ void ip_mc_drop_socket(struct sock *sk)
2306 2306
2307 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2307 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2308 (void) ip_mc_leave_src(sk, iml, in_dev); 2308 (void) ip_mc_leave_src(sk, iml, in_dev);
2309 if (in_dev != NULL) { 2309 if (in_dev != NULL)
2310 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2310 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2311 in_dev_put(in_dev);
2312 }
2313 /* decrease mem now to avoid the memleak warning */ 2311 /* decrease mem now to avoid the memleak warning */
2314 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 2312 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
2315 call_rcu(&iml->rcu, ip_mc_socklist_reclaim); 2313 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ba804266584..2ada17129fc 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -490,9 +490,11 @@ static int inet_csk_diag_dump(struct sock *sk,
490{ 490{
491 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 491 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
492 492
493 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 493 if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
494 struct inet_diag_entry entry; 494 struct inet_diag_entry entry;
495 struct rtattr *bc = (struct rtattr *)(r + 1); 495 const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
496 sizeof(*r),
497 INET_DIAG_REQ_BYTECODE);
496 struct inet_sock *inet = inet_sk(sk); 498 struct inet_sock *inet = inet_sk(sk);
497 499
498 entry.family = sk->sk_family; 500 entry.family = sk->sk_family;
@@ -512,7 +514,7 @@ static int inet_csk_diag_dump(struct sock *sk,
512 entry.dport = ntohs(inet->inet_dport); 514 entry.dport = ntohs(inet->inet_dport);
513 entry.userlocks = sk->sk_userlocks; 515 entry.userlocks = sk->sk_userlocks;
514 516
515 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) 517 if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
516 return 0; 518 return 0;
517 } 519 }
518 520
@@ -527,9 +529,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
527{ 529{
528 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 530 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
529 531
530 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 532 if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
531 struct inet_diag_entry entry; 533 struct inet_diag_entry entry;
532 struct rtattr *bc = (struct rtattr *)(r + 1); 534 const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
535 sizeof(*r),
536 INET_DIAG_REQ_BYTECODE);
533 537
534 entry.family = tw->tw_family; 538 entry.family = tw->tw_family;
535#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 539#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -548,7 +552,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
548 entry.dport = ntohs(tw->tw_dport); 552 entry.dport = ntohs(tw->tw_dport);
549 entry.userlocks = 0; 553 entry.userlocks = 0;
550 554
551 if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) 555 if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
552 return 0; 556 return 0;
553 } 557 }
554 558
@@ -618,7 +622,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
618 struct inet_diag_req *r = NLMSG_DATA(cb->nlh); 622 struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
619 struct inet_connection_sock *icsk = inet_csk(sk); 623 struct inet_connection_sock *icsk = inet_csk(sk);
620 struct listen_sock *lopt; 624 struct listen_sock *lopt;
621 struct rtattr *bc = NULL; 625 const struct nlattr *bc = NULL;
622 struct inet_sock *inet = inet_sk(sk); 626 struct inet_sock *inet = inet_sk(sk);
623 int j, s_j; 627 int j, s_j;
624 int reqnum, s_reqnum; 628 int reqnum, s_reqnum;
@@ -638,8 +642,9 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
638 if (!lopt || !lopt->qlen) 642 if (!lopt || !lopt->qlen)
639 goto out; 643 goto out;
640 644
641 if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { 645 if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
642 bc = (struct rtattr *)(r + 1); 646 bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
647 INET_DIAG_REQ_BYTECODE);
643 entry.sport = inet->inet_num; 648 entry.sport = inet->inet_num;
644 entry.userlocks = sk->sk_userlocks; 649 entry.userlocks = sk->sk_userlocks;
645 } 650 }
@@ -672,8 +677,8 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
672 &ireq->rmt_addr; 677 &ireq->rmt_addr;
673 entry.dport = ntohs(ireq->rmt_port); 678 entry.dport = ntohs(ireq->rmt_port);
674 679
675 if (!inet_diag_bc_run(RTA_DATA(bc), 680 if (!inet_diag_bc_run(nla_data(bc),
676 RTA_PAYLOAD(bc), &entry)) 681 nla_len(bc), &entry))
677 continue; 682 continue;
678 } 683 }
679 684
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3cad2591ace..3fac340a28d 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -927,6 +927,7 @@ static int get_info(struct net *net, void __user *user,
 		private = &tmp;
 	}
 #endif
+	memset(&info, 0, sizeof(info));
 	info.valid_hooks = t->valid_hooks;
 	memcpy(info.hook_entry, private->hook_entry,
 	       sizeof(info.hook_entry));
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d31b007a6d8..a846d633b3b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1124,6 +1124,7 @@ static int get_info(struct net *net, void __user *user,
 		private = &tmp;
 	}
 #endif
+	memset(&info, 0, sizeof(info));
 	info.valid_hooks = t->valid_hooks;
 	memcpy(info.hook_entry, private->hook_entry,
 	       sizeof(info.hook_entry));
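
Both get_info() hunks (and the ip6_tables one further down) zero the whole info struct before filling it. The struct is later copied to user space, so any padding bytes or members left untouched would leak uninitialized kernel stack contents. A userspace sketch of the same hygiene, with a hypothetical reply struct rather than the real xt_getinfo layout:

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical reply struct: the padding after 'valid' and the unused
	 * tail of name[] are what would leak if the struct were copied out
	 * without the memset(). */
	struct reply {
		int  valid;		/* followed by 4 bytes of padding on LP64 */
		long entries;
		char name[32];		/* callers rarely fill all 32 bytes */
	};

	static void fill_reply(struct reply *r, const char *name)
	{
		memset(r, 0, sizeof(*r));	/* zero padding and unused bytes */
		r->valid = 1;
		r->entries = 42;
		strncpy(r->name, name, sizeof(r->name) - 1);
		/* copying *r to an untrusted destination now leaks nothing */
	}

	int main(void)
	{
		struct reply r;

		fill_reply(&r, "filter");
		printf("%s: %ld\n", r.name, r.entries);
		return 0;
	}
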
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 295c97431e4..c04787ce1a7 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -47,26 +47,6 @@ __nf_nat_proto_find(u_int8_t protonum)
47 return rcu_dereference(nf_nat_protos[protonum]); 47 return rcu_dereference(nf_nat_protos[protonum]);
48} 48}
49 49
50static const struct nf_nat_protocol *
51nf_nat_proto_find_get(u_int8_t protonum)
52{
53 const struct nf_nat_protocol *p;
54
55 rcu_read_lock();
56 p = __nf_nat_proto_find(protonum);
57 if (!try_module_get(p->me))
58 p = &nf_nat_unknown_protocol;
59 rcu_read_unlock();
60
61 return p;
62}
63
64static void
65nf_nat_proto_put(const struct nf_nat_protocol *p)
66{
67 module_put(p->me);
68}
69
70/* We keep an extra hash for each conntrack, for fast searching. */ 50/* We keep an extra hash for each conntrack, for fast searching. */
71static inline unsigned int 51static inline unsigned int
72hash_by_src(const struct net *net, u16 zone, 52hash_by_src(const struct net *net, u16 zone,
@@ -588,6 +568,26 @@ static struct nf_ct_ext_type nat_extend __read_mostly = {
588#include <linux/netfilter/nfnetlink.h> 568#include <linux/netfilter/nfnetlink.h>
589#include <linux/netfilter/nfnetlink_conntrack.h> 569#include <linux/netfilter/nfnetlink_conntrack.h>
590 570
571static const struct nf_nat_protocol *
572nf_nat_proto_find_get(u_int8_t protonum)
573{
574 const struct nf_nat_protocol *p;
575
576 rcu_read_lock();
577 p = __nf_nat_proto_find(protonum);
578 if (!try_module_get(p->me))
579 p = &nf_nat_unknown_protocol;
580 rcu_read_unlock();
581
582 return p;
583}
584
585static void
586nf_nat_proto_put(const struct nf_nat_protocol *p)
587{
588 module_put(p->me);
589}
590
591static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { 591static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
592 [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, 592 [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
593 [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, 593 [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4ae1f203f7c..1b48eb1ed45 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -59,13 +59,13 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 	local_bh_enable();
 
 	socket_seq_show(seq);
-	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
+	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
 		   sock_prot_inuse_get(net, &tcp_prot), orphans,
 		   tcp_death_row.tw_count, sockets,
-		   atomic_read(&tcp_memory_allocated));
-	seq_printf(seq, "UDP: inuse %d mem %d\n",
+		   atomic_long_read(&tcp_memory_allocated));
+	seq_printf(seq, "UDP: inuse %d mem %ld\n",
 		   sock_prot_inuse_get(net, &udp_prot),
-		   atomic_read(&udp_memory_allocated));
+		   atomic_long_read(&udp_memory_allocated));
 	seq_printf(seq, "UDPLITE: inuse %d\n",
 		   sock_prot_inuse_get(net, &udplite_prot));
 	seq_printf(seq, "RAW: inuse %d\n",
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d96c1da4b17..e91911d7aae 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -398,7 +398,7 @@ static struct ctl_table ipv4_table[] = {
 		.data = &sysctl_tcp_mem,
 		.maxlen = sizeof(sysctl_tcp_mem),
 		.mode = 0644,
-		.proc_handler = proc_dointvec
+		.proc_handler = proc_doulongvec_minmax
 	},
 	{
 		.procname = "tcp_wmem",
@@ -602,8 +602,7 @@ static struct ctl_table ipv4_table[] = {
 		.data = &sysctl_udp_mem,
 		.maxlen = sizeof(sysctl_udp_mem),
 		.mode = 0644,
-		.proc_handler = proc_dointvec_minmax,
-		.extra1 = &zero
+		.proc_handler = proc_doulongvec_minmax,
 	},
 	{
 		.procname = "udp_rmem_min",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1664a0590bb..08141996948 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -282,7 +282,7 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
-int sysctl_tcp_mem[3] __read_mostly;
+long sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;
 
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
-atomic_t tcp_memory_allocated;	/* Current allocated memory. */
+atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
 EXPORT_SYMBOL(tcp_memory_allocated);
 
 /*
@@ -2246,7 +2246,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		/* Values greater than interface MTU won't take effect. However
 		 * at the point when this call is done we typically don't yet
 		 * know which interface is going to be used */
-		if (val < 8 || val > MAX_TCP_WINDOW) {
+		if (val < 64 || val > MAX_TCP_WINDOW) {
 			err = -EINVAL;
 			break;
 		}
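
The sysctl_tcp_mem / tcp_memory_allocated conversions here (mirrored for UDP, SCTP and DECnet elsewhere in this diff) matter because these values are accounted in pages: a 32-bit signed counter can only describe about 2^31 pages. A back-of-the-envelope check, assuming a 4 KiB page (an assumption, not something stated in the patch):

	#include <stdio.h>
	#include <limits.h>

	int main(void)
	{
		long page = 4096;	/* typical PAGE_SIZE; an assumption */
		double tib = 1024.0 * 1024 * 1024 * 1024;

		printf("int-sized page counter caps at ~%.1f TiB\n",
		       INT_MAX * (double)page / tib);
		printf("long (LP64) page counter caps at ~%.0f TiB\n",
		       LONG_MAX * (double)page / tib);
		return 0;
	}
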
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3357f69e353..6d8ab1c4efc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -259,8 +259,11 @@ static void tcp_fixup_sndbuf(struct sock *sk)
259 int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 + 259 int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
260 sizeof(struct sk_buff); 260 sizeof(struct sk_buff);
261 261
262 if (sk->sk_sndbuf < 3 * sndmem) 262 if (sk->sk_sndbuf < 3 * sndmem) {
263 sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]); 263 sk->sk_sndbuf = 3 * sndmem;
264 if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
265 sk->sk_sndbuf = sysctl_tcp_wmem[2];
266 }
264} 267}
265 268
266/* 2. Tuning advertised window (window_clamp, rcv_ssthresh) 269/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -396,7 +399,7 @@ static void tcp_clamp_window(struct sock *sk)
396 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && 399 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
397 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && 400 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
398 !tcp_memory_pressure && 401 !tcp_memory_pressure &&
399 atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { 402 atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
400 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), 403 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
401 sysctl_tcp_rmem[2]); 404 sysctl_tcp_rmem[2]);
402 } 405 }
@@ -4861,7 +4864,7 @@ static int tcp_should_expand_sndbuf(struct sock *sk)
4861 return 0; 4864 return 0;
4862 4865
4863 /* If we are under soft global TCP memory pressure, do not expand. */ 4866 /* If we are under soft global TCP memory pressure, do not expand. */
4864 if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) 4867 if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
4865 return 0; 4868 return 0;
4866 4869
4867 /* If we filled the congestion window, do not expand. */ 4870 /* If we filled the congestion window, do not expand. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8f8527d4168..69ccbc1dde9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -415,6 +415,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
415 !icsk->icsk_backoff) 415 !icsk->icsk_backoff)
416 break; 416 break;
417 417
418 if (sock_owned_by_user(sk))
419 break;
420
418 icsk->icsk_backoff--; 421 icsk->icsk_backoff--;
419 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << 422 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
420 icsk->icsk_backoff; 423 icsk->icsk_backoff;
@@ -429,11 +432,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
429 if (remaining) { 432 if (remaining) {
430 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 433 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
431 remaining, TCP_RTO_MAX); 434 remaining, TCP_RTO_MAX);
432 } else if (sock_owned_by_user(sk)) {
433 /* RTO revert clocked out retransmission,
434 * but socket is locked. Will defer. */
435 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
436 HZ/20, TCP_RTO_MAX);
437 } else { 435 } else {
438 /* RTO revert clocked out retransmission. 436 /* RTO revert clocked out retransmission.
439 * Will retransmit now */ 437 * Will retransmit now */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 28cb2d733a3..5e0a3a582a5 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -110,7 +110,7 @@
110struct udp_table udp_table __read_mostly; 110struct udp_table udp_table __read_mostly;
111EXPORT_SYMBOL(udp_table); 111EXPORT_SYMBOL(udp_table);
112 112
113int sysctl_udp_mem[3] __read_mostly; 113long sysctl_udp_mem[3] __read_mostly;
114EXPORT_SYMBOL(sysctl_udp_mem); 114EXPORT_SYMBOL(sysctl_udp_mem);
115 115
116int sysctl_udp_rmem_min __read_mostly; 116int sysctl_udp_rmem_min __read_mostly;
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(sysctl_udp_rmem_min);
119int sysctl_udp_wmem_min __read_mostly; 119int sysctl_udp_wmem_min __read_mostly;
120EXPORT_SYMBOL(sysctl_udp_wmem_min); 120EXPORT_SYMBOL(sysctl_udp_wmem_min);
121 121
122atomic_t udp_memory_allocated; 122atomic_long_t udp_memory_allocated;
123EXPORT_SYMBOL(udp_memory_allocated); 123EXPORT_SYMBOL(udp_memory_allocated);
124 124
125#define MAX_UDP_PORTS 65536 125#define MAX_UDP_PORTS 65536
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e048ec62d10..b41ce0f0d51 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2740,10 +2740,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2740 /* Flag it for later restoration when link comes up */ 2740 /* Flag it for later restoration when link comes up */
2741 ifa->flags |= IFA_F_TENTATIVE; 2741 ifa->flags |= IFA_F_TENTATIVE;
2742 ifa->state = INET6_IFADDR_STATE_DAD; 2742 ifa->state = INET6_IFADDR_STATE_DAD;
2743
2744 write_unlock_bh(&idev->lock);
2745
2746 in6_ifa_hold(ifa);
2747 } else { 2743 } else {
2748 list_del(&ifa->if_list); 2744 list_del(&ifa->if_list);
2749 2745
@@ -2758,19 +2754,15 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2758 ifa->state = INET6_IFADDR_STATE_DEAD; 2754 ifa->state = INET6_IFADDR_STATE_DEAD;
2759 spin_unlock_bh(&ifa->state_lock); 2755 spin_unlock_bh(&ifa->state_lock);
2760 2756
2761 if (state == INET6_IFADDR_STATE_DEAD) 2757 if (state == INET6_IFADDR_STATE_DEAD) {
2762 goto put_ifa; 2758 in6_ifa_put(ifa);
2759 } else {
2760 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2761 atomic_notifier_call_chain(&inet6addr_chain,
2762 NETDEV_DOWN, ifa);
2763 }
2764 write_lock_bh(&idev->lock);
2763 } 2765 }
2764
2765 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2766 if (ifa->state == INET6_IFADDR_STATE_DEAD)
2767 atomic_notifier_call_chain(&inet6addr_chain,
2768 NETDEV_DOWN, ifa);
2769
2770put_ifa:
2771 in6_ifa_put(ifa);
2772
2773 write_lock_bh(&idev->lock);
2774 } 2766 }
2775 2767
2776 list_splice(&keep_list, &idev->addr_list); 2768 list_splice(&keep_list, &idev->addr_list);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 51df035897e..455582384ec 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1137,6 +1137,7 @@ static int get_info(struct net *net, void __user *user,
 		private = &tmp;
 	}
 #endif
+	memset(&info, 0, sizeof(info));
 	info.valid_hooks = t->valid_hooks;
 	memcpy(info.hook_entry, private->hook_entry,
 	       sizeof(info.hook_entry));
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 3a3f129a44c..79d43aa8fa8 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -286,7 +286,7 @@ found:
 
 	/* Check for overlap with preceding fragment. */
 	if (prev &&
-	    (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+	    (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
 		goto discard_fq;
 
 	/* Look for overlap with succeeding segment. */
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index c7ba3149633..0f276645375 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -349,7 +349,7 @@ found:
 
 	/* Check for overlap with preceding fragment. */
 	if (prev &&
-	    (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+	    (FRAG6_CB(prev)->offset + prev->len) > offset)
 		goto discard_fq;
 
 	/* Look for overlap with succeeding segment. */
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 25661f968f3..96455ffb76f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1945,8 +1945,12 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1945 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 1945 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1946 struct neighbour *neigh; 1946 struct neighbour *neigh;
1947 1947
1948 if (rt == NULL) 1948 if (rt == NULL) {
1949 if (net_ratelimit())
1950 pr_warning("IPv6: Maximum number of routes reached,"
1951 " consider increasing route/max_size.\n");
1949 return ERR_PTR(-ENOMEM); 1952 return ERR_PTR(-ENOMEM);
1953 }
1950 1954
1951 dev_hold(net->loopback_dev); 1955 dev_hold(net->loopback_dev);
1952 in6_dev_hold(idev); 1956 in6_dev_hold(idev);
@@ -2741,6 +2745,7 @@ static void __net_exit ip6_route_net_exit(struct net *net)
2741 kfree(net->ipv6.ip6_prohibit_entry); 2745 kfree(net->ipv6.ip6_prohibit_entry);
2742 kfree(net->ipv6.ip6_blk_hole_entry); 2746 kfree(net->ipv6.ip6_blk_hole_entry);
2743#endif 2747#endif
2748 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2744} 2749}
2745 2750
2746static struct pernet_operations ip6_route_net_ops = { 2751static struct pernet_operations ip6_route_net_ops = {
@@ -2832,5 +2837,6 @@ void ip6_route_cleanup(void)
2832 xfrm6_fini(); 2837 xfrm6_fini();
2833 fib6_gc_cleanup(); 2838 fib6_gc_cleanup();
2834 unregister_pernet_subsys(&ip6_route_net_ops); 2839 unregister_pernet_subsys(&ip6_route_net_ops);
2840 dst_entries_destroy(&ip6_dst_blackhole_ops);
2835 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); 2841 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2836} 2842}
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 104ec3b283d..b8dbae82fab 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -249,7 +249,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
 	struct seq_file *seq;
 	int rc = -ENOMEM;
 
-	pd = kzalloc(GFP_KERNEL, sizeof(*pd));
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (pd == NULL)
 		goto out;
 
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index f9163b12c7f..7aa85591dbe 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -391,6 +391,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
391 u32 hw_reconf_flags = 0; 391 u32 hw_reconf_flags = 0;
392 int i; 392 int i;
393 393
394 if (local->scan_sdata == sdata)
395 ieee80211_scan_cancel(local);
396
394 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 397 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
395 398
396 /* 399 /*
@@ -523,9 +526,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
523 synchronize_rcu(); 526 synchronize_rcu();
524 skb_queue_purge(&sdata->skb_queue); 527 skb_queue_purge(&sdata->skb_queue);
525 528
526 if (local->scan_sdata == sdata)
527 ieee80211_scan_cancel(local);
528
529 /* 529 /*
530 * Disable beaconing here for mesh only, AP and IBSS 530 * Disable beaconing here for mesh only, AP and IBSS
531 * are already taken care of. 531 * are already taken care of.
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 1eacf8d9966..27a5ea6b6a0 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1312,7 +1312,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
1312 if (!hash) { 1312 if (!hash) {
1313 *vmalloced = 1; 1313 *vmalloced = 1;
1314 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); 1314 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1315 hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); 1315 hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1316 PAGE_KERNEL);
1316 } 1317 }
1317 1318
1318 if (hash && nulls) 1319 if (hash && nulls)
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index ed6d9295802..dc7bb74110d 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -292,6 +292,12 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
 
 		for (i = 0; i < MAX_NF_CT_PROTO; i++)
 			proto_array[i] = &nf_conntrack_l4proto_generic;
+
+		/* Before making proto_array visible to lockless readers,
+		 * we must make sure its content is committed to memory.
+		 */
+		smp_wmb();
+
 		nf_ct_protos[l4proto->l3proto] = proto_array;
 	} else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
 		   &nf_conntrack_l4proto_generic) {
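
The smp_wmb() added here is the usual initialise-then-publish pattern: every slot of proto_array must be globally visible before the nf_ct_protos[] pointer that lockless readers dereference is stored. A hedged userspace analogy using C11 release/acquire atomics rather than the kernel's smp_wmb()/RCU primitives:

	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>

	#define N 8

	static int data[N];
	static _Atomic(int *) table;	/* pointer readers dereference locklessly */

	static void *writer(void *arg)
	{
		(void)arg;
		for (int i = 0; i < N; i++)
			data[i] = i * i;		/* initialise first ... */
		atomic_store_explicit(&table, data,	/* ... then publish */
				      memory_order_release);
		return NULL;
	}

	static void *reader(void *arg)
	{
		int *p;

		(void)arg;
		while (!(p = atomic_load_explicit(&table, memory_order_acquire)))
			;				/* spin until published */
		printf("table[3] = %d\n", p[3]);	/* sees the initialised value */
		return NULL;
	}

	int main(void)
	{
		pthread_t w, r;

		pthread_create(&r, NULL, reader, NULL);
		pthread_create(&w, NULL, writer, NULL);
		pthread_join(r, NULL);
		pthread_join(w, NULL);
		return 0;
	}

On the kernel side the read path typically goes through rcu_dereference(); the explicit smp_wmb() before a plain store is the older, open-coded form of the same publish ordering.
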
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3616f27b9d4..8298e676f5a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1610,9 +1610,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	err = -EINVAL;
 	vnet_hdr_len = sizeof(vnet_hdr);
-	if ((len -= vnet_hdr_len) < 0)
+	if (len < vnet_hdr_len)
 		goto out_free;
 
+	len -= vnet_hdr_len;
+
 	if (skb_is_gso(skb)) {
 		struct skb_shared_info *sinfo = skb_shinfo(skb);
 
@@ -1719,7 +1721,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
 	if (dev)
-		strlcpy(uaddr->sa_data, dev->name, 15);
+		strncpy(uaddr->sa_data, dev->name, 14);
 	else
 		memset(uaddr->sa_data, 0, 14);
 	rcu_read_unlock();
@@ -1742,6 +1744,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
 	sll->sll_family = AF_PACKET;
 	sll->sll_ifindex = po->ifindex;
 	sll->sll_protocol = po->num;
+	sll->sll_pkttype = 0;
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
 	if (dev) {
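
The first packet_recvmsg() hunk validates len against the virtio-net header size before shrinking it, instead of subtracting first and testing for a negative result; the failure path then still sees the caller's original length rather than an already-decremented one. The same check-then-consume shape in a standalone sketch (struct and field names here are hypothetical):

	#include <stdio.h>
	#include <stddef.h>
	#include <string.h>
	#include <stdint.h>

	struct vhdr { uint32_t flags; uint32_t payload_len; };	/* hypothetical */

	/* Returns the payload length, or -1 if the buffer cannot hold the header. */
	static long parse_frame(const unsigned char *buf, size_t len)
	{
		struct vhdr h;

		if (len < sizeof(h))		/* validate before consuming */
			return -1;
		memcpy(&h, buf, sizeof(h));
		len -= sizeof(h);		/* only now shrink the remaining length */
		return (size_t)h.payload_len <= len ? (long)h.payload_len : -1;
	}

	int main(void)
	{
		unsigned char frame[16] = { 0, 0, 0, 0, 4, 0, 0, 0, 'd', 'a', 't', 'a' };

		printf("full frame: %ld\n", parse_frame(frame, sizeof(frame)));
		printf("truncated:  %ld\n", parse_frame(frame, 3));
		return 0;
	}
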
diff --git a/net/rds/loop.c b/net/rds/loop.c
index c390156b426..aeec1d483b1 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -134,8 +134,12 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
134static void rds_loop_conn_free(void *arg) 134static void rds_loop_conn_free(void *arg)
135{ 135{
136 struct rds_loop_connection *lc = arg; 136 struct rds_loop_connection *lc = arg;
137 unsigned long flags;
138
137 rdsdebug("lc %p\n", lc); 139 rdsdebug("lc %p\n", lc);
140 spin_lock_irqsave(&loop_conns_lock, flags);
138 list_del(&lc->loop_node); 141 list_del(&lc->loop_node);
142 spin_unlock_irqrestore(&loop_conns_lock, flags);
139 kfree(lc); 143 kfree(lc);
140} 144}
141 145
diff --git a/net/rds/message.c b/net/rds/message.c
index 848cff45183..1fd3d29023d 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -249,8 +249,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
249 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); 249 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
250 rm->data.op_nents = ceil(total_len, PAGE_SIZE); 250 rm->data.op_nents = ceil(total_len, PAGE_SIZE);
251 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); 251 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
252 if (!rm->data.op_sg) 252 if (!rm->data.op_sg) {
253 rds_message_put(rm);
253 return ERR_PTR(-ENOMEM); 254 return ERR_PTR(-ENOMEM);
255 }
254 256
255 for (i = 0; i < rm->data.op_nents; ++i) { 257 for (i = 0; i < rm->data.op_nents; ++i) {
256 sg_set_page(&rm->data.op_sg[i], 258 sg_set_page(&rm->data.op_sg[i],
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 08a8c6cf2d1..8e0a32001c9 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -221,7 +221,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
 static void rds_tcp_conn_free(void *arg)
 {
 	struct rds_tcp_connection *tc = arg;
+	unsigned long flags;
 	rdsdebug("freeing tc %p\n", tc);
+
+	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+	list_del(&tc->t_tcp_node);
+	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
 	kmem_cache_free(rds_tcp_conn_slab, tc);
 }
 
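
Both RDS conn_free paths (loop and tcp) now take the connection-list spinlock around list_del(), so a connection being freed cannot race with another CPU walking or modifying the list. A userspace analogy with a pthread mutex guarding a hand-rolled circular list (a toy list, not the kernel's list.h API):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *prev, *next; int id; };

	static struct node head = { &head, &head, -1 };	/* circular list head */
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	static void list_add_locked(struct node *n)
	{
		pthread_mutex_lock(&list_lock);
		n->next = head.next;
		n->prev = &head;
		head.next->prev = n;
		head.next = n;
		pthread_mutex_unlock(&list_lock);
	}

	static void node_free(struct node *n)
	{
		/* unlink under the same lock that insertion and iteration use,
		 * then free outside the critical section */
		pthread_mutex_lock(&list_lock);
		n->prev->next = n->next;
		n->next->prev = n->prev;
		pthread_mutex_unlock(&list_lock);
		free(n);
	}

	int main(void)
	{
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		n->id = 1;
		list_add_locked(n);
		node_free(n);
		printf("list empty: %d\n", head.next == &head);
		return 0;
	}
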
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index efd4f95fd05..f23d9155b1e 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -268,6 +268,10 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
268 goto nla_put_failure; 268 goto nla_put_failure;
269 269
270 nla_nest_end(skb, nest); 270 nla_nest_end(skb, nest);
271
272 if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0)
273 goto nla_put_failure;
274
271 return skb->len; 275 return skb->len;
272 276
273nla_put_failure: 277nla_put_failure:
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 37dff78e9cb..d49c40fb7e0 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -34,8 +34,6 @@ struct cgroup_subsys net_cls_subsys = {
34 .populate = cgrp_populate, 34 .populate = cgrp_populate,
35#ifdef CONFIG_NET_CLS_CGROUP 35#ifdef CONFIG_NET_CLS_CGROUP
36 .subsys_id = net_cls_subsys_id, 36 .subsys_id = net_cls_subsys_id,
37#else
38#define net_cls_subsys_id net_cls_subsys.subsys_id
39#endif 37#endif
40 .module = THIS_MODULE, 38 .module = THIS_MODULE,
41}; 39};
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 76325325741..ea8f566e720 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -103,7 +103,8 @@ retry:
103 103
104static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m) 104static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
105{ 105{
106 textsearch_destroy(EM_TEXT_PRIV(m)->config); 106 if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
107 textsearch_destroy(EM_TEXT_PRIV(m)->config);
107} 108}
108 109
109static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m) 110static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1ef29c74d85..e58f9476f29 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -92,7 +92,7 @@ static struct sctp_af *sctp_af_v6_specific;
92struct kmem_cache *sctp_chunk_cachep __read_mostly; 92struct kmem_cache *sctp_chunk_cachep __read_mostly;
93struct kmem_cache *sctp_bucket_cachep __read_mostly; 93struct kmem_cache *sctp_bucket_cachep __read_mostly;
94 94
95int sysctl_sctp_mem[3]; 95long sysctl_sctp_mem[3];
96int sysctl_sctp_rmem[3]; 96int sysctl_sctp_rmem[3];
97int sysctl_sctp_wmem[3]; 97int sysctl_sctp_wmem[3];
98 98
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e34ca9cc116..6bd554323a3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -111,12 +111,12 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
111static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; 111static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
112 112
113extern struct kmem_cache *sctp_bucket_cachep; 113extern struct kmem_cache *sctp_bucket_cachep;
114extern int sysctl_sctp_mem[3]; 114extern long sysctl_sctp_mem[3];
115extern int sysctl_sctp_rmem[3]; 115extern int sysctl_sctp_rmem[3];
116extern int sysctl_sctp_wmem[3]; 116extern int sysctl_sctp_wmem[3];
117 117
118static int sctp_memory_pressure; 118static int sctp_memory_pressure;
119static atomic_t sctp_memory_allocated; 119static atomic_long_t sctp_memory_allocated;
120struct percpu_counter sctp_sockets_allocated; 120struct percpu_counter sctp_sockets_allocated;
121 121
122static void sctp_enter_memory_pressure(struct sock *sk) 122static void sctp_enter_memory_pressure(struct sock *sk)
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 832590bbe0c..50cb57f0919 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -54,7 +54,7 @@ static int sack_timer_max = 500;
54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ 54static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
55static int rwnd_scale_max = 16; 55static int rwnd_scale_max = 16;
56 56
57extern int sysctl_sctp_mem[3]; 57extern long sysctl_sctp_mem[3];
58extern int sysctl_sctp_rmem[3]; 58extern int sysctl_sctp_rmem[3];
59extern int sysctl_sctp_wmem[3]; 59extern int sysctl_sctp_wmem[3];
60 60
@@ -203,7 +203,7 @@ static ctl_table sctp_table[] = {
203 .data = &sysctl_sctp_mem, 203 .data = &sysctl_sctp_mem,
204 .maxlen = sizeof(sysctl_sctp_mem), 204 .maxlen = sizeof(sysctl_sctp_mem),
205 .mode = 0644, 205 .mode = 0644,
206 .proc_handler = proc_dointvec, 206 .proc_handler = proc_doulongvec_minmax
207 }, 207 },
208 { 208 {
209 .procname = "sctp_rmem", 209 .procname = "sctp_rmem",
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 33217fc3d69..e9f0d500448 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -396,6 +396,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
396 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 396 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
397 struct tipc_sock *tsock = tipc_sk(sock->sk); 397 struct tipc_sock *tsock = tipc_sk(sock->sk);
398 398
399 memset(addr, 0, sizeof(*addr));
399 if (peer) { 400 if (peer) {
400 if ((sock->state != SS_CONNECTED) && 401 if ((sock->state != SS_CONNECTED) &&
401 ((peer != 2) || (sock->state != SS_DISCONNECTING))) 402 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c506241f863..4e78e3f2679 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -224,8 +224,8 @@ static int nl80211_prepare_netdev_dump(struct sk_buff *skb,
224 } 224 }
225 225
226 *rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); 226 *rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
227 if (IS_ERR(dev)) { 227 if (IS_ERR(*rdev)) {
228 err = PTR_ERR(dev); 228 err = PTR_ERR(*rdev);
229 goto out_rtnl; 229 goto out_rtnl;
230 } 230 }
231 231
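
The nl80211 fix tests IS_ERR(*rdev), the pointer that was actually just assigned, instead of the unrelated dev variable. cfg80211_get_dev_from_ifindex() returns either a valid object or an errno encoded in the pointer value, so testing the wrong variable lets an error pointer escape into later dereferences. A userspace re-implementation of that encoding, for illustration only (the kernel's own helpers live in include/linux/err.h):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>
	#include <errno.h>

	/* Sketch of the ERR_PTR convention: small negative errno values map to
	 * the last page of the address space, which no real object occupies,
	 * so a single return value carries "pointer or error". */
	#define MAX_ERRNO 4095
	static inline void *ERR_PTR(long err) { return (void *)err; }
	static inline long PTR_ERR(const void *p) { return (long)p; }
	static inline int IS_ERR(const void *p)
	{
		return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
	}

	static void *lookup_device(int ifindex)	/* hypothetical helper */
	{
		if (ifindex != 1)
			return ERR_PTR(-ENODEV);
		return malloc(16);		/* stands in for a real object */
	}

	int main(void)
	{
		void *dev = lookup_device(2);

		if (IS_ERR(dev))
			printf("lookup failed: %ld\n", PTR_ERR(dev));
		else
			free(dev);
		return 0;
	}
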
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 771bab00754..55187c8f642 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
61 while (len > 0) { 61 while (len > 0) {
62 switch (*p & X25_FAC_CLASS_MASK) { 62 switch (*p & X25_FAC_CLASS_MASK) {
63 case X25_FAC_CLASS_A: 63 case X25_FAC_CLASS_A:
64 if (len < 2)
65 return 0;
64 switch (*p) { 66 switch (*p) {
65 case X25_FAC_REVERSE: 67 case X25_FAC_REVERSE:
66 if((p[1] & 0x81) == 0x81) { 68 if((p[1] & 0x81) == 0x81) {
@@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
104 len -= 2; 106 len -= 2;
105 break; 107 break;
106 case X25_FAC_CLASS_B: 108 case X25_FAC_CLASS_B:
109 if (len < 3)
110 return 0;
107 switch (*p) { 111 switch (*p) {
108 case X25_FAC_PACKET_SIZE: 112 case X25_FAC_PACKET_SIZE:
109 facilities->pacsize_in = p[1]; 113 facilities->pacsize_in = p[1];
@@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
125 len -= 3; 129 len -= 3;
126 break; 130 break;
127 case X25_FAC_CLASS_C: 131 case X25_FAC_CLASS_C:
132 if (len < 4)
133 return 0;
128 printk(KERN_DEBUG "X.25: unknown facility %02X, " 134 printk(KERN_DEBUG "X.25: unknown facility %02X, "
129 "values %02X, %02X, %02X\n", 135 "values %02X, %02X, %02X\n",
130 p[0], p[1], p[2], p[3]); 136 p[0], p[1], p[2], p[3]);
@@ -132,26 +138,26 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 			len -= 4;
 			break;
 		case X25_FAC_CLASS_D:
+			if (len < p[1] + 2)
+				return 0;
 			switch (*p) {
 			case X25_FAC_CALLING_AE:
-				if (p[1] > X25_MAX_DTE_FACIL_LEN)
-					break;
+				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+					return 0;
 				dte_facs->calling_len = p[2];
 				memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLING_AE;
 				break;
 			case X25_FAC_CALLED_AE:
-				if (p[1] > X25_MAX_DTE_FACIL_LEN)
-					break;
+				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+					return 0;
 				dte_facs->called_len = p[2];
 				memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLED_AE;
 				break;
 			default:
 				printk(KERN_DEBUG "X.25: unknown facility %02X,"
-				       "length %d, values %02X, %02X, "
-				       "%02X, %02X\n",
-				       p[0], p[1], p[2], p[3], p[4], p[5]);
+				       "length %d\n", p[0], p[1]);
 				break;
 			}
 			len -= p[1] + 2;
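
Every facility class in x25_parse_facilities() now checks that enough bytes remain before it reads past p[0], and malformed lengths abort the parse (return 0) instead of being skipped, which closes a remotely triggerable over-read on crafted Call Request frames. A compact sketch of the same rule on a simplified, hypothetical encoding (top two bits select the class, class D carries an explicit length byte):

	#include <stdio.h>
	#include <stddef.h>

	/* Simplified facility walk; returns bytes consumed, or 0 on malformed
	 * input, mirroring the shape of x25_parse_facilities(). */
	static int parse_facilities(const unsigned char *p, int len)
	{
		const int total = len;

		while (len > 0) {
			int need;

			switch (p[0] >> 6) {
			case 0: need = 2; break;		/* class A */
			case 1: need = 3; break;		/* class B */
			case 2: need = 4; break;		/* class C */
			default:				/* class D: 2 + p[1] */
				if (len < 2)
					return 0;
				need = 2 + p[1];
				break;
			}
			if (len < need)		/* never read past the buffer */
				return 0;
			p += need;
			len -= need;
		}
		return total;
	}

	int main(void)
	{
		unsigned char good[] = { 0x01, 0x02, 0xC3, 0x02, 0xAA, 0xBB };
		unsigned char bad[]  = { 0xC3, 0x7F };	/* claims 127 bytes follow */

		printf("good: %d\n", parse_facilities(good, sizeof(good)));
		printf("bad:  %d\n", parse_facilities(bad, sizeof(bad)));
		return 0;
	}

The x25_in.c hunk just below completes the fix by treating a non-positive return from the parser as a protocol error.
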
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 63178961efa..f729f022be6 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -119,6 +119,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 						&x25->vc_facil_mask);
 			if (len > 0)
 				skb_pull(skb, len);
+			else
+				return -1;
 			/*
 			 * Copy any Call User Data.
 			 */
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index c0efe102d65..af6e9f3de95 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -875,7 +875,7 @@ const char *sym_expand_string_value(const char *in)
 		symval = sym_get_string_value(sym);
 	}
 
-	newlen = strlen(res) + strlen(symval) + strlen(src);
+	newlen = strlen(res) + strlen(symval) + strlen(src) + 1;
 	if (newlen > reslen) {
 		reslen = newlen;
 		res = realloc(res, reslen);
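
sym_expand_string_value() concatenates three strings into res, so the buffer must hold their combined length plus one byte for the terminating NUL; without the "+ 1" the terminator lands one byte past the allocation whenever the sizes line up exactly. The sizing rule in a standalone helper:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Join three strings; the "+ 1" reserves space for the trailing '\0'. */
	static char *join3(const char *a, const char *b, const char *c)
	{
		size_t need = strlen(a) + strlen(b) + strlen(c) + 1;
		char *res = malloc(need);

		if (!res)
			return NULL;
		snprintf(res, need, "%s%s%s", a, b, c);
		return res;
	}

	int main(void)
	{
		char *s = join3("CONFIG_", "FOO", "=y");

		puts(s ? s : "alloc failed");
		free(s);
		return 0;
	}
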
diff --git a/security/Kconfig b/security/Kconfig
index bd72ae62349..e80da955e68 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -39,6 +39,18 @@ config KEYS_DEBUG_PROC_KEYS
39 39
40 If you are unsure as to whether this is required, answer N. 40 If you are unsure as to whether this is required, answer N.
41 41
42config SECURITY_DMESG_RESTRICT
43 bool "Restrict unprivileged access to the kernel syslog"
44 default n
45 help
46 This enforces restrictions on unprivileged users reading the kernel
47 syslog via dmesg(8).
48
49 If this option is not selected, no restrictions will be enforced
50 unless the dmesg_restrict sysctl is explicitly set to (1).
51
52 If you are unsure how to answer this question, answer N.
53
42config SECURITY 54config SECURITY
43 bool "Enable different security models" 55 bool "Enable different security models"
44 depends on SYSFS 56 depends on SYSFS
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index cf1de4462cc..b7106f192b7 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -922,7 +922,7 @@ static int __init apparmor_init(void)
922 error = register_security(&apparmor_ops); 922 error = register_security(&apparmor_ops);
923 if (error) { 923 if (error) {
924 AA_ERROR("Unable to register AppArmor\n"); 924 AA_ERROR("Unable to register AppArmor\n");
925 goto register_security_out; 925 goto set_init_cxt_out;
926 } 926 }
927 927
928 /* Report that AppArmor successfully initialized */ 928 /* Report that AppArmor successfully initialized */
@@ -936,6 +936,9 @@ static int __init apparmor_init(void)
936 936
937 return error; 937 return error;
938 938
939set_init_cxt_out:
940 aa_free_task_context(current->real_cred->security);
941
939register_security_out: 942register_security_out:
940 aa_free_root_ns(); 943 aa_free_root_ns();
941 944
@@ -944,7 +947,6 @@ alloc_out:
944 947
945 apparmor_enabled = 0; 948 apparmor_enabled = 0;
946 return error; 949 return error;
947
948} 950}
949 951
950security_initcall(apparmor_init); 952security_initcall(apparmor_init);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 52cc865f146..4f0eadee78b 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -306,7 +306,7 @@ static struct aa_namespace *alloc_namespace(const char *prefix,
306 return ns; 306 return ns;
307 307
308fail_unconfined: 308fail_unconfined:
309 kzfree(ns->base.name); 309 kzfree(ns->base.hname);
310fail_ns: 310fail_ns:
311 kzfree(ns); 311 kzfree(ns);
312 return NULL; 312 return NULL;
diff --git a/security/capability.c b/security/capability.c
index 30ae00fbecd..c773635ca3a 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -17,6 +17,11 @@ static int cap_sysctl(ctl_table *table, int op)
17 return 0; 17 return 0;
18} 18}
19 19
20static int cap_syslog(int type)
21{
22 return 0;
23}
24
20static int cap_quotactl(int cmds, int type, int id, struct super_block *sb) 25static int cap_quotactl(int cmds, int type, int id, struct super_block *sb)
21{ 26{
22 return 0; 27 return 0;
diff --git a/security/commoncap.c b/security/commoncap.c
index 5e632b4857e..64c2ed9c901 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -27,7 +27,6 @@
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/prctl.h> 28#include <linux/prctl.h>
29#include <linux/securebits.h> 29#include <linux/securebits.h>
30#include <linux/syslog.h>
31 30
32/* 31/*
33 * If a non-root user executes a setuid-root binary in 32 * If a non-root user executes a setuid-root binary in
@@ -884,24 +883,6 @@ error:
884} 883}
885 884
886/** 885/**
887 * cap_syslog - Determine whether syslog function is permitted
888 * @type: Function requested
889 * @from_file: Whether this request came from an open file (i.e. /proc)
890 *
891 * Determine whether the current process is permitted to use a particular
892 * syslog function, returning 0 if permission is granted, -ve if not.
893 */
894int cap_syslog(int type, bool from_file)
895{
896 if (type != SYSLOG_ACTION_OPEN && from_file)
897 return 0;
898 if ((type != SYSLOG_ACTION_READ_ALL &&
899 type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
900 return -EPERM;
901 return 0;
902}
903
904/**
905 * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted 886 * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted
906 * @mm: The VM space in which the new mapping is to be made 887 * @mm: The VM space in which the new mapping is to be made
907 * @pages: The size of the mapping 888 * @pages: The size of the mapping
diff --git a/security/security.c b/security/security.c
index 3ef5e2a7a74..1b798d3df71 100644
--- a/security/security.c
+++ b/security/security.c
@@ -197,9 +197,9 @@ int security_quota_on(struct dentry *dentry)
197 return security_ops->quota_on(dentry); 197 return security_ops->quota_on(dentry);
198} 198}
199 199
200int security_syslog(int type, bool from_file) 200int security_syslog(int type)
201{ 201{
202 return security_ops->syslog(type, from_file); 202 return security_ops->syslog(type);
203} 203}
204 204
205int security_settime(struct timespec *ts, struct timezone *tz) 205int security_settime(struct timespec *ts, struct timezone *tz)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index d9154cf90ae..65fa8bf596f 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1973,14 +1973,10 @@ static int selinux_quota_on(struct dentry *dentry)
1973 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON); 1973 return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
1974} 1974}
1975 1975
1976static int selinux_syslog(int type, bool from_file) 1976static int selinux_syslog(int type)
1977{ 1977{
1978 int rc; 1978 int rc;
1979 1979
1980 rc = cap_syslog(type, from_file);
1981 if (rc)
1982 return rc;
1983
1984 switch (type) { 1980 switch (type) {
1985 case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */ 1981 case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
1986 case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */ 1982 case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index bc39f4067af..489a85afa47 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -157,15 +157,11 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
157 * 157 *
158 * Returns 0 on success, error code otherwise. 158 * Returns 0 on success, error code otherwise.
159 */ 159 */
160static int smack_syslog(int type, bool from_file) 160static int smack_syslog(int typefrom_file)
161{ 161{
162 int rc; 162 int rc = 0;
163 char *sp = current_security(); 163 char *sp = current_security();
164 164
165 rc = cap_syslog(type, from_file);
166 if (rc != 0)
167 return rc;
168
169 if (capable(CAP_MAC_OVERRIDE)) 165 if (capable(CAP_MAC_OVERRIDE))
170 return 0; 166 return 0;
171 167
diff --git a/sound/pci/asihpi/hpi6000.c b/sound/pci/asihpi/hpi6000.c
index f7e374ec441..1b9bf9395cf 100644
--- a/sound/pci/asihpi/hpi6000.c
+++ b/sound/pci/asihpi/hpi6000.c
@@ -625,6 +625,8 @@ static short create_adapter_obj(struct hpi_adapter_obj *pao,
625 control_cache_size, (struct hpi_control_cache_info *) 625 control_cache_size, (struct hpi_control_cache_info *)
626 &phw->control_cache[0] 626 &phw->control_cache[0]
627 ); 627 );
628 if (!phw->p_cache)
629 pao->has_control_cache = 0;
628 } else 630 } else
629 pao->has_control_cache = 0; 631 pao->has_control_cache = 0;
630 632
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 22c5fc62553..2672f6591ce 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -644,6 +644,8 @@ static u16 create_adapter_obj(struct hpi_adapter_obj *pao,
644 interface->control_cache.size_in_bytes, 644 interface->control_cache.size_in_bytes,
645 (struct hpi_control_cache_info *) 645 (struct hpi_control_cache_info *)
646 p_control_cache_virtual); 646 p_control_cache_virtual);
647 if (!phw->p_cache)
648 err = HPI_ERROR_MEMORY_ALLOC;
647 } 649 }
648 if (!err) { 650 if (!err) {
649 err = hpios_locked_mem_get_phys_addr(&phw-> 651 err = hpios_locked_mem_get_phys_addr(&phw->
diff --git a/sound/pci/asihpi/hpicmn.c b/sound/pci/asihpi/hpicmn.c
index dda4f1c6f65..d67f4d3db91 100644
--- a/sound/pci/asihpi/hpicmn.c
+++ b/sound/pci/asihpi/hpicmn.c
@@ -571,14 +571,20 @@ struct hpi_control_cache *hpi_alloc_control_cache(const u32
 {
 	struct hpi_control_cache *p_cache =
 		kmalloc(sizeof(*p_cache), GFP_KERNEL);
+	if (!p_cache)
+		return NULL;
+	p_cache->p_info =
+		kmalloc(sizeof(*p_cache->p_info) * number_of_controls,
+			GFP_KERNEL);
+	if (!p_cache->p_info) {
+		kfree(p_cache);
+		return NULL;
+	}
 	p_cache->cache_size_in_bytes = size_in_bytes;
 	p_cache->control_count = number_of_controls;
 	p_cache->p_cache =
 		(struct hpi_control_cache_single *)pDSP_control_buffer;
 	p_cache->init = 0;
-	p_cache->p_info =
-		kmalloc(sizeof(*p_cache->p_info) * p_cache->control_count,
-			GFP_KERNEL);
 	return p_cache;
 }
 
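
hpi_alloc_control_cache() now checks both allocations and, when the second one fails, frees the first before returning NULL, instead of handing back an object with a NULL p_info or dereferencing a NULL p_cache; the hpi6000/hpi6205 hunks above then cope with the NULL return. The same unwind shape in plain C, with a hypothetical two-level object:

	#include <stdio.h>
	#include <stdlib.h>

	struct cache {
		size_t count;
		int *info;		/* second, dependent allocation */
	};

	static struct cache *cache_alloc(size_t count)
	{
		struct cache *c = malloc(sizeof(*c));

		if (!c)
			return NULL;
		c->info = malloc(count * sizeof(*c->info));
		if (!c->info) {
			free(c);	/* undo the first allocation before bailing out */
			return NULL;
		}
		c->count = count;
		return c;
	}

	static void cache_free(struct cache *c)
	{
		if (!c)
			return;
		free(c->info);
		free(c);
	}

	int main(void)
	{
		struct cache *c = cache_alloc(64);

		printf("alloc %s\n", c ? "ok" : "failed");
		cache_free(c);
		return 0;
	}
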
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 3e5ca8fb519..e377287192a 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -225,39 +225,25 @@ struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
225{ 225{
226 struct dsp_spos_instance * ins = kzalloc(sizeof(struct dsp_spos_instance), GFP_KERNEL); 226 struct dsp_spos_instance * ins = kzalloc(sizeof(struct dsp_spos_instance), GFP_KERNEL);
227 227
228 if (ins == NULL) 228 if (ins == NULL)
229 return NULL; 229 return NULL;
230 230
231 /* better to use vmalloc for this big table */ 231 /* better to use vmalloc for this big table */
232 ins->symbol_table.nsymbols = 0;
233 ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) * 232 ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) *
234 DSP_MAX_SYMBOLS); 233 DSP_MAX_SYMBOLS);
235 ins->symbol_table.highest_frag_index = 0; 234 ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
236 235 ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
237 if (ins->symbol_table.symbols == NULL) { 236 if (!ins->symbol_table.symbols || !ins->code.data || !ins->modules) {
238 cs46xx_dsp_spos_destroy(chip); 237 cs46xx_dsp_spos_destroy(chip);
239 goto error; 238 goto error;
240 } 239 }
241 240 ins->symbol_table.nsymbols = 0;
241 ins->symbol_table.highest_frag_index = 0;
242 ins->code.offset = 0; 242 ins->code.offset = 0;
243 ins->code.size = 0; 243 ins->code.size = 0;
244 ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
245
246 if (ins->code.data == NULL) {
247 cs46xx_dsp_spos_destroy(chip);
248 goto error;
249 }
250
251 ins->nscb = 0; 244 ins->nscb = 0;
252 ins->ntask = 0; 245 ins->ntask = 0;
253
254 ins->nmodules = 0; 246 ins->nmodules = 0;
255 ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
256
257 if (ins->modules == NULL) {
258 cs46xx_dsp_spos_destroy(chip);
259 goto error;
260 }
261 247
262 /* default SPDIF input sample rate 248 /* default SPDIF input sample rate
263 to 48000 khz */ 249 to 48000 khz */
@@ -271,8 +257,8 @@ struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
271 257
272 /* set left and right validity bits and 258 /* set left and right validity bits and
273 default channel status */ 259 default channel status */
274 ins->spdif_csuv_default = 260 ins->spdif_csuv_default =
275 ins->spdif_csuv_stream = 261 ins->spdif_csuv_stream =
276 /* byte 0 */ ((unsigned int)_wrap_all_bits( (SNDRV_PCM_DEFAULT_CON_SPDIF & 0xff)) << 24) | 262 /* byte 0 */ ((unsigned int)_wrap_all_bits( (SNDRV_PCM_DEFAULT_CON_SPDIF & 0xff)) << 24) |
277 /* byte 1 */ ((unsigned int)_wrap_all_bits( ((SNDRV_PCM_DEFAULT_CON_SPDIF >> 8) & 0xff)) << 16) | 263 /* byte 1 */ ((unsigned int)_wrap_all_bits( ((SNDRV_PCM_DEFAULT_CON_SPDIF >> 8) & 0xff)) << 16) |
278 /* byte 3 */ (unsigned int)_wrap_all_bits( (SNDRV_PCM_DEFAULT_CON_SPDIF >> 24) & 0xff) | 264 /* byte 3 */ (unsigned int)_wrap_all_bits( (SNDRV_PCM_DEFAULT_CON_SPDIF >> 24) & 0xff) |
@@ -281,6 +267,9 @@ struct dsp_spos_instance *cs46xx_dsp_spos_create (struct snd_cs46xx * chip)
281 return ins; 267 return ins;
282 268
283error: 269error:
270 kfree(ins->modules);
271 kfree(ins->code.data);
272 vfree(ins->symbol_table.symbols);
284 kfree(ins); 273 kfree(ins);
285 return NULL; 274 return NULL;
286} 275}
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 460fb2ef7e3..18af38ebf75 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -1166,6 +1166,7 @@ static const char *cs420x_models[CS420X_MODELS] = {
1166 1166
1167static struct snd_pci_quirk cs420x_cfg_tbl[] = { 1167static struct snd_pci_quirk cs420x_cfg_tbl[] = {
1168 SND_PCI_QUIRK(0x10de, 0x0ac0, "MacBookPro 5,3", CS420X_MBP53), 1168 SND_PCI_QUIRK(0x10de, 0x0ac0, "MacBookPro 5,3", CS420X_MBP53),
1169 SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
1169 SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55), 1170 SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
1170 SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55), 1171 SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
1171 SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27), 1172 SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index ef9af3f4ace..1bd7a540fd4 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -425,7 +425,7 @@ exit:
425static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream) 425static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
426{ 426{
427 struct snd_pcm_substream *substream = lx_stream->stream; 427 struct snd_pcm_substream *substream = lx_stream->stream;
428 const int is_capture = lx_stream->is_capture; 428 const unsigned int is_capture = lx_stream->is_capture;
429 429
430 int err; 430 int err;
431 431
@@ -473,7 +473,7 @@ static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
473 473
474static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream) 474static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream)
475{ 475{
476 const int is_capture = lx_stream->is_capture; 476 const unsigned int is_capture = lx_stream->is_capture;
477 int err; 477 int err;
478 478
479 snd_printd(LXP "stopping: stopping stream\n"); 479 snd_printd(LXP "stopping: stopping stream\n");
diff --git a/sound/pci/lx6464es/lx6464es.h b/sound/pci/lx6464es/lx6464es.h
index 51afc048961..aea621eafbb 100644
--- a/sound/pci/lx6464es/lx6464es.h
+++ b/sound/pci/lx6464es/lx6464es.h
@@ -60,7 +60,7 @@ struct lx_stream {
 	snd_pcm_uframes_t frame_pos;
 	enum lx_stream_status status; /* free, open, running, draining
 				       * pause */
-	int is_capture:1;
+	unsigned int is_capture:1;
 };
 
 
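
is_capture is a one-bit bitfield. With gcc a plain int bitfield is treated as signed, so its only representable values are 0 and -1: storing 1 reads back as -1 and comparisons against 1 quietly fail, which is why the field and the locals that copy it in lx6464es.c and lx_core.c are all switched to unsigned. A quick demonstration:

	#include <stdio.h>

	struct flags {
		int          s:1;	/* signed one-bit field: values are 0 and -1 */
		unsigned int u:1;	/* unsigned one-bit field: values are 0 and 1 */
	};

	int main(void)
	{
		/* assigning 1 to the signed field may draw an overflow warning,
		 * which is part of the point */
		struct flags f = { .s = 1, .u = 1 };

		printf("signed:   %d  (== 1? %s)\n", f.s, f.s == 1 ? "yes" : "no");
		printf("unsigned: %u  (== 1? %s)\n", f.u, f.u == 1 ? "yes" : "no");
		return 0;
	}
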
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 3086b751da4..617f98b0cba 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -1152,7 +1152,7 @@ static int lx_interrupt_request_new_buffer(struct lx6464es *chip,
1152 struct lx_stream *lx_stream) 1152 struct lx_stream *lx_stream)
1153{ 1153{
1154 struct snd_pcm_substream *substream = lx_stream->stream; 1154 struct snd_pcm_substream *substream = lx_stream->stream;
1155 int is_capture = lx_stream->is_capture; 1155 const unsigned int is_capture = lx_stream->is_capture;
1156 int err; 1156 int err;
1157 unsigned long flags; 1157 unsigned long flags;
1158 1158
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 7dae05d8783..782f741cd00 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -60,7 +60,7 @@ static const struct rc_config {
60 { USB_ID(0x041e, 0x3000), 0, 1, 2, 1, 18, 0x0013 }, /* Extigy */ 60 { USB_ID(0x041e, 0x3000), 0, 1, 2, 1, 18, 0x0013 }, /* Extigy */
61 { USB_ID(0x041e, 0x3020), 2, 1, 6, 6, 18, 0x0013 }, /* Audigy 2 NX */ 61 { USB_ID(0x041e, 0x3020), 2, 1, 6, 6, 18, 0x0013 }, /* Audigy 2 NX */
62 { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */ 62 { USB_ID(0x041e, 0x3040), 2, 2, 6, 6, 2, 0x6e91 }, /* Live! 24-bit */
63 { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi */ 63 { USB_ID(0x041e, 0x3042), 0, 1, 1, 1, 1, 0x000d }, /* Usb X-Fi S51 */
64 { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */ 64 { USB_ID(0x041e, 0x3048), 2, 2, 6, 6, 2, 0x6e91 }, /* Toshiba SB0500 */
65}; 65};
66 66
@@ -183,7 +183,13 @@ static int snd_audigy2nx_led_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
183 if (value > 1) 183 if (value > 1)
184 return -EINVAL; 184 return -EINVAL;
185 changed = value != mixer->audigy2nx_leds[index]; 185 changed = value != mixer->audigy2nx_leds[index];
186 err = snd_usb_ctl_msg(mixer->chip->dev, 186 if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042))
187 err = snd_usb_ctl_msg(mixer->chip->dev,
188 usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
189 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
190 !value, 0, NULL, 0, 100);
191 else
192 err = snd_usb_ctl_msg(mixer->chip->dev,
187 usb_sndctrlpipe(mixer->chip->dev, 0), 0x24, 193 usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
188 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, 194 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
189 value, index + 2, NULL, 0, 100); 195 value, index + 2, NULL, 0, 100);
@@ -225,8 +231,12 @@ static int snd_audigy2nx_controls_create(struct usb_mixer_interface *mixer)
225 int i, err; 231 int i, err;
226 232
227 for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) { 233 for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) {
234 /* USB X-Fi S51 doesn't have a CMSS LED */
235 if ((mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) && i == 0)
236 continue;
228 if (i > 1 && /* Live24ext has 2 LEDs only */ 237 if (i > 1 && /* Live24ext has 2 LEDs only */
229 (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) || 238 (mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
239 mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
230 mixer->chip->usb_id == USB_ID(0x041e, 0x3048))) 240 mixer->chip->usb_id == USB_ID(0x041e, 0x3048)))
231 break; 241 break;
232 err = snd_ctl_add(mixer->chip->card, 242 err = snd_ctl_add(mixer->chip->card,
@@ -365,6 +375,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
365 375
366 if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020) || 376 if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020) ||
367 mixer->chip->usb_id == USB_ID(0x041e, 0x3040) || 377 mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
378 mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
368 mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) { 379 mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) {
369 if ((err = snd_audigy2nx_controls_create(mixer)) < 0) 380 if ((err = snd_audigy2nx_controls_create(mixer)) < 0)
370 return err; 381 return err;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index cff3a3c465d..4132522ac90 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -676,8 +676,10 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
676 if (!needs_knot) 676 if (!needs_knot)
677 return 0; 677 return 0;
678 678
679 subs->rate_list.count = count;
680 subs->rate_list.list = kmalloc(sizeof(int) * count, GFP_KERNEL); 679 subs->rate_list.list = kmalloc(sizeof(int) * count, GFP_KERNEL);
680 if (!subs->rate_list.list)
681 return -ENOMEM;
682 subs->rate_list.count = count;
681 subs->rate_list.mask = 0; 683 subs->rate_list.mask = 0;
682 count = 0; 684 count = 0;
683 list_for_each_entry(fp, &subs->fmt_list, list) { 685 list_for_each_entry(fp, &subs->fmt_list, list) {
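The pcm.c hunk fixes an ordering bug: rate_list.count was set before the kmalloc() of rate_list.list, and the allocation was never checked, so a failure left a non-zero count pointing at a NULL list. A standalone sketch of the corrected pattern, using stand-in types and malloc() in place of kmalloc():

#include <stdlib.h>

/* Stand-in for the count/list pair used by the rate constraint. */
struct rate_list {
	unsigned int count;
	unsigned int *list;
};

static int alloc_rates(struct rate_list *r, unsigned int count)
{
	r->list = malloc(count * sizeof(*r->list));
	if (!r->list)
		return -1;        /* the kernel code returns -ENOMEM here */
	r->count = count;         /* publish the count only after the allocation succeeds */
	return 0;
}

int main(void)
{
	struct rate_list rates = { 0, NULL };
	int err = alloc_rates(&rates, 4);

	free(rates.list);
	return err ? 1 : 0;
}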
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 122ec9dc485..26aff6bf9e5 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -8,7 +8,11 @@ perf-trace - Read perf.data (created by perf record) and display trace output
8SYNOPSIS 8SYNOPSIS
9-------- 9--------
10[verse] 10[verse]
11'perf trace' {record <script> | report <script> [args] } 11'perf trace' [<options>]
12'perf trace' [<options>] record <script> [<record-options>] <command>
13'perf trace' [<options>] report <script> [script-args]
14'perf trace' [<options>] <script> <required-script-args> [<record-options>] <command>
15'perf trace' [<options>] <top-script> [script-args]
12 16
13DESCRIPTION 17DESCRIPTION
14----------- 18-----------
@@ -24,23 +28,53 @@ There are several variants of perf trace:
24 available via 'perf trace -l'). The following variants allow you to 28 available via 'perf trace -l'). The following variants allow you to
25 record and run those scripts: 29 record and run those scripts:
26 30
27 'perf trace record <script>' to record the events required for 'perf 31 'perf trace record <script> <command>' to record the events required
28 trace report'. <script> is the name displayed in the output of 32 for 'perf trace report'. <script> is the name displayed in the
29 'perf trace --list' i.e. the actual script name minus any language 33 output of 'perf trace --list' i.e. the actual script name minus any
30 extension. 34 language extension. If <command> is not specified, the events are
35 recorded using the -a (system-wide) 'perf record' option.
31 36
32 'perf trace report <script>' to run and display the results of 37 'perf trace report <script> [args]' to run and display the results
33 <script>. <script> is the name displayed in the output of 'perf 38 of <script>. <script> is the name displayed in the output of 'perf
34 trace --list' i.e. the actual script name minus any language 39 trace --list' i.e. the actual script name minus any language
35 extension. The perf.data output from a previous run of 'perf trace 40 extension. The perf.data output from a previous run of 'perf trace
36 record <script>' is used and should be present for this command to 41 record <script>' is used and should be present for this command to
37 succeed. 42 succeed. [args] refers to the (mainly optional) args expected by
43 the script.
44
45 'perf trace <script> <required-script-args> <command>' to both
46 record the events required for <script> and to run the <script>
47 using 'live-mode' i.e. without writing anything to disk. <script>
48 is the name displayed in the output of 'perf trace --list' i.e. the
49 actual script name minus any language extension. If <command> is
50 not specified, the events are recorded using the -a (system-wide)
51 'perf record' option. If <script> has any required args, they
52 should be specified before <command>. This mode doesn't allow for
53 optional script args to be specified; if optional script args are
54 desired, they can be specified using separate 'perf trace record'
55 and 'perf trace report' commands, with the stdout of the record step
56 piped to the stdin of the report script, using the '-o -' and '-i -'
57 options of the corresponding commands.
58
59 'perf trace <top-script>' to both record the events required for
60 <top-script> and to run the <top-script> using 'live-mode'
61 i.e. without writing anything to disk. <top-script> is the name
62 displayed in the output of 'perf trace --list' i.e. the actual
63 script name minus any language extension; a <top-script> is defined
64 as any script name ending with the string 'top'.
65
66 [<record-options>] can be passed to the record steps of 'perf trace
67 record' and 'live-mode' variants; this isn't possible however for
68 <top-script> 'live-mode' or 'perf trace report' variants.
38 69
39 See the 'SEE ALSO' section for links to language-specific 70 See the 'SEE ALSO' section for links to language-specific
40 information on how to write and run your own trace scripts. 71 information on how to write and run your own trace scripts.
41 72
42OPTIONS 73OPTIONS
43------- 74-------
75<command>...::
76 Any command you can specify in a shell.
77
44-D:: 78-D::
45--dump-raw-trace=:: 79--dump-raw-trace=::
46 Display verbose dump of the trace data. 80 Display verbose dump of the trace data.
@@ -64,6 +98,13 @@ OPTIONS
64 Generate perf-trace.[ext] starter script for given language, 98 Generate perf-trace.[ext] starter script for given language,
65 using current perf.data. 99 using current perf.data.
66 100
101-a::
102 Force system-wide collection. Scripts run without a <command>
103 normally use -a by default, while scripts run with a <command>
104 normally don't - this option allows the latter to be run in
105 system-wide mode.
106
106
107
67SEE ALSO 108SEE ALSO
68-------- 109--------
69linkperf:perf-record[1], linkperf:perf-trace-perl[1], 110linkperf:perf-record[1], linkperf:perf-trace-perl[1],
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4e75583ddd6..93bd2ff001f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -790,7 +790,7 @@ static const char * const record_usage[] = {
790 790
791static bool force, append_file; 791static bool force, append_file;
792 792
793static const struct option options[] = { 793const struct option record_options[] = {
794 OPT_CALLBACK('e', "event", NULL, "event", 794 OPT_CALLBACK('e', "event", NULL, "event",
795 "event selector. use 'perf list' to list available events", 795 "event selector. use 'perf list' to list available events",
796 parse_events), 796 parse_events),
@@ -839,16 +839,16 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
839{ 839{
840 int i, j, err = -ENOMEM; 840 int i, j, err = -ENOMEM;
841 841
842 argc = parse_options(argc, argv, options, record_usage, 842 argc = parse_options(argc, argv, record_options, record_usage,
843 PARSE_OPT_STOP_AT_NON_OPTION); 843 PARSE_OPT_STOP_AT_NON_OPTION);
844 if (!argc && target_pid == -1 && target_tid == -1 && 844 if (!argc && target_pid == -1 && target_tid == -1 &&
845 !system_wide && !cpu_list) 845 !system_wide && !cpu_list)
846 usage_with_options(record_usage, options); 846 usage_with_options(record_usage, record_options);
847 847
848 if (force && append_file) { 848 if (force && append_file) {
849 fprintf(stderr, "Can't overwrite and append at the same time." 849 fprintf(stderr, "Can't overwrite and append at the same time."
850 " You need to choose between -f and -A"); 850 " You need to choose between -f and -A");
851 usage_with_options(record_usage, options); 851 usage_with_options(record_usage, record_options);
852 } else if (append_file) { 852 } else if (append_file) {
853 write_mode = WRITE_APPEND; 853 write_mode = WRITE_APPEND;
854 } else { 854 } else {
@@ -871,7 +871,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
871 if (thread_num <= 0) { 871 if (thread_num <= 0) {
872 fprintf(stderr, "Can't find all threads of pid %d\n", 872 fprintf(stderr, "Can't find all threads of pid %d\n",
873 target_pid); 873 target_pid);
874 usage_with_options(record_usage, options); 874 usage_with_options(record_usage, record_options);
875 } 875 }
876 } else { 876 } else {
877 all_tids=malloc(sizeof(pid_t)); 877 all_tids=malloc(sizeof(pid_t));
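builtin-record.c drops the static qualifier and renames its option table to record_options so builtin-trace.c can reach it through the extern declaration added in the hunk further below. A compressed single-file sketch of that linkage, with a stand-in struct option instead of perf's util/parse-options.h types:

#include <stdio.h>

/* Stand-in for the perf option descriptor; the real table uses
 * util/parse-options.h and OPT_* initializers. */
struct option {
	const char *name;
	const char *help;
};

/* "builtin-record.c": definition with external linkage (no 'static') */
const struct option record_options[] = {
	{ "event",  "event selector. use 'perf list' to list available events" },
	{ "output", "output file name" },
	{ NULL, NULL },
};

/* "builtin-trace.c": picks the same table up via an extern declaration */
extern const struct option record_options[];

int main(void)
{
	const struct option *opt;

	for (opt = record_options; opt->name; opt++)
		printf("--%s\t%s\n", opt->name, opt->help);
	return 0;
}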
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b513e40974f..dd625808c2a 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -69,7 +69,6 @@ static int target_tid = -1;
69static pid_t *all_tids = NULL; 69static pid_t *all_tids = NULL;
70static int thread_num = 0; 70static int thread_num = 0;
71static bool inherit = false; 71static bool inherit = false;
72static int profile_cpu = -1;
73static int nr_cpus = 0; 72static int nr_cpus = 0;
74static int realtime_prio = 0; 73static int realtime_prio = 0;
75static bool group = false; 74static bool group = false;
@@ -558,13 +557,13 @@ static void print_sym_table(void)
558 else 557 else
559 printf(" (all"); 558 printf(" (all");
560 559
561 if (profile_cpu != -1) 560 if (cpu_list)
562 printf(", cpu: %d)\n", profile_cpu); 561 printf(", CPU%s: %s)\n", nr_cpus > 1 ? "s" : "", cpu_list);
563 else { 562 else {
564 if (target_tid != -1) 563 if (target_tid != -1)
565 printf(")\n"); 564 printf(")\n");
566 else 565 else
567 printf(", %d CPUs)\n", nr_cpus); 566 printf(", %d CPU%s)\n", nr_cpus, nr_cpus > 1 ? "s" : "");
568 } 567 }
569 568
570 printf("%-*.*s\n", win_width, win_width, graph_dotted_line); 569 printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
@@ -1187,11 +1186,10 @@ int group_fd;
1187static void start_counter(int i, int counter) 1186static void start_counter(int i, int counter)
1188{ 1187{
1189 struct perf_event_attr *attr; 1188 struct perf_event_attr *attr;
1190 int cpu; 1189 int cpu = -1;
1191 int thread_index; 1190 int thread_index;
1192 1191
1193 cpu = profile_cpu; 1192 if (target_tid == -1)
1194 if (target_tid == -1 && profile_cpu == -1)
1195 cpu = cpumap[i]; 1193 cpu = cpumap[i];
1196 1194
1197 attr = attrs + counter; 1195 attr = attrs + counter;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 2f8df45c4dc..86cfe3800e6 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -10,6 +10,7 @@
10#include "util/symbol.h" 10#include "util/symbol.h"
11#include "util/thread.h" 11#include "util/thread.h"
12#include "util/trace-event.h" 12#include "util/trace-event.h"
13#include "util/parse-options.h"
13#include "util/util.h" 14#include "util/util.h"
14 15
15static char const *script_name; 16static char const *script_name;
@@ -17,6 +18,7 @@ static char const *generate_script_lang;
17static bool debug_mode; 18static bool debug_mode;
18static u64 last_timestamp; 19static u64 last_timestamp;
19static u64 nr_unordered; 20static u64 nr_unordered;
21extern const struct option record_options[];
20 22
21static int default_start_script(const char *script __unused, 23static int default_start_script(const char *script __unused,
22 int argc __unused, 24 int argc __unused,
@@ -328,7 +330,7 @@ static struct script_desc *script_desc__new(const char *name)
328{ 330{
329 struct script_desc *s = zalloc(sizeof(*s)); 331 struct script_desc *s = zalloc(sizeof(*s));
330 332
331 if (s != NULL) 333 if (s != NULL && name)
332 s->name = strdup(name); 334 s->name = strdup(name);
333 335
334 return s; 336 return s;
@@ -337,6 +339,8 @@ static struct script_desc *script_desc__new(const char *name)
337static void script_desc__delete(struct script_desc *s) 339static void script_desc__delete(struct script_desc *s)
338{ 340{
339 free(s->name); 341 free(s->name);
342 free(s->half_liner);
343 free(s->args);
340 free(s); 344 free(s);
341} 345}
342 346
@@ -537,8 +541,40 @@ static char *get_script_path(const char *script_root, const char *suffix)
537 return path; 541 return path;
538} 542}
539 543
544static bool is_top_script(const char *script_path)
545{
546 return ends_with((char *)script_path, "top") == NULL ? false : true;
547}
548
549static int has_required_arg(char *script_path)
550{
551 struct script_desc *desc;
552 int n_args = 0;
553 char *p;
554
555 desc = script_desc__new(NULL);
556
557 if (read_script_info(desc, script_path))
558 goto out;
559
560 if (!desc->args)
561 goto out;
562
563 for (p = desc->args; *p; p++)
564 if (*p == '<')
565 n_args++;
566out:
567 script_desc__delete(desc);
568
569 return n_args;
570}
571
540static const char * const trace_usage[] = { 572static const char * const trace_usage[] = {
541 "perf trace [<options>] <command>", 573 "perf trace [<options>]",
574 "perf trace [<options>] record <script> [<record-options>] <command>",
575 "perf trace [<options>] report <script> [script-args]",
576 "perf trace [<options>] <script> [<record-options>] <command>",
577 "perf trace [<options>] <top-script> [script-args]",
542 NULL 578 NULL
543}; 579};
544 580
@@ -564,50 +600,81 @@ static const struct option options[] = {
564 OPT_END() 600 OPT_END()
565}; 601};
566 602
603static bool have_cmd(int argc, const char **argv)
604{
605 char **__argv = malloc(sizeof(const char *) * argc);
606
607 if (!__argv)
608 die("malloc");
609 memcpy(__argv, argv, sizeof(const char *) * argc);
610 argc = parse_options(argc, (const char **)__argv, record_options,
611 NULL, PARSE_OPT_STOP_AT_NON_OPTION);
612 free(__argv);
613
614 return argc != 0;
615}
616
567int cmd_trace(int argc, const char **argv, const char *prefix __used) 617int cmd_trace(int argc, const char **argv, const char *prefix __used)
568{ 618{
619 char *rec_script_path = NULL;
620 char *rep_script_path = NULL;
569 struct perf_session *session; 621 struct perf_session *session;
570 const char *suffix = NULL; 622 char *script_path = NULL;
571 const char **__argv; 623 const char **__argv;
572 char *script_path; 624 bool system_wide;
573 int i, err; 625 int i, j, err;
574 626
575 if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) { 627 setup_scripting();
576 if (argc < 3) { 628
577 fprintf(stderr, 629 argc = parse_options(argc, argv, options, trace_usage,
578 "Please specify a record script\n"); 630 PARSE_OPT_STOP_AT_NON_OPTION);
579 return -1; 631
580 } 632 if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
581 suffix = RECORD_SUFFIX; 633 rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
634 if (!rec_script_path)
635 return cmd_record(argc, argv, NULL);
582 } 636 }
583 637
584 if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) { 638 if (argc > 1 && !strncmp(argv[0], "rep", strlen("rep"))) {
585 if (argc < 3) { 639 rep_script_path = get_script_path(argv[1], REPORT_SUFFIX);
640 if (!rep_script_path) {
586 fprintf(stderr, 641 fprintf(stderr,
587 "Please specify a report script\n"); 642 "Please specify a valid report script"
643 "(see 'perf trace -l' for listing)\n");
588 return -1; 644 return -1;
589 } 645 }
590 suffix = REPORT_SUFFIX;
591 } 646 }
592 647
593 /* make sure PERF_EXEC_PATH is set for scripts */ 648 /* make sure PERF_EXEC_PATH is set for scripts */
594 perf_set_argv_exec_path(perf_exec_path()); 649 perf_set_argv_exec_path(perf_exec_path());
595 650
596 if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) { 651 if (argc && !script_name && !rec_script_path && !rep_script_path) {
597 char *record_script_path, *report_script_path;
598 int live_pipe[2]; 652 int live_pipe[2];
653 int rep_args;
599 pid_t pid; 654 pid_t pid;
600 655
601 record_script_path = get_script_path(argv[1], RECORD_SUFFIX); 656 rec_script_path = get_script_path(argv[0], RECORD_SUFFIX);
602 if (!record_script_path) { 657 rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
603 fprintf(stderr, "record script not found\n"); 658
604 return -1; 659 if (!rec_script_path && !rep_script_path) {
660 fprintf(stderr, " Couldn't find script %s\n\n See perf"
661 " trace -l for available scripts.\n", argv[0]);
662 usage_with_options(trace_usage, options);
605 } 663 }
606 664
607 report_script_path = get_script_path(argv[1], REPORT_SUFFIX); 665 if (is_top_script(argv[0])) {
608 if (!report_script_path) { 666 rep_args = argc - 1;
609 fprintf(stderr, "report script not found\n"); 667 } else {
610 return -1; 668 int rec_args;
669
670 rep_args = has_required_arg(rep_script_path);
671 rec_args = (argc - 1) - rep_args;
672 if (rec_args < 0) {
673 fprintf(stderr, " %s script requires options."
674 "\n\n See perf trace -l for available "
675 "scripts and options.\n", argv[0]);
676 usage_with_options(trace_usage, options);
677 }
611 } 678 }
612 679
613 if (pipe(live_pipe) < 0) { 680 if (pipe(live_pipe) < 0) {
@@ -622,60 +689,84 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
622 } 689 }
623 690
624 if (!pid) { 691 if (!pid) {
692 system_wide = true;
693 j = 0;
694
625 dup2(live_pipe[1], 1); 695 dup2(live_pipe[1], 1);
626 close(live_pipe[0]); 696 close(live_pipe[0]);
627 697
628 __argv = malloc(6 * sizeof(const char *)); 698 if (!is_top_script(argv[0]))
629 __argv[0] = "/bin/sh"; 699 system_wide = !have_cmd(argc - rep_args,
630 __argv[1] = record_script_path; 700 &argv[rep_args]);
631 __argv[2] = "-q"; 701
632 __argv[3] = "-o"; 702 __argv = malloc((argc + 6) * sizeof(const char *));
633 __argv[4] = "-"; 703 if (!__argv)
634 __argv[5] = NULL; 704 die("malloc");
705
706 __argv[j++] = "/bin/sh";
707 __argv[j++] = rec_script_path;
708 if (system_wide)
709 __argv[j++] = "-a";
710 __argv[j++] = "-q";
711 __argv[j++] = "-o";
712 __argv[j++] = "-";
713 for (i = rep_args + 1; i < argc; i++)
714 __argv[j++] = argv[i];
715 __argv[j++] = NULL;
635 716
636 execvp("/bin/sh", (char **)__argv); 717 execvp("/bin/sh", (char **)__argv);
718 free(__argv);
637 exit(-1); 719 exit(-1);
638 } 720 }
639 721
640 dup2(live_pipe[0], 0); 722 dup2(live_pipe[0], 0);
641 close(live_pipe[1]); 723 close(live_pipe[1]);
642 724
643 __argv = malloc((argc + 3) * sizeof(const char *)); 725 __argv = malloc((argc + 4) * sizeof(const char *));
644 __argv[0] = "/bin/sh"; 726 if (!__argv)
645 __argv[1] = report_script_path; 727 die("malloc");
646 for (i = 2; i < argc; i++) 728 j = 0;
647 __argv[i] = argv[i]; 729 __argv[j++] = "/bin/sh";
648 __argv[i++] = "-i"; 730 __argv[j++] = rep_script_path;
649 __argv[i++] = "-"; 731 for (i = 1; i < rep_args + 1; i++)
650 __argv[i++] = NULL; 732 __argv[j++] = argv[i];
733 __argv[j++] = "-i";
734 __argv[j++] = "-";
735 __argv[j++] = NULL;
651 736
652 execvp("/bin/sh", (char **)__argv); 737 execvp("/bin/sh", (char **)__argv);
738 free(__argv);
653 exit(-1); 739 exit(-1);
654 } 740 }
655 741
656 if (suffix) { 742 if (rec_script_path)
657 script_path = get_script_path(argv[2], suffix); 743 script_path = rec_script_path;
658 if (!script_path) { 744 if (rep_script_path)
659 fprintf(stderr, "script not found\n"); 745 script_path = rep_script_path;
660 return -1; 746
661 } 747 if (script_path) {
662 748 system_wide = false;
663 __argv = malloc((argc + 1) * sizeof(const char *)); 749 j = 0;
664 __argv[0] = "/bin/sh"; 750
665 __argv[1] = script_path; 751 if (rec_script_path)
666 for (i = 3; i < argc; i++) 752 system_wide = !have_cmd(argc - 1, &argv[1]);
667 __argv[i - 1] = argv[i]; 753
668 __argv[argc - 1] = NULL; 754 __argv = malloc((argc + 2) * sizeof(const char *));
755 if (!__argv)
756 die("malloc");
757 __argv[j++] = "/bin/sh";
758 __argv[j++] = script_path;
759 if (system_wide)
760 __argv[j++] = "-a";
761 for (i = 2; i < argc; i++)
762 __argv[j++] = argv[i];
763 __argv[j++] = NULL;
669 764
670 execvp("/bin/sh", (char **)__argv); 765 execvp("/bin/sh", (char **)__argv);
766 free(__argv);
671 exit(-1); 767 exit(-1);
672 } 768 }
673 769
674 setup_scripting();
675
676 argc = parse_options(argc, argv, options, trace_usage,
677 PARSE_OPT_STOP_AT_NON_OPTION);
678
679 if (symbol__init() < 0) 770 if (symbol__init() < 0)
680 return -1; 771 return -1;
681 if (!script_name) 772 if (!script_name)
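The live-mode path in cmd_trace() above forks, wires the two halves of a pipe to the record child's stdout and the report parent's stdin, and exec's the corresponding shell scripts with '-o -' and '-i -'. A self-contained sketch of that plumbing, with /bin/echo and /bin/cat standing in for the record and report scripts:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Sketch of the live-mode pipe setup: the child runs the "record" step
 * with its stdout on the pipe, the parent runs the "report" step with
 * its stdin on the pipe.
 */
int main(void)
{
	int live_pipe[2];
	pid_t pid;

	if (pipe(live_pipe) < 0) {
		perror("pipe");
		return 1;
	}

	pid = fork();
	if (pid < 0) {
		perror("fork");
		return 1;
	}

	if (!pid) {
		/* child: write end becomes stdout, then run the "record" step */
		dup2(live_pipe[1], 1);
		close(live_pipe[0]);
		execl("/bin/echo", "echo", "record step output", (char *)NULL);
		exit(-1);
	}

	/* parent: read end becomes stdin, then run the "report" step */
	dup2(live_pipe[0], 0);
	close(live_pipe[1]);
	execl("/bin/cat", "cat", (char *)NULL);
	perror("execl");
	return 1;
}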
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
index eb5846bcb56..8104895a7b6 100644
--- a/tools/perf/scripts/perl/bin/failed-syscalls-record
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -1,2 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e raw_syscalls:sys_exit $@ 2perf record -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record
index 5bfaae5a6cb..33efc8673aa 100644
--- a/tools/perf/scripts/perl/bin/rw-by-file-record
+++ b/tools/perf/scripts/perl/bin/rw-by-file-record
@@ -1,3 +1,3 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@ 2perf record -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
3 3
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record
index 6e0b2f7755a..7cb9db23044 100644
--- a/tools/perf/scripts/perl/bin/rw-by-pid-record
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-record
@@ -1,2 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@ 2perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rwtop-record b/tools/perf/scripts/perl/bin/rwtop-record
index 6e0b2f7755a..7cb9db23044 100644
--- a/tools/perf/scripts/perl/bin/rwtop-record
+++ b/tools/perf/scripts/perl/bin/rwtop-record
@@ -1,2 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@ 2perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record
index 9f2acaaae9f..464251a1bd7 100644
--- a/tools/perf/scripts/perl/bin/wakeup-latency-record
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-record
@@ -1,5 +1,5 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e sched:sched_switch -e sched:sched_wakeup $@ 2perf record -e sched:sched_switch -e sched:sched_wakeup $@
3 3
4 4
5 5
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record
index 85301f2471f..8edda9078d5 100644
--- a/tools/perf/scripts/perl/bin/workqueue-stats-record
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-record
@@ -1,2 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@ 2perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
index eb5846bcb56..8104895a7b6 100644
--- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -1,2 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e raw_syscalls:sys_exit $@ 2perf record -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
index 5ecbb433caf..b1495c9a9b2 100644
--- a/tools/perf/scripts/python/bin/futex-contention-record
+++ b/tools/perf/scripts/python/bin/futex-contention-record
@@ -1,2 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@ 2perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record
index d931a828126..558754b840a 100644
--- a/tools/perf/scripts/python/bin/netdev-times-record
+++ b/tools/perf/scripts/python/bin/netdev-times-record
@@ -1,5 +1,5 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e net:net_dev_xmit -e net:net_dev_queue \ 2perf record -e net:net_dev_xmit -e net:net_dev_queue \
3 -e net:netif_receive_skb -e net:netif_rx \ 3 -e net:netif_receive_skb -e net:netif_rx \
4 -e skb:consume_skb -e skb:kfree_skb \ 4 -e skb:consume_skb -e skb:kfree_skb \
5 -e skb:skb_copy_datagram_iovec -e napi:napi_poll \ 5 -e skb:skb_copy_datagram_iovec -e napi:napi_poll \
diff --git a/tools/perf/scripts/python/bin/sched-migration-record b/tools/perf/scripts/python/bin/sched-migration-record
index 17a3e9bd9e8..7493fddbe99 100644
--- a/tools/perf/scripts/python/bin/sched-migration-record
+++ b/tools/perf/scripts/python/bin/sched-migration-record
@@ -1,2 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -m 16384 -a -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@ 2perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record
index 1fc5998b721..4efbfaa7f6a 100644
--- a/tools/perf/scripts/python/bin/sctop-record
+++ b/tools/perf/scripts/python/bin/sctop-record
@@ -1,2 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e raw_syscalls:sys_enter $@ 2perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
index 1fc5998b721..4efbfaa7f6a 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -1,2 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e raw_syscalls:sys_enter $@ 2perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
index 1fc5998b721..4efbfaa7f6a 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -1,2 +1,2 @@
1#!/bin/bash 1#!/bin/bash
2perf record -a -e raw_syscalls:sys_enter $@ 2perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index 9706d9d4027..056c69521a3 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -104,9 +104,10 @@ out_destroy_form:
104 return rc; 104 return rc;
105} 105}
106 106
107static const char yes[] = "Yes", no[] = "No";
108
107bool ui__dialog_yesno(const char *msg) 109bool ui__dialog_yesno(const char *msg)
108{ 110{
109 /* newtWinChoice should really be accepting const char pointers... */ 111 /* newtWinChoice should really be accepting const char pointers... */
110 char yes[] = "Yes", no[] = "No"; 112 return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1;
111 return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
112} 113}