author	David S. Miller <davem@davemloft.net>	2010-11-14 14:57:05 -0500
committer	David S. Miller <davem@davemloft.net>	2010-11-14 14:57:05 -0500
commit	c25ecd0a21d5e08160cb5cc984f9e2b8ee347443 (patch)
tree	0e4dcacf1bf603f259b8d27445a10e60fa8d00d7
parent	190683a9d5457e6d962c232ffbecac3ab158dddd (diff)
parent	9457b24a0955bbdd2e89220a75de69fe09501bba (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
-rw-r--r-- Documentation/ABI/obsolete/proc-pid-oom_adj | 22
-rw-r--r-- Documentation/block/switching-sched.txt | 8
-rw-r--r-- Documentation/filesystems/xfs-delayed-logging-design.txt | 11
-rw-r--r-- Documentation/kernel-parameters.txt | 2
-rw-r--r-- Documentation/leds-class.txt | 21
-rw-r--r-- Documentation/leds/leds-lp5521.txt | 88
-rw-r--r-- Documentation/leds/leds-lp5523.txt | 83
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 9
-rw-r--r-- Documentation/rbtree.txt | 4
-rw-r--r-- Documentation/sysctl/kernel.txt | 14
-rw-r--r-- MAINTAINERS | 14
-rw-r--r-- arch/arm/Kconfig | 2
-rw-r--r-- arch/arm/common/gic.c | 28
-rw-r--r-- arch/arm/include/asm/hardware/it8152.h | 2
-rw-r--r-- arch/arm/kernel/hw_breakpoint.c | 3
-rw-r--r-- arch/arm/kernel/perf_event.c | 2
-rw-r--r-- arch/arm/kernel/stacktrace.c | 2
-rw-r--r-- arch/arm/kernel/traps.c | 5
-rw-r--r-- arch/arm/kernel/unwind.c | 2
-rw-r--r-- arch/arm/mach-ep93xx/include/mach/dma.h | 111
-rw-r--r-- arch/arm/mach-kirkwood/common.c | 7
-rw-r--r-- arch/arm/mach-kirkwood/d2net_v2-setup.c | 2
-rw-r--r-- arch/arm/mach-kirkwood/lacie_v2-common.c | 14
-rw-r--r-- arch/arm/mach-kirkwood/lacie_v2-common.h | 2
-rw-r--r-- arch/arm/mach-kirkwood/mpp.c | 4
-rw-r--r-- arch/arm/mach-kirkwood/netspace_v2-setup.c | 6
-rw-r--r-- arch/arm/mach-kirkwood/netxbig_v2-setup.c | 4
-rw-r--r-- arch/arm/mach-kirkwood/ts41x-setup.c | 14
-rw-r--r-- arch/arm/mach-mmp/include/mach/cputype.h | 3
-rw-r--r-- arch/arm/mach-mv78xx0/mpp.c | 4
-rw-r--r-- arch/arm/mach-orion5x/mpp.c | 4
-rw-r--r-- arch/arm/mach-orion5x/ts78xx-setup.c | 2
-rw-r--r-- arch/arm/mach-pxa/cm-x2xx.c | 2
-rw-r--r-- arch/arm/mach-pxa/saar.c | 2
-rw-r--r-- arch/arm/mach-vexpress/ct-ca9x4.c | 2
-rw-r--r-- arch/arm/mm/dma-mapping.c | 2
-rw-r--r-- arch/arm/plat-omap/devices.c | 4
-rw-r--r-- arch/arm/plat-orion/include/plat/pcie.h | 3
-rw-r--r-- arch/arm/plat-orion/pcie.c | 5
-rw-r--r-- arch/um/include/asm/ptrace-generic.h | 4
-rw-r--r-- arch/um/kernel/ptrace.c | 2
-rw-r--r-- arch/x86/include/asm/apic.h | 10
-rw-r--r-- arch/x86/include/asm/uv/uv_mmrs.h | 189
-rw-r--r-- arch/x86/kernel/apic/apic.c | 1
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 12
-rw-r--r-- arch/x86/kernel/cpu/perf_event_amd.c | 4
-rw-r--r-- arch/x86/kernel/microcode_amd.c | 2
-rw-r--r-- arch/x86/kernel/mmconf-fam10h_64.c | 7
-rw-r--r-- arch/x86/kernel/pvclock.c | 38
-rw-r--r-- arch/x86/mm/tlb.c | 2
-rw-r--r-- arch/x86/pci/xen.c | 8
-rw-r--r-- arch/x86/platform/uv/tlb_uv.c | 13
-rw-r--r-- arch/x86/xen/mmu.c | 2
-rw-r--r-- arch/x86/xen/setup.c | 18
-rw-r--r-- block/blk-core.c | 11
-rw-r--r-- block/blk-ioc.c | 14
-rw-r--r-- block/blk-map.c | 2
-rw-r--r-- block/compat_ioctl.c | 4
-rw-r--r-- block/elevator.c | 4
-rw-r--r-- block/ioctl.c | 7
-rw-r--r-- block/scsi_ioctl.c | 34
-rw-r--r-- crypto/pcrypt.c | 1
-rw-r--r-- drivers/ata/libata-scsi.c | 5
-rw-r--r-- drivers/ata/pata_legacy.c | 2
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 2
-rw-r--r-- drivers/block/aoe/aoeblk.c | 3
-rw-r--r-- drivers/block/cciss.c | 131
-rw-r--r-- drivers/block/cciss.h | 4
-rw-r--r-- drivers/block/drbd/drbd_actlog.c | 42
-rw-r--r-- drivers/block/drbd/drbd_int.h | 52
-rw-r--r-- drivers/block/drbd/drbd_main.c | 148
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 25
-rw-r--r-- drivers/block/drbd/drbd_proc.c | 1
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 217
-rw-r--r-- drivers/block/drbd/drbd_req.c | 38
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 23
-rw-r--r-- drivers/block/loop.c | 6
-rw-r--r-- drivers/block/xen-blkfront.c | 2
-rw-r--r-- drivers/bluetooth/btusb.c | 5
-rw-r--r-- drivers/char/agp/intel-gtt.c | 6
-rw-r--r-- drivers/char/amiserial.c | 1
-rw-r--r-- drivers/char/nozomi.c | 1
-rw-r--r-- drivers/char/pcmcia/synclink_cs.c | 1
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 2
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 26
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 118
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 70
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 129
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 3
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 27
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 26
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fence.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_i2c.c | 41
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 17
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 4
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 86
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_manager.c | 81
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 4
-rw-r--r-- drivers/gpu/drm/via/via_dmablit.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r-- drivers/gpu/stub/Kconfig | 3
-rw-r--r-- drivers/hwmon/ad7414.c | 6
-rw-r--r-- drivers/hwmon/adt7470.c | 4
-rw-r--r-- drivers/hwmon/gpio-fan.c | 8
-rw-r--r-- drivers/input/input.c | 87
-rw-r--r-- drivers/input/keyboard/adp5588-keys.c | 74
-rw-r--r-- drivers/input/keyboard/atkbd.c | 12
-rw-r--r-- drivers/input/misc/pcf8574_keypad.c | 23
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 11
-rw-r--r-- drivers/input/tablet/acecad.c | 3
-rw-r--r-- drivers/leds/Kconfig | 20
-rw-r--r-- drivers/leds/Makefile | 2
-rw-r--r-- drivers/leds/led-class.c | 105
-rw-r--r-- drivers/leds/led-triggers.c | 2
-rw-r--r-- drivers/leds/leds-gpio.c | 2
-rw-r--r-- drivers/leds/leds-lp5521.c | 821
-rw-r--r-- drivers/leds/leds-lp5523.c | 1065
-rw-r--r-- drivers/leds/ledtrig-timer.c | 124
-rw-r--r-- drivers/macintosh/adb-iop.c | 4
-rw-r--r-- drivers/md/md.c | 20
-rw-r--r-- drivers/misc/apds9802als.c | 2
-rw-r--r-- drivers/misc/bh1770glc.c | 8
-rw-r--r-- drivers/misc/isl29020.c | 4
-rw-r--r-- drivers/net/bnx2x/bnx2x_cmn.c | 2
-rw-r--r-- drivers/net/cxgb4vf/cxgb4vf_main.c | 42
-rw-r--r-- drivers/net/cxgb4vf/sge.c | 122
-rw-r--r-- drivers/net/cxgb4vf/t4vf_common.h | 1
-rw-r--r-- drivers/net/cxgb4vf/t4vf_hw.c | 19
-rw-r--r-- drivers/net/gianfar_ethtool.c | 5
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 60
-rw-r--r-- drivers/net/pcmcia/axnet_cs.c | 30
-rw-r--r-- drivers/net/r8169.c | 9
-rw-r--r-- drivers/net/ucc_geth.c | 25
-rw-r--r-- drivers/net/virtio_net.c | 12
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9002_hw.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/hif_usb.c | 31
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.c | 15
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/init.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 29
-rw-r--r-- drivers/net/wireless/ath/ath9k/recv.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/reg.h | 1
-rw-r--r-- drivers/net/wireless/ath/carl9170/usb.c | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl3945-base.c | 3
-rw-r--r-- drivers/net/wireless/libertas/cfg.c | 5
-rw-r--r-- drivers/net/wireless/libertas/dev.h | 1
-rw-r--r-- drivers/net/wireless/libertas/main.c | 7
-rw-r--r-- drivers/net/wireless/rt2x00/Kconfig | 3
-rw-r--r-- drivers/pci/xen-pcifront.c | 6
-rw-r--r-- drivers/rapidio/rio.c | 4
-rw-r--r-- drivers/scsi/scsi_error.c | 18
-rw-r--r-- drivers/serial/8250.c | 5
-rw-r--r-- drivers/serial/8250_pci.c | 5
-rw-r--r-- drivers/serial/bfin_5xx.c | 31
-rw-r--r-- drivers/serial/kgdboc.c | 59
-rw-r--r-- drivers/staging/ath6kl/Kconfig | 2
-rw-r--r-- drivers/staging/ath6kl/os/linux/ar6000_drv.c | 5
-rw-r--r-- drivers/staging/ath6kl/os/linux/cfg80211.c | 7
-rw-r--r-- drivers/staging/batman-adv/hard-interface.c | 15
-rw-r--r-- drivers/staging/batman-adv/routing.c | 12
-rw-r--r-- drivers/staging/batman-adv/routing.h | 4
-rw-r--r-- drivers/staging/batman-adv/unicast.c | 2
-rw-r--r-- drivers/staging/bcm/Bcmchar.c | 49
-rw-r--r-- drivers/staging/brcm80211/README | 2
-rw-r--r-- drivers/staging/brcm80211/TODO | 2
-rw-r--r-- drivers/staging/brcm80211/brcmfmac/dhd_linux.c | 2
-rw-r--r-- drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c | 12
-rw-r--r-- drivers/staging/cpia/cpia.c | 6
-rw-r--r-- drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c | 1
-rw-r--r-- drivers/staging/hv/hv_utils.c | 3
-rw-r--r-- drivers/staging/intel_sst/intel_sst_app_interface.c | 284
-rw-r--r-- drivers/staging/intel_sst/intel_sst_common.h | 4
-rw-r--r-- drivers/staging/keucr/init.c | 18
-rw-r--r-- drivers/staging/keucr/ms.c | 14
-rw-r--r-- drivers/staging/keucr/msscsi.c | 6
-rw-r--r-- drivers/staging/keucr/sdscsi.c | 4
-rw-r--r-- drivers/staging/keucr/smilsub.c | 18
-rw-r--r-- drivers/staging/keucr/transport.c | 2
-rw-r--r-- drivers/staging/rt2860/common/cmm_aes.c | 2
-rw-r--r-- drivers/staging/rt2860/usb_main_dev.c | 1
-rw-r--r-- drivers/staging/rtl8192e/r8192E_core.c | 3
-rw-r--r-- drivers/staging/stradis/stradis.c | 11
-rw-r--r-- drivers/staging/tidspbridge/Kconfig | 1
-rw-r--r-- drivers/staging/tidspbridge/Makefile | 7
-rw-r--r-- drivers/staging/tidspbridge/core/_deh.h | 5
-rw-r--r-- drivers/staging/tidspbridge/core/_tiomap.h | 19
-rw-r--r-- drivers/staging/tidspbridge/core/dsp-mmu.c | 317
-rw-r--r-- drivers/staging/tidspbridge/core/io_sm.c | 180
-rw-r--r-- drivers/staging/tidspbridge/core/tiomap3430.c | 1083
-rw-r--r-- drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 4
-rw-r--r-- drivers/staging/tidspbridge/core/tiomap_io.c | 17
-rw-r--r-- drivers/staging/tidspbridge/core/ue_deh.c | 115
-rw-r--r-- drivers/staging/tidspbridge/hw/EasiGlobal.h | 41
-rw-r--r-- drivers/staging/tidspbridge/hw/MMUAccInt.h | 76
-rw-r--r-- drivers/staging/tidspbridge/hw/MMURegAcM.h | 225
-rw-r--r-- drivers/staging/tidspbridge/hw/hw_defs.h | 58
-rw-r--r-- drivers/staging/tidspbridge/hw/hw_mmu.c | 562
-rw-r--r-- drivers/staging/tidspbridge/hw/hw_mmu.h | 163
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h | 1
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dev.h | 24
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dmm.h | 75
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/drv.h | 10
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h | 67
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dspdefs.h | 44
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/dspioctl.h | 7
-rw-r--r-- drivers/staging/tidspbridge/include/dspbridge/proc.h | 46
-rw-r--r-- drivers/staging/tidspbridge/pmgr/dev.c | 63
-rw-r--r-- drivers/staging/tidspbridge/pmgr/dmm.c | 533
-rw-r--r-- drivers/staging/tidspbridge/pmgr/dspapi.c | 34
-rw-r--r-- drivers/staging/tidspbridge/rmgr/drv.c | 15
-rw-r--r-- drivers/staging/tidspbridge/rmgr/drv_interface.c | 2
-rw-r--r-- drivers/staging/tidspbridge/rmgr/node.c | 48
-rw-r--r-- drivers/staging/tidspbridge/rmgr/proc.c | 197
-rw-r--r-- drivers/staging/udlfb/udlfb.c | 2
-rw-r--r-- drivers/staging/vt6656/main_usb.c | 3
-rw-r--r-- drivers/staging/westbridge/astoria/api/src/cyasusb.c | 1
-rw-r--r-- drivers/staging/wlan-ng/cfg80211.c | 6
-rw-r--r-- drivers/staging/wlan-ng/p80211netdev.c | 2
-rw-r--r-- drivers/tty/n_gsm.c | 5
-rw-r--r-- drivers/tty/tty_buffer.c | 14
-rw-r--r-- drivers/tty/tty_ldisc.c | 49
-rw-r--r-- drivers/tty/vt/vc_screen.c | 6
-rw-r--r-- drivers/usb/core/devio.c | 7
-rw-r--r-- drivers/usb/gadget/Kconfig | 2
-rw-r--r-- drivers/usb/gadget/goku_udc.h | 3
-rw-r--r-- drivers/usb/gadget/u_serial.c | 54
-rw-r--r-- drivers/usb/host/Kconfig | 2
-rw-r--r-- drivers/usb/host/ehci-mxc.c | 14
-rw-r--r-- drivers/usb/host/ohci-jz4740.c | 2
-rw-r--r-- drivers/usb/misc/iowarrior.c | 1
-rw-r--r-- drivers/usb/misc/sisusbvga/sisusb.c | 1
-rw-r--r-- drivers/usb/musb/blackfin.c | 80
-rw-r--r-- drivers/usb/musb/musb_core.c | 41
-rw-r--r-- drivers/usb/musb/musb_core.h | 2
-rw-r--r-- drivers/usb/musb/musb_gadget.c | 41
-rw-r--r-- drivers/usb/musb/musb_regs.h | 3
-rw-r--r-- drivers/usb/musb/musbhsdma.c | 14
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 7
-rw-r--r-- drivers/usb/serial/option.c | 2
-rw-r--r-- drivers/usb/storage/uas.c | 5
-rw-r--r-- drivers/uwb/allocator.c | 3
-rw-r--r-- drivers/video/backlight/adp8860_bl.c | 8
-rw-r--r-- drivers/video/backlight/l4f00242t03.c | 2
-rw-r--r-- drivers/video/backlight/lms283gf05.c | 2
-rw-r--r-- drivers/video/backlight/mbp_nvidia_bl.c | 18
-rw-r--r-- drivers/video/backlight/pwm_bl.c | 7
-rw-r--r-- drivers/video/backlight/s6e63m0.c | 7
-rw-r--r-- drivers/xen/events.c | 25
-rw-r--r-- fs/bio.c | 23
-rw-r--r-- fs/cifs/inode.c | 1
-rw-r--r-- fs/cifs/ioctl.c | 12
-rw-r--r-- fs/hugetlbfs/inode.c | 3
-rw-r--r-- fs/ioprio.c | 18
-rw-r--r-- fs/locks.c | 19
-rw-r--r-- fs/nfsd/nfs4state.c | 16
-rw-r--r-- fs/openpromfs/inode.c | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_aops.c | 7
-rw-r--r-- fs/xfs/linux-2.6/xfs_buf.c | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_ioctl.c | 2
-rw-r--r-- fs/xfs/linux-2.6/xfs_iops.c | 3
-rw-r--r-- fs/xfs/linux-2.6/xfs_super.c | 3
-rw-r--r-- fs/xfs/linux-2.6/xfs_sync.c | 1
-rw-r--r-- fs/xfs/xfs_filestream.c | 8
-rw-r--r-- fs/xfs/xfs_mount.c | 1
-rw-r--r-- fs/xfs/xfs_quota.h | 20
-rw-r--r-- include/drm/ttm/ttm_bo_api.h | 4
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h | 79
-rw-r--r-- include/linux/atomic.h | 37
-rw-r--r-- include/linux/bio.h | 4
-rw-r--r-- include/linux/blk_types.h | 6
-rw-r--r-- include/linux/blkdev.h | 3
-rw-r--r-- include/linux/drbd.h | 2
-rw-r--r-- include/linux/highmem.h | 1
-rw-r--r-- include/linux/i2c/adp5588.h | 15
-rw-r--r-- include/linux/if_vlan.h | 25
-rw-r--r-- include/linux/input.h | 4
-rw-r--r-- include/linux/iocontext.h | 1
-rw-r--r-- include/linux/kernel.h | 3
-rw-r--r-- include/linux/leds-lp5521.h | 47
-rw-r--r-- include/linux/leds-lp5523.h | 47
-rw-r--r-- include/linux/leds.h | 47
-rw-r--r-- include/linux/netfilter.h | 2
-rw-r--r-- include/linux/perf_event.h | 10
-rw-r--r-- include/linux/pwm_backlight.h | 1
-rw-r--r-- include/linux/radix-tree.h | 39
-rw-r--r-- include/linux/resource.h | 1
-rw-r--r-- include/linux/sunrpc/svc_xprt.h | 18
-rw-r--r-- include/linux/tty.h | 2
-rw-r--r-- include/linux/usb.h | 2
-rw-r--r-- include/linux/usb/musb.h | 2
-rw-r--r-- include/net/dn.h | 2
-rw-r--r-- include/net/sock.h | 4
-rw-r--r-- include/net/tcp.h | 6
-rw-r--r-- include/net/udp.h | 4
-rw-r--r-- kernel/latencytop.c | 17
-rw-r--r-- kernel/perf_event.c | 42
-rw-r--r-- kernel/printk.c | 6
-rw-r--r-- kernel/range.c | 2
-rw-r--r-- kernel/sysctl.c | 9
-rw-r--r-- kernel/trace/blktrace.c | 4
-rw-r--r-- lib/radix-tree.c | 83
-rw-r--r-- mm/filemap.c | 29
-rw-r--r-- mm/memcontrol.c | 16
-rw-r--r-- mm/mprotect.c | 2
-rw-r--r-- mm/vmscan.c | 2
-rw-r--r-- net/ax25/af_ax25.c | 2
-rw-r--r-- net/bluetooth/hci_event.c | 6
-rw-r--r-- net/bluetooth/hidp/Kconfig | 2
-rw-r--r-- net/bluetooth/l2cap.c | 8
-rw-r--r-- net/bluetooth/rfcomm/core.c | 13
-rw-r--r-- net/can/bcm.c | 2
-rw-r--r-- net/core/dst.c | 1
-rw-r--r-- net/core/filter.c | 64
-rw-r--r-- net/core/rtnetlink.c | 9
-rw-r--r-- net/core/sock.c | 14
-rw-r--r-- net/decnet/af_decnet.c | 2
-rw-r--r-- net/decnet/sysctl_net_decnet.c | 4
-rw-r--r-- net/ipv4/igmp.c | 4
-rw-r--r-- net/ipv4/proc.c | 8
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 5
-rw-r--r-- net/ipv4/tcp.c | 6
-rw-r--r-- net/ipv4/tcp_input.c | 11
-rw-r--r-- net/ipv4/tcp_ipv4.c | 8
-rw-r--r-- net/ipv4/udp.c | 4
-rw-r--r-- net/ipv6/addrconf.c | 24
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 2
-rw-r--r-- net/ipv6/route.c | 6
-rw-r--r-- net/mac80211/iface.c | 6
-rw-r--r-- net/packet/af_packet.c | 7
-rw-r--r-- net/sctp/protocol.c | 2
-rw-r--r-- net/sctp/socket.c | 4
-rw-r--r-- net/sctp/sysctl.c | 4
-rw-r--r-- net/tipc/socket.c | 1
-rw-r--r-- net/wireless/nl80211.c | 4
-rw-r--r-- net/x25/x25_facilities.c | 12
-rw-r--r-- security/Kconfig | 12
-rw-r--r-- security/apparmor/lsm.c | 6
-rw-r--r-- security/apparmor/policy.c | 2
-rw-r--r-- security/commoncap.c | 2
-rw-r--r-- tools/perf/Documentation/perf-trace.txt | 57
-rw-r--r-- tools/perf/builtin-record.c | 10
-rw-r--r-- tools/perf/builtin-top.c | 12
-rw-r--r-- tools/perf/builtin-trace.c | 209
-rw-r--r-- tools/perf/scripts/perl/bin/failed-syscalls-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/rw-by-file-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/rw-by-pid-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/rwtop-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/wakeup-latency-record | 2
-rw-r--r-- tools/perf/scripts/perl/bin/workqueue-stats-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/failed-syscalls-by-pid-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/futex-contention-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/netdev-times-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/sched-migration-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/sctop-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/syscall-counts-by-pid-record | 2
-rw-r--r-- tools/perf/scripts/python/bin/syscall-counts-record | 2
-rw-r--r-- tools/perf/util/ui/util.c | 5
380 files changed, 8964 insertions, 2730 deletions
diff --git a/Documentation/ABI/obsolete/proc-pid-oom_adj b/Documentation/ABI/obsolete/proc-pid-oom_adj
new file mode 100644
index 000000000000..cf63f264ce0f
--- /dev/null
+++ b/Documentation/ABI/obsolete/proc-pid-oom_adj
@@ -0,0 +1,22 @@
+What:	/proc/<pid>/oom_adj
+When:	August 2012
+Why:	/proc/<pid>/oom_adj allows userspace to influence the oom killer's
+	badness heuristic used to determine which task to kill when the kernel
+	is out of memory.
+
+	The badness heuristic has since been rewritten since the introduction of
+	this tunable such that its meaning is deprecated.  The value was
+	implemented as a bitshift on a score generated by the badness()
+	function that did not have any precise units of measure.  With the
+	rewrite, the score is given as a proportion of available memory to the
+	task allocating pages, so using a bitshift which grows the score
+	exponentially is, thus, impossible to tune with fine granularity.
+
+	A much more powerful interface, /proc/<pid>/oom_score_adj, was
+	introduced with the oom killer rewrite that allows users to increase or
+	decrease the badness() score linearly.  This interface will replace
+	/proc/<pid>/oom_adj.
+
+	A warning will be emitted to the kernel log if an application uses this
+	deprecated interface.  After it is printed once, future warnings will be
+	suppressed until the kernel is rebooted.
diff --git a/Documentation/block/switching-sched.txt b/Documentation/block/switching-sched.txt
index d5af3f630814..71cfbdc0f74d 100644
--- a/Documentation/block/switching-sched.txt
+++ b/Documentation/block/switching-sched.txt
@@ -16,7 +16,7 @@ you can do so by typing:
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the anticipatory or noop schedulers - which
+set a specific device to use the deadline or noop schedulers - which
 can improve that device's throughput).
 
 To set a specific scheduler, simply do this:
@@ -31,7 +31,7 @@ a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
 will be displayed, with the currently selected scheduler in brackets:
 
 # cat /sys/block/hda/queue/scheduler
-noop anticipatory deadline [cfq]
-# echo anticipatory > /sys/block/hda/queue/scheduler
+noop deadline [cfq]
+# echo deadline > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
-noop [anticipatory] deadline cfq
+noop [deadline] cfq
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.txt b/Documentation/filesystems/xfs-delayed-logging-design.txt
index 96d0df28bed3..7445bf335dae 100644
--- a/Documentation/filesystems/xfs-delayed-logging-design.txt
+++ b/Documentation/filesystems/xfs-delayed-logging-design.txt
@@ -794,17 +794,6 @@ designed.
 
 Roadmap:
 
-2.6.37 Remove experimental tag from mount option
-	=> should be roughly 6 months after initial merge
-	=> enough time to:
-		=> gain confidence and fix problems reported by early
-		   adopters (a.k.a. guinea pigs)
-		=> address worst performance regressions and undesired
-		   behaviours
-		=> start tuning/optimising code for parallelism
-		=> start tuning/optimising algorithms consuming
-		   excessive CPU time
-
 2.6.39 Switch default mount option to use delayed logging
 	=> should be roughly 12 months after initial merge
 	=> enough time to shake out remaining problems before next round of
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ed45e9802aa8..92e83e53148f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -706,7 +706,7 @@ and is between 256 and 4096 characters. It is defined in the file
 			arch/x86/kernel/cpu/cpufreq/elanfreq.c.
 
 	elevator=	[IOSCHED]
-			Format: {"anticipatory" | "cfq" | "deadline" | "noop"}
+			Format: {"cfq" | "deadline" | "noop"}
 			See Documentation/block/as-iosched.txt and
 			Documentation/block/deadline-iosched.txt for details.
 
diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt
index 8fd5ca2ae32d..58b266bd1846 100644
--- a/Documentation/leds-class.txt
+++ b/Documentation/leds-class.txt
@@ -60,15 +60,18 @@ Hardware accelerated blink of LEDs
 
 Some LEDs can be programmed to blink without any CPU interaction. To
 support this feature, a LED driver can optionally implement the
-blink_set() function (see <linux/leds.h>). If implemented, triggers can
-attempt to use it before falling back to software timers. The blink_set()
-function should return 0 if the blink setting is supported, or -EINVAL
-otherwise, which means that LED blinking will be handled by software.
-
-The blink_set() function should choose a user friendly blinking
-value if it is called with *delay_on==0 && *delay_off==0 parameters. In
-this case the driver should give back the chosen value through delay_on
-and delay_off parameters to the leds subsystem.
+blink_set() function (see <linux/leds.h>). To set an LED to blinking,
+however, it is better to use use the API function led_blink_set(),
+as it will check and implement software fallback if necessary.
+
+To turn off blinking again, use the API function led_brightness_set()
+as that will not just set the LED brightness but also stop any software
+timers that may have been required for blinking.
+
+The blink_set() function should choose a user friendly blinking value
+if it is called with *delay_on==0 && *delay_off==0 parameters. In this
+case the driver should give back the chosen value through delay_on and
+delay_off parameters to the leds subsystem.
 
 Setting the brightness to zero with brightness_set() callback function
 should completely turn off the LED and cancel the previously programmed
diff --git a/Documentation/leds/leds-lp5521.txt b/Documentation/leds/leds-lp5521.txt
new file mode 100644
index 000000000000..c4d8d151e0fe
--- /dev/null
+++ b/Documentation/leds/leds-lp5521.txt
@@ -0,0 +1,88 @@
+Kernel driver for lp5521
+========================
+
+* National Semiconductor LP5521 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5521.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+
+LP5521 can drive up to 3 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5521:channelx, where x is 0 .. 2
+
+All three channels can be also controlled using the engine micro programs.
+More details of the instructions can be found from the public data sheet.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : store program (visible only in engine load mode)
+
+Example (start to blink the channel 2 led):
+cd /sys/class/leds/lp5521:channel2/device
+echo "load" > engine3_mode
+echo "037f4d0003ff6000" > engine3_load
+echo "run" > engine3_mode
+
+stop the engine:
+echo "disabled" > engine3_mode
+
+sysfs contains a selftest entry.
+The test communicates with the chip and checks that
+the clock mode is automatically set to the requested one.
+
+Each channel has its own led current settings.
+/sys/class/leds/lp5521:channel0/led_current - RW
+/sys/class/leds/lp5521:channel0/max_current - RO
+Format: 10x mA i.e 10 means 1.0 mA
+
+example platform data:
+
+Note: chan_nr can have values between 0 and 2.
+
+static struct lp5521_led_config lp5521_led_config[] = {
+        {
+		.chan_nr	= 0,
+		.led_current	= 50,
+		.max_current	= 130,
+	}, {
+		.chan_nr	= 1,
+		.led_current	= 0,
+		.max_current	= 130,
+	}, {
+		.chan_nr	= 2,
+		.led_current	= 0,
+		.max_current	= 130,
+	}
+};
+
+static int lp5521_setup(void)
+{
+	/* setup HW resources */
+}
+
+static void lp5521_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5521_enable(bool state)
+{
+	/* Control of chip enable signal */
+}
+
+static struct lp5521_platform_data lp5521_platform_data = {
+	.led_config	= lp5521_led_config,
+	.num_channels	= ARRAY_SIZE(lp5521_led_config),
+	.clock_mode	= LP5521_CLOCK_EXT,
+	.setup_resources   = lp5521_setup,
+	.release_resources = lp5521_release,
+	.enable		   = lp5521_enable,
+};
+
+If the current is set to 0 in the platform data, that channel is
+disabled and it is not visible in the sysfs.
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt
new file mode 100644
index 000000000000..fad2feb8b7ce
--- /dev/null
+++ b/Documentation/leds/leds-lp5523.txt
@@ -0,0 +1,83 @@
+Kernel driver for lp5523
+========================
+
+* National Semiconductor LP5523 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5523.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+LP5523 can drive up to 9 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5523:channelx where x is 0...8
+
+The chip provides 3 engines. Each engine can control channels without
+interaction from the main CPU. Details of the micro engine code can be found
+from the public data sheet. Leds can be muxed to different channels.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : microcode load (visible only in load mode)
+enginex_leds : led mux control (visible only in load mode)
+
+cd /sys/class/leds/lp5523:channel2/device
+echo "load" > engine3_mode
+echo "9d80400004ff05ff437f0000" > engine3_load
+echo "111111111" > engine3_leds
+echo "run" > engine3_mode
+
+sysfs contains a selftest entry. It measures each channel
+voltage level and checks if it looks reasonable. If the level is too high,
+the led is missing; if the level is too low, there is a short circuit.
+
+Selftest uses always the current from the platform data.
+
+Each channel contains led current settings.
+/sys/class/leds/lp5523:channel2/led_current - RW
+/sys/class/leds/lp5523:channel2/max_current - RO
+Format: 10x mA i.e 10 means 1.0 mA
+
+Example platform data:
+
+Note - chan_nr can have values between 0 and 8.
+
+static struct lp5523_led_config lp5523_led_config[] = {
+        {
+		.chan_nr	= 0,
+		.led_current	= 50,
+		.max_current	= 130,
+	},
+...
+	}, {
+		.chan_nr	= 8,
+		.led_current	= 50,
+		.max_current	= 130,
+	}
+};
+
+static int lp5523_setup(void)
+{
+	/* Setup HW resources */
+}
+
+static void lp5523_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5523_enable(bool state)
+{
+	/* Control chip enable signal */
+}
+
+static struct lp5523_platform_data lp5523_platform_data = {
+	.led_config	= lp5523_led_config,
+	.num_channels	= ARRAY_SIZE(lp5523_led_config),
+	.clock_mode	= LP5523_CLOCK_EXT,
+	.setup_resources   = lp5523_setup,
+	.release_resources = lp5523_release,
+	.enable		   = lp5523_enable,
+};
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index c7165f4cb792..fe95105992c5 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -20,6 +20,15 @@ ip_no_pmtu_disc - BOOLEAN
 min_pmtu - INTEGER
 	default 562 - minimum discovered Path MTU
 
+route/max_size - INTEGER
+	Maximum number of routes allowed in the kernel.  Increase
+	this when using large numbers of interfaces and/or routes.
+
+neigh/default/gc_thresh3 - INTEGER
+	Maximum number of neighbor entries allowed.  Increase this
+	when using large numbers of interfaces and when communicating
+	with large numbers of directly-connected peers.
+
 mtu_expires - INTEGER
 	Time, in seconds, that cached PMTU information is kept.
 
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index 221f38be98f4..19f8278c3854 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -21,8 +21,8 @@ three rotations, respectively, to balance the tree), with slightly slower
 To quote Linux Weekly News:
 
     There are a number of red-black trees in use in the kernel.
-    The anticipatory, deadline, and CFQ I/O schedulers all employ
-    rbtrees to track requests; the packet CD/DVD driver does the same.
+    The deadline and CFQ I/O schedulers employ rbtrees to
+    track requests; the packet CD/DVD driver does the same.
     The high-resolution timer code uses an rbtree to organize outstanding
     timer requests.  The ext3 filesystem tracks directory entries in a
     red-black tree.  Virtual memory areas (VMAs) are tracked with red-black
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 3894eaa23486..209e1584c3dc 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -28,6 +28,7 @@ show up in /proc/sys/kernel:
 - core_uses_pid
 - ctrl-alt-del
 - dentry-state
+- dmesg_restrict
 - domainname
 - hostname
 - hotplug
@@ -213,6 +214,19 @@ to decide what to do with it.
 
 ==============================================================
 
+dmesg_restrict:
+
+This toggle indicates whether unprivileged users are prevented from using
+dmesg(8) to view messages from the kernel's log buffer.  When
+dmesg_restrict is set to (0) there are no restrictions.  When
+dmesg_restrict is set set to (1), users must have CAP_SYS_ADMIN to use
+dmesg(8).
+
+The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default
+value of dmesg_restrict.
+
+==============================================================
+
 domainname & hostname:
 
 These files can be used to set the NIS/YP domainname and the
diff --git a/MAINTAINERS b/MAINTAINERS
index 0094224ca79b..88b74a75d932 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -161,7 +161,7 @@ M: Greg Kroah-Hartman <gregkh@suse.de>
 L:	linux-serial@vger.kernel.org
 W:	http://serial.sourceforge.net
 S:	Maintained
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:	drivers/serial/8250*
 F:	include/linux/serial_8250.h
 
167 167
@@ -5676,7 +5676,7 @@ S: Maintained
 
 STAGING SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@suse.de>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-next-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6.git
 L:	devel@driverdev.osuosl.org
 S:	Maintained
 F:	drivers/staging/
@@ -5910,7 +5910,7 @@ S: Maintained
 TTY LAYER
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 S:	Maintained
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:	drivers/char/tty_*
 F:	drivers/serial/serial_core.c
 F:	include/linux/serial_core.h
@@ -6233,7 +6233,7 @@ USB SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 L:	linux-usb@vger.kernel.org
 W:	http://www.linux-usb.org
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
 S:	Supported
 F:	Documentation/usb/
 F:	drivers/net/usb/
@@ -6598,14 +6598,14 @@ F: drivers/platform/x86
 
 XEN PCI SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/pci/*xen*
 F:	drivers/pci/*xen*
 
 XEN SWIOTLB SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/xen/*swiotlb*
 F:	drivers/xen/*swiotlb*
@@ -6613,7 +6613,7 @@ F: drivers/xen/*swiotlb*
 XEN HYPERVISOR INTERFACE
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xen.org
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 L:	virtualization@lists.osdl.org
 S:	Supported
 F:	arch/x86/xen/
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a19a5266d5fc..8ae3d48d504c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@ config ARM
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
-	select GENERIC_ATOMIC64 if (!CPU_32v6K)
+	select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index ada6359160eb..772f95f1aecd 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -251,15 +251,16 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
 		writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
 	/*
-	 * Set priority on all interrupts.
+	 * Set priority on all global interrupts.
 	 */
-	for (i = 0; i < max_irq; i += 4)
+	for (i = 32; i < max_irq; i += 4)
 		writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
 
 	/*
-	 * Disable all interrupts.
+	 * Disable all interrupts.  Leave the PPI and SGIs alone
+	 * as these enables are banked registers.
 	 */
-	for (i = 0; i < max_irq; i += 32)
+	for (i = 32; i < max_irq; i += 32)
 		writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
 	/*
@@ -277,11 +278,30 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
 
 void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
 {
+	void __iomem *dist_base;
+	int i;
+
 	if (gic_nr >= MAX_GIC_NR)
 		BUG();
 
+	dist_base = gic_data[gic_nr].dist_base;
+	BUG_ON(!dist_base);
+
 	gic_data[gic_nr].cpu_base = base;
 
+	/*
+	 * Deal with the banked PPI and SGI interrupts - disable all
+	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 */
+	writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+	writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+	/*
+	 * Set priority on PPI and SGI interrupts
+	 */
+	for (i = 0; i < 32; i += 4)
+		writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
 	writel(0xf0, base + GIC_CPU_PRIMASK);
 	writel(1, base + GIC_CPU_CTRL);
 }
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 6700c7fc7ebd..21fa272301f8 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -75,7 +75,7 @@ extern unsigned long it8152_base_address;
75 IT8152_PD_IRQ(1) USB (USBR) 75 IT8152_PD_IRQ(1) USB (USBR)
76 IT8152_PD_IRQ(0) Audio controller (ACR) 76 IT8152_PD_IRQ(0) Audio controller (ACR)
77 */ 77 */
78#define IT8152_IRQ(x) (IRQ_BOARD_END + (x)) 78#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
79 79
80/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */ 80/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
81#define IT8152_LD_IRQ_COUNT 9 81#define IT8152_LD_IRQ_COUNT 9
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 54593b0c241b..21e3a4ab3b8c 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -748,8 +748,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
748 breakpoint_handler(addr, regs); 748 breakpoint_handler(addr, regs);
749 break; 749 break;
750 case ARM_ENTRY_ASYNC_WATCHPOINT: 750 case ARM_ENTRY_ASYNC_WATCHPOINT:
751 WARN_ON("Asynchronous watchpoint exception taken. " 751 WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
752 "Debugging results may be unreliable");
753 case ARM_ENTRY_SYNC_WATCHPOINT: 752 case ARM_ENTRY_SYNC_WATCHPOINT:
754 watchpoint_handler(addr, regs); 753 watchpoint_handler(addr, regs);
755 break; 754 break;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 49643b1467e6..07a50357492a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
 					enum armv7_counters counter)
 {
-	int ret;
+	int ret = 0;
 
 	if (counter == ARMV7_CYCLE_COUNTER)
 		ret = pmnc & ARMV7_FLAG_C;
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 20b7411e47fd..c2e112e1a05f 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -28,7 +28,7 @@ int notrace unwind_frame(struct stackframe *frame)
28 28
29 /* only go to a higher address on the stack */ 29 /* only go to a higher address on the stack */
30 low = frame->sp; 30 low = frame->sp;
31 high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE; 31 high = ALIGN(low, THREAD_SIZE);
32 32
33 /* check current frame pointer is within bounds */ 33 /* check current frame pointer is within bounds */
34 if (fp < (low + 12) || fp + 4 >= high) 34 if (fp < (low + 12) || fp + 4 >= high)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cda78d59aa31..446aee97436f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -53,10 +53,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
53void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) 53void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
54{ 54{
55#ifdef CONFIG_KALLSYMS 55#ifdef CONFIG_KALLSYMS
56 char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN]; 56 printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
57 sprint_symbol(sym1, where);
58 sprint_symbol(sym2, from);
59 printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
60#else 57#else
61 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); 58 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
62#endif 59#endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 2a161765f6d5..d2cb0b3c9872 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -279,7 +279,7 @@ int unwind_frame(struct stackframe *frame)
279 279
280 /* only go to a higher address on the stack */ 280 /* only go to a higher address on the stack */
281 low = frame->sp; 281 low = frame->sp;
282 high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE; 282 high = ALIGN(low, THREAD_SIZE);
283 283
284 pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__, 284 pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
285 frame->pc, frame->lr, frame->sp); 285 frame->pc, frame->lr, frame->sp);
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 3a5961d3f3b1..5e31b2b25da9 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,5 +1,13 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/dma.h
+/**
+ * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
+ *
+ * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
+ * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
+ * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
+ * engine.
+ *
+ * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
+ *
  */
 
 #ifndef __ASM_ARCH_DMA_H
@@ -8,12 +16,34 @@
 #include <linux/list.h>
 #include <linux/types.h>
 
+/**
+ * struct ep93xx_dma_buffer - Information about a buffer to be transferred
+ * using the DMA M2P engine
+ *
+ * @list: Entry in DMA buffer list
+ * @bus_addr: Physical address of the buffer
+ * @size: Size of the buffer in bytes
+ */
 struct ep93xx_dma_buffer {
 	struct list_head	list;
 	u32			bus_addr;
 	u16			size;
 };
 
+/**
+ * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
+ *
+ * @name: Unique name for this client
+ * @flags: Client flags
+ * @cookie: User data to pass to callback functions
+ * @buffer_started: Non NULL function to call when a transfer is started.
+ *			The arguments are the user data cookie and the DMA
+ *			buffer which is starting.
+ * @buffer_finished: Non NULL function to call when a transfer is completed.
+ *			The arguments are the user data cookie, the DMA buffer
+ *			which has completed, and a boolean flag indicating if
+ *			the transfer had an error.
+ */
 struct ep93xx_dma_m2p_client {
 	char			*name;
 	u8			flags;
@@ -24,10 +54,11 @@ struct ep93xx_dma_m2p_client {
 				   struct ep93xx_dma_buffer *buf,
 				   int bytes, int error);
 
-	/* Internal to the DMA code. */
+	/* private: Internal use only */
 	void			*channel;
 };
 
+/* DMA M2P ports */
 #define EP93XX_DMA_M2P_PORT_I2S1	0x00
 #define EP93XX_DMA_M2P_PORT_I2S2	0x01
 #define EP93XX_DMA_M2P_PORT_AAC1	0x02
@@ -39,18 +70,80 @@ struct ep93xx_dma_m2p_client {
 #define EP93XX_DMA_M2P_PORT_UART3	0x08
 #define EP93XX_DMA_M2P_PORT_IRDA	0x09
 #define EP93XX_DMA_M2P_PORT_MASK	0x0f
-#define EP93XX_DMA_M2P_TX		0x00
-#define EP93XX_DMA_M2P_RX		0x10
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20
-#define EP93XX_DMA_M2P_IGNORE_ERROR	0x40
-#define EP93XX_DMA_M2P_ERROR_MASK	0x60
 
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+/* DMA M2P client flags */
+#define EP93XX_DMA_M2P_TX		0x00	/* Memory to peripheral */
+#define EP93XX_DMA_M2P_RX		0x10	/* Peripheral to memory */
+
+/*
+ * DMA M2P client error handling flags. See the EP93xx users guide
+ * documentation on the DMA M2P CONTROL register for more details
+ */
+#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20	/* Abort on peripheral error */
+#define EP93XX_DMA_M2P_IGNORE_ERROR	0x40	/* Ignore peripheral errors */
+#define EP93XX_DMA_M2P_ERROR_MASK	0x60	/* Mask of error bits */
+
+/**
+ * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client information to register
+ * returns 0 on success
+ *
+ * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
+ * client
+ */
+int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client to unregister
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ */
 void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * If the current or next transfer positions are free on the M2P client then
+ * the transfer is started immediately. If not, the transfer is added to the
+ * list of pending transfers. This function must not be called from the
+ * buffer_finished callback for an M2P channel.
+ *
+ */
 void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
 			   struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
+ * for an M2P channel
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * This function must only be called from the buffer_finished callback for an
+ * M2P channel. It is commonly used to add the next transfer in a chained list
+ * of DMA transfers.
+ */
 void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
 				     struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
+ *
+ * @m2p: DMA client to flush transfers on
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ *
+ */
 void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
 
 #endif /* __ASM_ARCH_DMA_H */
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 51ff23b72d3a..3688123b5ad8 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -854,10 +854,9 @@ int __init kirkwood_find_tclk(void)
 
 	kirkwood_pcie_id(&dev, &rev);
 
-	if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 ||
-					 rev == MV88F6281_REV_A1)) ||
-	    (dev == MV88F6282_DEV_ID))
-		return 200000000;
+	if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID)
+		if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0)
+			return 200000000;
 
 	return 166666667;
 }
diff --git a/arch/arm/mach-kirkwood/d2net_v2-setup.c b/arch/arm/mach-kirkwood/d2net_v2-setup.c
index 4aa86e4a152c..a31c9499ab36 100644
--- a/arch/arm/mach-kirkwood/d2net_v2-setup.c
+++ b/arch/arm/mach-kirkwood/d2net_v2-setup.c
@@ -225,5 +225,5 @@ MACHINE_START(D2NET_V2, "LaCie d2 Network v2")
 	.init_machine	= d2net_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c
index d3ea1b6c8a02..285edab776e9 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.c
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.c
@@ -111,17 +111,3 @@ void __init lacie_v2_hdd_power_init(int hdd_num)
 			pr_err("Failed to power up HDD%d\n", i + 1);
 	}
 }
-
-/*****************************************************************************
- * Timer
- ****************************************************************************/
-
-static void lacie_v2_timer_init(void)
-{
-	kirkwood_tclk = 166666667;
-	orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
-}
-
-struct sys_timer lacie_v2_timer = {
-	.init = lacie_v2_timer_init,
-};
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.h b/arch/arm/mach-kirkwood/lacie_v2-common.h
index af521315b87b..fc64f578536e 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.h
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.h
@@ -13,6 +13,4 @@ void lacie_v2_register_flash(void);
 void lacie_v2_register_i2c_devices(void);
 void lacie_v2_hdd_power_init(int hdd_num);
 
-extern struct sys_timer lacie_v2_timer;
-
 #endif
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index 065187d177c6..27901f702feb 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -59,7 +59,7 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
 	}
 	printk("\n");
 
-	while (*mpp_list) {
+	for ( ; *mpp_list; mpp_list++) {
 		unsigned int num = MPP_NUM(*mpp_list);
 		unsigned int sel = MPP_SEL(*mpp_list);
 		int shift, gpio_mode;
@@ -88,8 +88,6 @@ void __init kirkwood_mpp_conf(unsigned int *mpp_list)
 		if (sel != 0)
 			gpio_mode = 0;
 		orion_gpio_set_valid(num, gpio_mode);
-
-		mpp_list++;
 	}
 
 	printk(KERN_DEBUG "  final MPP regs:");
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c
index 5ea66f1f4178..65ee21fd2f3b 100644
--- a/arch/arm/mach-kirkwood/netspace_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c
@@ -262,7 +262,7 @@ MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
 	.init_machine	= netspace_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -272,7 +272,7 @@ MACHINE_START(INETSPACE_V2, "LaCie Internet Space v2")
 	.init_machine	= netspace_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -282,6 +282,6 @@ MACHINE_START(NETSPACE_MAX_V2, "LaCie Network Space Max v2")
 	.init_machine	= netspace_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
index a1b45d501aef..93afd3c8bfd8 100644
--- a/arch/arm/mach-kirkwood/netxbig_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
@@ -403,7 +403,7 @@ MACHINE_START(NET2BIG_V2, "LaCie 2Big Network v2")
 	.init_machine	= netxbig_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -413,6 +413,6 @@ MACHINE_START(NET5BIG_V2, "LaCie 5Big Network v2")
 	.init_machine	= netxbig_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/ts41x-setup.c b/arch/arm/mach-kirkwood/ts41x-setup.c
index 8be09a0ce4ac..3587a281d993 100644
--- a/arch/arm/mach-kirkwood/ts41x-setup.c
+++ b/arch/arm/mach-kirkwood/ts41x-setup.c
@@ -27,6 +27,10 @@
 #include "mpp.h"
 #include "tsx1x-common.h"
 
+/* for the PCIe reset workaround */
+#include <plat/pcie.h>
+
+
 #define QNAP_TS41X_JUMPER_JP1	45
 
 static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
@@ -140,8 +144,16 @@ static void __init qnap_ts41x_init(void)
 
 static int __init ts41x_pci_init(void)
 {
-	if (machine_is_ts41x())
+	if (machine_is_ts41x()) {
+		/*
+		 * Without this explicit reset, the PCIe SATA controller
+		 * (Marvell 88sx7042/sata_mv) is known to stop working
+		 * after a few minutes.
+		 */
+		orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);
+
 		kirkwood_pcie_init(KW_PCIE0);
+	}
 
 	return 0;
 }
diff --git a/arch/arm/mach-mmp/include/mach/cputype.h b/arch/arm/mach-mmp/include/mach/cputype.h
index f43a68b213f1..8a3b56dfd35d 100644
--- a/arch/arm/mach-mmp/include/mach/cputype.h
+++ b/arch/arm/mach-mmp/include/mach/cputype.h
@@ -46,7 +46,8 @@ static inline int cpu_is_pxa910(void)
46#ifdef CONFIG_CPU_MMP2 46#ifdef CONFIG_CPU_MMP2
47static inline int cpu_is_mmp2(void) 47static inline int cpu_is_mmp2(void)
48{ 48{
49 return (((cpu_readid_id() >> 8) & 0xff) == 0x58); 49 return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
50}
50#else 51#else
51#define cpu_is_mmp2() (0) 52#define cpu_is_mmp2() (0)
52#endif 53#endif
diff --git a/arch/arm/mach-mv78xx0/mpp.c b/arch/arm/mach-mv78xx0/mpp.c
index 354ac514eb89..84db2dfc475c 100644
--- a/arch/arm/mach-mv78xx0/mpp.c
+++ b/arch/arm/mach-mv78xx0/mpp.c
@@ -54,7 +54,7 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
54 } 54 }
55 printk("\n"); 55 printk("\n");
56 56
57 while (*mpp_list) { 57 for ( ; *mpp_list; mpp_list++) {
58 unsigned int num = MPP_NUM(*mpp_list); 58 unsigned int num = MPP_NUM(*mpp_list);
59 unsigned int sel = MPP_SEL(*mpp_list); 59 unsigned int sel = MPP_SEL(*mpp_list);
60 int shift, gpio_mode; 60 int shift, gpio_mode;
@@ -83,8 +83,6 @@ void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
83 if (sel != 0) 83 if (sel != 0)
84 gpio_mode = 0; 84 gpio_mode = 0;
85 orion_gpio_set_valid(num, gpio_mode); 85 orion_gpio_set_valid(num, gpio_mode);
86
87 mpp_list++;
88 } 86 }
89 87
90 printk(KERN_DEBUG " final MPP regs:"); 88 printk(KERN_DEBUG " final MPP regs:");
diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c
index bc4c3b9aaf83..db485d3b8144 100644
--- a/arch/arm/mach-orion5x/mpp.c
+++ b/arch/arm/mach-orion5x/mpp.c
@@ -127,7 +127,7 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
127 /* Initialize gpiolib. */ 127 /* Initialize gpiolib. */
128 orion_gpio_init(); 128 orion_gpio_init();
129 129
130 while (mode->mpp >= 0) { 130 for ( ; mode->mpp >= 0; mode++) {
131 u32 *reg; 131 u32 *reg;
132 int num_type; 132 int num_type;
133 int shift; 133 int shift;
@@ -160,8 +160,6 @@ void __init orion5x_mpp_conf(struct orion5x_mpp_mode *mode)
160 orion_gpio_set_unused(mode->mpp); 160 orion_gpio_set_unused(mode->mpp);
161 161
162 orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO)); 162 orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO));
163
164 mode++;
165 } 163 }
166 164
167 writel(mpp_0_7_ctrl, MPP_0_7_CTRL); 165 writel(mpp_0_7_ctrl, MPP_0_7_CTRL);
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 16f1bd5324be..c1c1cd04bdde 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -239,7 +239,7 @@ static struct platform_nand_data ts78xx_ts_nand_data = {
239static struct resource ts78xx_ts_nand_resources = { 239static struct resource ts78xx_ts_nand_resources = {
240 .start = TS_NAND_DATA, 240 .start = TS_NAND_DATA,
241 .end = TS_NAND_DATA + 4, 241 .end = TS_NAND_DATA + 4,
242 .flags = IORESOURCE_IO, 242 .flags = IORESOURCE_MEM,
243}; 243};
244 244
245static struct platform_device ts78xx_ts_nand_device = { 245static struct platform_device ts78xx_ts_nand_device = {
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index ac5598ce9724..d34b99febeb9 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -476,8 +476,6 @@ static void __init cmx2xx_init(void)
476 476
477static void __init cmx2xx_init_irq(void) 477static void __init cmx2xx_init_irq(void)
478{ 478{
479 pxa27x_init_irq();
480
481 if (cpu_is_pxa25x()) { 479 if (cpu_is_pxa25x()) {
482 pxa25x_init_irq(); 480 pxa25x_init_irq();
483 cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ); 481 cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index 4b521e045d75..ffa50e633ee6 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -116,7 +116,7 @@ static struct platform_device smc91x_device = {
116 }, 116 },
117}; 117};
118 118
119#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE) 119#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
120static uint16_t lcd_power_on[] = { 120static uint16_t lcd_power_on[] = {
121 /* single frame */ 121 /* single frame */
122 SMART_CMD_NOOP, 122 SMART_CMD_NOOP,
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index c2e405a9e025..fd25ccd7272f 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -54,7 +54,9 @@ static struct map_desc ct_ca9x4_io_desc[] __initdata = {
54 54
55static void __init ct_ca9x4_map_io(void) 55static void __init ct_ca9x4_map_io(void)
56{ 56{
57#ifdef CONFIG_LOCAL_TIMERS
57 twd_base = MMIO_P2V(A9_MPCORE_TWD); 58 twd_base = MMIO_P2V(A9_MPCORE_TWD);
59#endif
58 v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); 60 v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
59} 61}
60 62
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4dd0646e859..ac6a36142fcd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -198,7 +198,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
198 * fragmentation of the DMA space, and also prevents allocations 198 * fragmentation of the DMA space, and also prevents allocations
199 * smaller than a section from crossing a section boundary. 199 * smaller than a section from crossing a section boundary.
200 */ 200 */
201 bit = fls(size - 1) + 1; 201 bit = fls(size - 1);
202 if (bit > SECTION_SHIFT) 202 if (bit > SECTION_SHIFT)
203 bit = SECTION_SHIFT; 203 bit = SECTION_SHIFT;
204 align = 1 << bit; 204 align = 1 << bit;
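For illustration only (not part of the patch): the fix above is an off-by-one in alignment math. For any size, fls(size - 1) already names the smallest power of two covering it, so the extra "+ 1" doubled every DMA remap's alignment. A minimal userspace sketch, with fls() reimplemented since the kernel helper isn't available here:

#include <stdio.h>

/* stand-in for the kernel's fls(): 1-based index of the highest set bit */
static int fls_demo(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long size = 1UL << 20;			/* 1 MiB request */
	int bit = fls_demo(size - 1);			/* 20 */

	printf("new align = %lu\n", 1UL << bit);	/* 1 MiB: exact fit */
	printf("old align = %lu\n", 1UL << (bit + 1));	/* 2 MiB: over-aligned */
	return 0;
}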
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 6f42a18b8aa4..fc819120978d 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -284,12 +284,14 @@ void __init omap_dsp_reserve_sdram_memblock(void)
284 if (!size) 284 if (!size)
285 return; 285 return;
286 286
287 paddr = __memblock_alloc_base(size, SZ_1M, MEMBLOCK_REAL_LIMIT); 287 paddr = memblock_alloc(size, SZ_1M);
288 if (!paddr) { 288 if (!paddr) {
289 pr_err("%s: failed to reserve %x bytes\n", 289 pr_err("%s: failed to reserve %x bytes\n",
290 __func__, size); 290 __func__, size);
291 return; 291 return;
292 } 292 }
293 memblock_free(paddr, size);
294 memblock_remove(paddr, size);
293 295
294 omap_dsp_phys_mempool_base = paddr; 296 omap_dsp_phys_mempool_base = paddr;
295} 297}
diff --git a/arch/arm/plat-orion/include/plat/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h
index 3ebfef72b4e7..cc99163e73fd 100644
--- a/arch/arm/plat-orion/include/plat/pcie.h
+++ b/arch/arm/plat-orion/include/plat/pcie.h
@@ -11,12 +11,15 @@
11#ifndef __PLAT_PCIE_H 11#ifndef __PLAT_PCIE_H
12#define __PLAT_PCIE_H 12#define __PLAT_PCIE_H
13 13
14struct pci_bus;
15
14u32 orion_pcie_dev_id(void __iomem *base); 16u32 orion_pcie_dev_id(void __iomem *base);
15u32 orion_pcie_rev(void __iomem *base); 17u32 orion_pcie_rev(void __iomem *base);
16int orion_pcie_link_up(void __iomem *base); 18int orion_pcie_link_up(void __iomem *base);
17int orion_pcie_x4_mode(void __iomem *base); 19int orion_pcie_x4_mode(void __iomem *base);
18int orion_pcie_get_local_bus_nr(void __iomem *base); 20int orion_pcie_get_local_bus_nr(void __iomem *base);
19void orion_pcie_set_local_bus_nr(void __iomem *base, int nr); 21void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
22void orion_pcie_reset(void __iomem *base);
20void orion_pcie_setup(void __iomem *base, 23void orion_pcie_setup(void __iomem *base,
21 struct mbus_dram_target_info *dram); 24 struct mbus_dram_target_info *dram);
22int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus, 25int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 779553a1595e..af2d733c50b5 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -182,11 +182,6 @@ void __init orion_pcie_setup(void __iomem *base,
182 u32 mask; 182 u32 mask;
183 183
184 /* 184 /*
185 * soft reset PCIe unit
186 */
187 orion_pcie_reset(base);
188
189 /*
190 * Point PCIe unit MBUS decode windows to DRAM space. 185 * Point PCIe unit MBUS decode windows to DRAM space.
191 */ 186 */
192 orion_pcie_setup_wins(base, dram); 187 orion_pcie_setup_wins(base, dram);
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index 2cd899f75a3c..b7c5bab9bd77 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -38,8 +38,8 @@ struct pt_regs {
38 38
39struct task_struct; 39struct task_struct;
40 40
41extern long subarch_ptrace(struct task_struct *child, long request, long addr, 41extern long subarch_ptrace(struct task_struct *child, long request,
42 long data); 42 unsigned long addr, unsigned long data);
43extern unsigned long getreg(struct task_struct *child, int regno); 43extern unsigned long getreg(struct task_struct *child, int regno);
44extern int putreg(struct task_struct *child, int regno, unsigned long value); 44extern int putreg(struct task_struct *child, int regno, unsigned long value);
45extern int get_fpregs(struct user_i387_struct __user *buf, 45extern int get_fpregs(struct user_i387_struct __user *buf,
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index a5e33f29bbeb..701b672c1122 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -122,7 +122,7 @@ long arch_ptrace(struct task_struct *child, long request,
122 break; 122 break;
123 123
124 case PTRACE_SET_THREAD_AREA: 124 case PTRACE_SET_THREAD_AREA:
125 ret = ptrace_set_thread_area(child, addr, datavp); 125 ret = ptrace_set_thread_area(child, addr, vp);
126 break; 126 break;
127 127
128 case PTRACE_FAULTINFO: { 128 case PTRACE_FAULTINFO: {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 286de34b0ed6..f6ce0bda3b98 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -141,13 +141,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
141 141
142static inline u32 native_apic_msr_read(u32 reg) 142static inline u32 native_apic_msr_read(u32 reg)
143{ 143{
144 u32 low, high; 144 u64 msr;
145 145
146 if (reg == APIC_DFR) 146 if (reg == APIC_DFR)
147 return -1; 147 return -1;
148 148
149 rdmsr(APIC_BASE_MSR + (reg >> 4), low, high); 149 rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
150 return low; 150 return (u32)msr;
151} 151}
152 152
153static inline void native_x2apic_wait_icr_idle(void) 153static inline void native_x2apic_wait_icr_idle(void)
@@ -181,12 +181,12 @@ extern void enable_x2apic(void);
181extern void x2apic_icr_write(u32 low, u32 id); 181extern void x2apic_icr_write(u32 low, u32 id);
182static inline int x2apic_enabled(void) 182static inline int x2apic_enabled(void)
183{ 183{
184 int msr, msr2; 184 u64 msr;
185 185
186 if (!cpu_has_x2apic) 186 if (!cpu_has_x2apic)
187 return 0; 187 return 0;
188 188
189 rdmsr(MSR_IA32_APICBASE, msr, msr2); 189 rdmsrl(MSR_IA32_APICBASE, msr);
190 if (msr & X2APIC_ENABLE) 190 if (msr & X2APIC_ENABLE)
191 return 1; 191 return 1;
192 return 0; 192 return 0;
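Both hunks replace the two-output rdmsr() with rdmsrl() into a single u64 and truncate at the point of use. As a sketch of the bit-level equivalence only — rdmsr is privileged, so the MSR value below is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t msr = 0xfee00d01ULL | (0x12ULL << 32);	/* pretend MSR value */

	/* what the old rdmsr(reg, low, high) form produced */
	uint32_t low  = (uint32_t)msr;
	uint32_t high = (uint32_t)(msr >> 32);

	/* the rdmsrl() form reads one u64; callers truncate as needed */
	printf("low=%#x high=%#x (u32)msr=%#x\n", low, high, (uint32_t)msr);
	return 0;
}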
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index b2f2d2e05cec..6d90adf4428a 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -806,6 +806,78 @@ union uvh_node_present_table_u {
806}; 806};
807 807
808/* ========================================================================= */ 808/* ========================================================================= */
809/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
810/* ========================================================================= */
811#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
812
813#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
814#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
815#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
816#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
817#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
818#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
819
820union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
821 unsigned long v;
822 struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
823 unsigned long rsvd_0_23: 24; /* */
824 unsigned long base : 8; /* RW */
825 unsigned long rsvd_32_47: 16; /* */
826 unsigned long m_alias : 5; /* RW */
827 unsigned long rsvd_53_62: 10; /* */
828 unsigned long enable : 1; /* RW */
829 } s;
830};
831
832/* ========================================================================= */
833/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
834/* ========================================================================= */
835#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
836
837#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
838#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
839#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
840#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
841#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
842#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
843
844union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
845 unsigned long v;
846 struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
847 unsigned long rsvd_0_23: 24; /* */
848 unsigned long base : 8; /* RW */
849 unsigned long rsvd_32_47: 16; /* */
850 unsigned long m_alias : 5; /* RW */
851 unsigned long rsvd_53_62: 10; /* */
852 unsigned long enable : 1; /* RW */
853 } s;
854};
855
856/* ========================================================================= */
857/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
858/* ========================================================================= */
859#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
860
861#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
862#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
863#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
864#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
865#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
866#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
867
868union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
869 unsigned long v;
870 struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
871 unsigned long rsvd_0_23: 24; /* */
872 unsigned long base : 8; /* RW */
873 unsigned long rsvd_32_47: 16; /* */
874 unsigned long m_alias : 5; /* RW */
875 unsigned long rsvd_53_62: 10; /* */
876 unsigned long enable : 1; /* RW */
877 } s;
878};
879
880/* ========================================================================= */
809/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ 881/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
810/* ========================================================================= */ 882/* ========================================================================= */
811#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL 883#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
@@ -857,6 +929,29 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
857}; 929};
858 930
859/* ========================================================================= */ 931/* ========================================================================= */
932/* UVH_RH_GAM_CONFIG_MMR */
933/* ========================================================================= */
934#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
935
936#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
937#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
938#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
939#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
940#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
941#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
942
943union uvh_rh_gam_config_mmr_u {
944 unsigned long v;
945 struct uvh_rh_gam_config_mmr_s {
946 unsigned long m_skt : 6; /* RW */
947 unsigned long n_skt : 4; /* RW */
948 unsigned long rsvd_10_11: 2; /* */
949 unsigned long mmiol_cfg : 1; /* RW */
950 unsigned long rsvd_13_63: 51; /* */
951 } s;
952};
953
954/* ========================================================================= */
860/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ 955/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
861/* ========================================================================= */ 956/* ========================================================================= */
862#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL 957#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -987,97 +1082,5 @@ union uvh_rtc1_int_config_u {
987 } s; 1082 } s;
988}; 1083};
989 1084
990/* ========================================================================= */
991/* UVH_SI_ADDR_MAP_CONFIG */
992/* ========================================================================= */
993#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
994
995#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
996#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
997#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
998#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
999
1000union uvh_si_addr_map_config_u {
1001 unsigned long v;
1002 struct uvh_si_addr_map_config_s {
1003 unsigned long m_skt : 6; /* RW */
1004 unsigned long rsvd_6_7: 2; /* */
1005 unsigned long n_skt : 4; /* RW */
1006 unsigned long rsvd_12_63: 52; /* */
1007 } s;
1008};
1009
1010/* ========================================================================= */
1011/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
1012/* ========================================================================= */
1013#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
1014
1015#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
1016#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1017#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1018#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1019#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
1020#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1021
1022union uvh_si_alias0_overlay_config_u {
1023 unsigned long v;
1024 struct uvh_si_alias0_overlay_config_s {
1025 unsigned long rsvd_0_23: 24; /* */
1026 unsigned long base : 8; /* RW */
1027 unsigned long rsvd_32_47: 16; /* */
1028 unsigned long m_alias : 5; /* RW */
1029 unsigned long rsvd_53_62: 10; /* */
1030 unsigned long enable : 1; /* RW */
1031 } s;
1032};
1033
1034/* ========================================================================= */
1035/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
1036/* ========================================================================= */
1037#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
1038
1039#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
1040#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1041#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1042#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1043#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
1044#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1045
1046union uvh_si_alias1_overlay_config_u {
1047 unsigned long v;
1048 struct uvh_si_alias1_overlay_config_s {
1049 unsigned long rsvd_0_23: 24; /* */
1050 unsigned long base : 8; /* RW */
1051 unsigned long rsvd_32_47: 16; /* */
1052 unsigned long m_alias : 5; /* RW */
1053 unsigned long rsvd_53_62: 10; /* */
1054 unsigned long enable : 1; /* RW */
1055 } s;
1056};
1057
1058/* ========================================================================= */
1059/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
1060/* ========================================================================= */
1061#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
1062
1063#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
1064#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
1065#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
1066#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
1067#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
1068#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
1069
1070union uvh_si_alias2_overlay_config_u {
1071 unsigned long v;
1072 struct uvh_si_alias2_overlay_config_s {
1073 unsigned long rsvd_0_23: 24; /* */
1074 unsigned long base : 8; /* RW */
1075 unsigned long rsvd_32_47: 16; /* */
1076 unsigned long m_alias : 5; /* RW */
1077 unsigned long rsvd_53_62: 10; /* */
1078 unsigned long enable : 1; /* RW */
1079 } s;
1080};
1081
1082 1085
1083#endif /* _ASM_X86_UV_UV_MMRS_H */ 1086#endif /* __ASM_UV_MMRS_X86_H__ */
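The pattern throughout this header: read the raw 64-bit MMR into .v, then decode named fields through the bitfield overlay .s, which mirrors the *_SHFT/*_MASK macros. A reduced userspace copy of uvh_rh_gam_config_mmr_u as a sketch — it assumes GCC/Clang on an LP64 target (unsigned long is 64 bits, and long bitfields are a compiler extension the kernel also relies on):

#include <stdio.h>

union uvh_rh_gam_config_mmr_u {
	unsigned long v;
	struct {
		unsigned long m_skt      :  6;	/* bits  0..5 */
		unsigned long n_skt      :  4;	/* bits  6..9 */
		unsigned long rsvd_10_11 :  2;
		unsigned long mmiol_cfg  :  1;	/* bit  12    */
		unsigned long rsvd_13_63 : 51;
	} s;
};

int main(void)
{
	union uvh_rh_gam_config_mmr_u m_n_config;

	/* stand-in for uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR) */
	m_n_config.v = 0x10e6UL;

	/* prints m_skt=38 n_skt=3 mmiol=1 */
	printf("m_skt=%lu n_skt=%lu mmiol=%lu\n",
	       (unsigned long)m_n_config.s.m_skt,
	       (unsigned long)m_n_config.s.n_skt,
	       (unsigned long)m_n_config.s.mmiol_cfg);
	return 0;
}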
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 850657d1b0ed..3f838d537392 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -52,7 +52,6 @@
52#include <asm/mce.h> 52#include <asm/mce.h>
53#include <asm/kvm_para.h> 53#include <asm/kvm_para.h>
54#include <asm/tsc.h> 54#include <asm/tsc.h>
55#include <asm/atomic.h>
56 55
57unsigned int num_processors; 56unsigned int num_processors;
58 57
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ed4118de249e..194539aea175 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -379,14 +379,14 @@ struct redir_addr {
379#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 379#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
380 380
381static __initdata struct redir_addr redir_addrs[] = { 381static __initdata struct redir_addr redir_addrs[] = {
382 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, 382 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
383 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, 383 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
384 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, 384 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
385}; 385};
386 386
387static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) 387static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
388{ 388{
389 union uvh_si_alias0_overlay_config_u alias; 389 union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
390 union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; 390 union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
391 int i; 391 int i;
392 392
@@ -660,7 +660,7 @@ void uv_nmi_init(void)
660 660
661void __init uv_system_init(void) 661void __init uv_system_init(void)
662{ 662{
663 union uvh_si_addr_map_config_u m_n_config; 663 union uvh_rh_gam_config_mmr_u m_n_config;
664 union uvh_node_id_u node_id; 664 union uvh_node_id_u node_id;
665 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; 665 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
666 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val; 666 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
@@ -670,7 +670,7 @@ void __init uv_system_init(void)
670 670
671 map_low_mmrs(); 671 map_low_mmrs();
672 672
673 m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); 673 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
674 m_val = m_n_config.s.m_skt; 674 m_val = m_n_config.s.m_skt;
675 n_val = m_n_config.s.n_skt; 675 n_val = m_n_config.s.n_skt;
676 mmr_base = 676 mmr_base =
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 46d58448c3af..e421b8cd6944 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -280,11 +280,11 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
280 struct amd_nb *nb; 280 struct amd_nb *nb;
281 int i; 281 int i;
282 282
283 nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL); 283 nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
284 cpu_to_node(cpu));
284 if (!nb) 285 if (!nb)
285 return NULL; 286 return NULL;
286 287
287 memset(nb, 0, sizeof(*nb));
288 nb->nb_id = nb_id; 288 nb->nb_id = nb_id;
289 289
290 /* 290 /*
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index e1af7c055c7d..ce0cb4721c9a 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -212,7 +212,7 @@ static int install_equiv_cpu_table(const u8 *buf)
212 return 0; 212 return 0;
213 } 213 }
214 214
215 equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size); 215 equiv_cpu_table = vmalloc(size);
216 if (!equiv_cpu_table) { 216 if (!equiv_cpu_table) {
217 pr_err("failed to allocate equivalent CPU table\n"); 217 pr_err("failed to allocate equivalent CPU table\n");
218 return 0; 218 return 0;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 71825806cd44..6da143c2a6b8 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -217,13 +217,13 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
217 wrmsrl(address, val); 217 wrmsrl(address, val);
218} 218}
219 219
220static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d) 220static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
221{ 221{
222 pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF; 222 pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
223 return 0; 223 return 0;
224} 224}
225 225
226static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = { 226static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
227 { 227 {
228 .callback = set_check_enable_amd_mmconf, 228 .callback = set_check_enable_amd_mmconf,
229 .ident = "Sun Microsystems Machine", 229 .ident = "Sun Microsystems Machine",
@@ -234,7 +234,8 @@ static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
234 {} 234 {}
235}; 235};
236 236
237void __cpuinit check_enable_amd_mmconf_dmi(void) 237/* Called from a __cpuinit function, but only on the BSP. */
238void __ref check_enable_amd_mmconf_dmi(void)
238{ 239{
239 dmi_check_system(mmconf_dmi_table); 240 dmi_check_system(mmconf_dmi_table);
240} 241}
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index bab3b9e6f66d..008b91eefa18 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -41,44 +41,6 @@ void pvclock_set_flags(u8 flags)
41 valid_flags = flags; 41 valid_flags = flags;
42} 42}
43 43
44/*
45 * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
46 * yielding a 64-bit result.
47 */
48static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
49{
50 u64 product;
51#ifdef __i386__
52 u32 tmp1, tmp2;
53#endif
54
55 if (shift < 0)
56 delta >>= -shift;
57 else
58 delta <<= shift;
59
60#ifdef __i386__
61 __asm__ (
62 "mul %5 ; "
63 "mov %4,%%eax ; "
64 "mov %%edx,%4 ; "
65 "mul %5 ; "
66 "xor %5,%5 ; "
67 "add %4,%%eax ; "
68 "adc %5,%%edx ; "
69 : "=A" (product), "=r" (tmp1), "=r" (tmp2)
70 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
71#elif defined(__x86_64__)
72 __asm__ (
73 "mul %%rdx ; shrd $32,%%rdx,%%rax"
74 : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
75#else
76#error implement me!
77#endif
78
79 return product;
80}
81
82static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow) 44static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
83{ 45{
84 u64 delta = native_read_tsc() - shadow->tsc_timestamp; 46 u64 delta = native_read_tsc() - shadow->tsc_timestamp;
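The deleted helper computed (delta << shift) * mul_frac / 2^32 — a 64x32 fixed-point multiply keeping the top 64 bits of the 96-bit product. The same arithmetic as a portable userspace sketch, using unsigned __int128 (a GCC/Clang extension on 64-bit targets) in place of the removed inline asm:

#include <stdint.h>
#include <stdio.h>

static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	/* 64x32 -> 96-bit product; keep bits 32..95 */
	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
	/* mul_frac = floor(2^32 / 3): scale a TSC delta by ~1/3 */
	printf("%llu\n",
	       (unsigned long long)scale_delta(300, 0x55555555u, 0)); /* 99 */
	return 0;
}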
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 49358481c733..12cdbb17ad18 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -251,7 +251,7 @@ static void __cpuinit calculate_tlb_offset(void)
251 } 251 }
252} 252}
253 253
254static int tlb_cpuhp_notify(struct notifier_block *n, 254static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
255 unsigned long action, void *hcpu) 255 unsigned long action, void *hcpu)
256{ 256{
257 switch (action & 0xf) { 257 switch (action & 0xf) {
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 117f5b8daf75..d7b5109f7a9c 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -147,8 +147,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
147 irq = xen_allocate_pirq(v[i], 0, /* not sharable */ 147 irq = xen_allocate_pirq(v[i], 0, /* not sharable */
148 (type == PCI_CAP_ID_MSIX) ? 148 (type == PCI_CAP_ID_MSIX) ?
149 "pcifront-msi-x" : "pcifront-msi"); 149 "pcifront-msi-x" : "pcifront-msi");
150 if (irq < 0) 150 if (irq < 0) {
151 return -1; 151 ret = -1;
152 goto free;
153 }
152 154
153 ret = set_irq_msi(irq, msidesc); 155 ret = set_irq_msi(irq, msidesc);
154 if (ret) 156 if (ret)
@@ -164,7 +166,7 @@ error:
164 if (ret == -ENODEV) 166 if (ret == -ENODEV)
165 dev_err(&dev->dev, "Xen PCI frontend has not registered" \ 167 dev_err(&dev->dev, "Xen PCI frontend has not registered" \
166 " MSI/MSI-X support!\n"); 168 " MSI/MSI-X support!\n");
167 169free:
168 kfree(v); 170 kfree(v);
169 return ret; 171 return ret;
170} 172}
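The fix routes the early irq-allocation failure through the function's existing cleanup label, so the v[] array allocated earlier is always freed. That single-exit shape, in a stdlib-only sketch with invented names:

#include <stdio.h>
#include <stdlib.h>

/* fails when i == fail_at, standing in for a failed xen_allocate_pirq() */
static int setup_vectors(int fail_at)
{
	int ret = 0;
	int *v = malloc(3 * sizeof(*v));

	if (!v)
		return -1;

	for (int i = 0; i < 3; i++) {
		if (i == fail_at) {
			ret = -1;
			goto free;	/* don't leak v on the error path */
		}
		v[i] = i;
	}
free:
	free(v);
	return ret;
}

int main(void)
{
	printf("%d %d\n", setup_vectors(-1), setup_vectors(1));	/* 0 -1 */
	return 0;
}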
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 20ea20a39e2a..a318194002b5 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1343,8 +1343,8 @@ uv_activation_descriptor_init(int node, int pnode)
1343 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) 1343 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
1344 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub 1344 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
1345 */ 1345 */
1346 bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* 1346 bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
1347 UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); 1347 * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
1348 BUG_ON(!bau_desc); 1348 BUG_ON(!bau_desc);
1349 1349
1350 pa = uv_gpa(bau_desc); /* need the real nasid*/ 1350 pa = uv_gpa(bau_desc); /* need the real nasid*/
@@ -1402,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode)
1402 struct bau_payload_queue_entry *pqp_malloc; 1402 struct bau_payload_queue_entry *pqp_malloc;
1403 struct bau_control *bcp; 1403 struct bau_control *bcp;
1404 1404
1405 pqp = (struct bau_payload_queue_entry *) kmalloc_node( 1405 pqp = kmalloc_node((DEST_Q_SIZE + 1)
1406 (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), 1406 * sizeof(struct bau_payload_queue_entry),
1407 GFP_KERNEL, node); 1407 GFP_KERNEL, node);
1408 BUG_ON(!pqp); 1408 BUG_ON(!pqp);
1409 pqp_malloc = pqp; 1409 pqp_malloc = pqp;
1410 1410
@@ -1520,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs)
1520 1520
1521 timeout_us = calculate_destination_timeout(); 1521 timeout_us = calculate_destination_timeout();
1522 1522
1523 uvhub_descs = (struct uvhub_desc *) 1523 uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1524 kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1525 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); 1524 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
1526 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); 1525 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
1527 for_each_present_cpu(cpu) { 1526 for_each_present_cpu(cpu) {
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c237b810b03f..21ed8d7f75a5 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2126,7 +2126,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
2126{ 2126{
2127 pmd_t *kernel_pmd; 2127 pmd_t *kernel_pmd;
2128 2128
2129 level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE); 2129 level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2130 2130
2131 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + 2131 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2132 xen_start_info->nr_pt_frames * PAGE_SIZE + 2132 xen_start_info->nr_pt_frames * PAGE_SIZE +
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b1dbdaa23ecc..769c4b01fa32 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
118 const struct e820map *e820) 118 const struct e820map *e820)
119{ 119{
120 phys_addr_t max_addr = PFN_PHYS(max_pfn); 120 phys_addr_t max_addr = PFN_PHYS(max_pfn);
121 phys_addr_t last_end = 0; 121 phys_addr_t last_end = ISA_END_ADDRESS;
122 unsigned long released = 0; 122 unsigned long released = 0;
123 int i; 123 int i;
124 124
125 /* Free any unused memory above the low 1Mbyte. */
125 for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { 126 for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
126 phys_addr_t end = e820->map[i].addr; 127 phys_addr_t end = e820->map[i].addr;
127 end = min(max_addr, end); 128 end = min(max_addr, end);
128 129
129 released += xen_release_chunk(last_end, end); 130 if (last_end < end)
130 last_end = e820->map[i].addr + e820->map[i].size; 131 released += xen_release_chunk(last_end, end);
132 last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
131 } 133 }
132 134
133 if (last_end < max_addr) 135 if (last_end < max_addr)
@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void)
164 XENMEM_memory_map; 166 XENMEM_memory_map;
165 rc = HYPERVISOR_memory_op(op, &memmap); 167 rc = HYPERVISOR_memory_op(op, &memmap);
166 if (rc == -ENOSYS) { 168 if (rc == -ENOSYS) {
169 BUG_ON(xen_initial_domain());
167 memmap.nr_entries = 1; 170 memmap.nr_entries = 1;
168 map[0].addr = 0ULL; 171 map[0].addr = 0ULL;
169 map[0].size = mem_end; 172 map[0].size = mem_end;
@@ -201,12 +204,13 @@ char * __init xen_memory_setup(void)
201 } 204 }
202 205
203 /* 206 /*
204 * Even though this is normal, usable memory under Xen, reserve 207 * In domU, the ISA region is normal, usable memory, but we
205 * ISA memory anyway because too many things think they can poke 208 * reserve ISA memory anyway because too many things poke
206 * about in there. 209 * about in there.
207 * 210 *
208 * In a dom0 kernel, this region is identity mapped with the 211 * In Dom0, the host E820 information can leave gaps in the
209 * hardware ISA area, so it really is out of bounds. 212 * ISA range, which would cause us to release those pages. To
213 * avoid this, we unconditionally reserve them here.
210 */ 214 */
211 e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, 215 e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
212 E820_RESERVED); 216 E820_RESERVED);
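The rewritten walk starts last_end at the end of the ISA hole, releases only genuine gaps (last_end < end), and advances last_end with max() so overlapping E820 entries cannot drag it backwards. A toy userspace run over an invented map, printing the ranges that would be handed back:

#include <stdio.h>

#define ISA_END_ADDRESS 0x100000ULL	/* 1 MiB */

struct e820_entry { unsigned long long addr, size; };

int main(void)
{
	/* made-up map: RAM below 1 MiB, RAM to 1 GiB, then a hole */
	struct e820_entry map[] = {
		{ 0x0,		0x9f000    },
		{ 0x100000,	0x3ff00000 },
		{ 0x50000000,	0x10000000 },
	};
	unsigned long long max_addr = 0x80000000ULL;	/* 2 GiB populated */
	unsigned long long last_end = ISA_END_ADDRESS;

	for (int i = 0; i < 3 && last_end < max_addr; i++) {
		unsigned long long end = map[i].addr;

		if (end > max_addr)
			end = max_addr;
		if (last_end < end)	/* a real gap: release it */
			printf("release [%#llx, %#llx)\n", last_end, end);
		if (map[i].addr + map[i].size > last_end)
			last_end = map[i].addr + map[i].size;
	}
	if (last_end < max_addr)
		printf("release [%#llx, %#llx)\n", last_end, max_addr);
	return 0;
}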
diff --git a/block/blk-core.c b/block/blk-core.c
index f0834e2f5727..4ce953f1b390 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1194 int where = ELEVATOR_INSERT_SORT; 1194 int where = ELEVATOR_INSERT_SORT;
1195 int rw_flags; 1195 int rw_flags;
1196 1196
1197 /* REQ_HARDBARRIER is no more */
1198 if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
1199 "block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
1200 bio_endio(bio, -EOPNOTSUPP);
1201 return 0;
1202 }
1203
1204 /* 1197 /*
1205 * low level driver can indicate that it wants pages above a 1198 * low level driver can indicate that it wants pages above a
1206 * certain limit bounced to low memory (ie for highmem, or even 1199 * certain limit bounced to low memory (ie for highmem, or even
@@ -1351,7 +1344,7 @@ static void handle_bad_sector(struct bio *bio)
1351 bdevname(bio->bi_bdev, b), 1344 bdevname(bio->bi_bdev, b),
1352 bio->bi_rw, 1345 bio->bi_rw,
1353 (unsigned long long)bio->bi_sector + bio_sectors(bio), 1346 (unsigned long long)bio->bi_sector + bio_sectors(bio),
1354 (long long)(bio->bi_bdev->bd_inode->i_size >> 9)); 1347 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1355 1348
1356 set_bit(BIO_EOF, &bio->bi_flags); 1349 set_bit(BIO_EOF, &bio->bi_flags);
1357} 1350}
@@ -1404,7 +1397,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1404 return 0; 1397 return 0;
1405 1398
1406 /* Test device or partition size, when known. */ 1399 /* Test device or partition size, when known. */
1407 maxsector = bio->bi_bdev->bd_inode->i_size >> 9; 1400 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1408 if (maxsector) { 1401 if (maxsector) {
1409 sector_t sector = bio->bi_sector; 1402 sector_t sector = bio->bi_sector;
1410 1403
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index d22c4c55c406..3c7a339fe381 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -153,20 +153,6 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
153} 153}
154EXPORT_SYMBOL(get_io_context); 154EXPORT_SYMBOL(get_io_context);
155 155
156void copy_io_context(struct io_context **pdst, struct io_context **psrc)
157{
158 struct io_context *src = *psrc;
159 struct io_context *dst = *pdst;
160
161 if (src) {
162 BUG_ON(atomic_long_read(&src->refcount) == 0);
163 atomic_long_inc(&src->refcount);
164 put_io_context(dst);
165 *pdst = src;
166 }
167}
168EXPORT_SYMBOL(copy_io_context);
169
170static int __init blk_ioc_init(void) 156static int __init blk_ioc_init(void)
171{ 157{
172 iocontext_cachep = kmem_cache_create("blkdev_ioc", 158 iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-map.c b/block/blk-map.c
index d4a586d8691e..5d5dbe47c228 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
205 unaligned = 1; 205 unaligned = 1;
206 break; 206 break;
207 } 207 }
208 if (!iov[i].iov_len)
209 return -EINVAL;
208 } 210 }
209 211
210 if (unaligned || (q->dma_pad_mask & len) || map_data) 212 if (unaligned || (q->dma_pad_mask & len) || map_data)
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 119f07b74dc0..58c6ee5b010c 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -744,13 +744,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
744 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; 744 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
745 return 0; 745 return 0;
746 case BLKGETSIZE: 746 case BLKGETSIZE:
747 size = bdev->bd_inode->i_size; 747 size = i_size_read(bdev->bd_inode);
748 if ((size >> 9) > ~0UL) 748 if ((size >> 9) > ~0UL)
749 return -EFBIG; 749 return -EFBIG;
750 return compat_put_ulong(arg, size >> 9); 750 return compat_put_ulong(arg, size >> 9);
751 751
752 case BLKGETSIZE64_32: 752 case BLKGETSIZE64_32:
753 return compat_put_u64(arg, bdev->bd_inode->i_size); 753 return compat_put_u64(arg, i_size_read(bdev->bd_inode));
754 754
755 case BLKTRACESETUP32: 755 case BLKTRACESETUP32:
756 case BLKTRACESTART: /* compatible */ 756 case BLKTRACESTART: /* compatible */
diff --git a/block/elevator.c b/block/elevator.c
index 282e8308f7e2..2569512830d3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
429 q->nr_sorted--; 429 q->nr_sorted--;
430 430
431 boundary = q->end_sector; 431 boundary = q->end_sector;
432 stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED; 432 stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
433 list_for_each_prev(entry, &q->queue_head) { 433 list_for_each_prev(entry, &q->queue_head) {
434 struct request *pos = list_entry_rq(entry); 434 struct request *pos = list_entry_rq(entry);
435 435
@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
691void __elv_add_request(struct request_queue *q, struct request *rq, int where, 691void __elv_add_request(struct request_queue *q, struct request *rq, int where,
692 int plug) 692 int plug)
693{ 693{
694 if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { 694 if (rq->cmd_flags & REQ_SOFTBARRIER) {
695 /* barriers are scheduling boundary, update end_sector */ 695 /* barriers are scheduling boundary, update end_sector */
696 if (rq->cmd_type == REQ_TYPE_FS || 696 if (rq->cmd_type == REQ_TYPE_FS ||
697 (rq->cmd_flags & REQ_DISCARD)) { 697 (rq->cmd_flags & REQ_DISCARD)) {
diff --git a/block/ioctl.c b/block/ioctl.c
index d724ceb1d465..3d866d0037f2 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -125,7 +125,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
125 start >>= 9; 125 start >>= 9;
126 len >>= 9; 126 len >>= 9;
127 127
128 if (start + len > (bdev->bd_inode->i_size >> 9)) 128 if (start + len > (i_size_read(bdev->bd_inode) >> 9))
129 return -EINVAL; 129 return -EINVAL;
130 if (secure) 130 if (secure)
131 flags |= BLKDEV_DISCARD_SECURE; 131 flags |= BLKDEV_DISCARD_SECURE;
@@ -242,6 +242,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
242 * We need to set the startsect first, the driver may 242 * We need to set the startsect first, the driver may
243 * want to override it. 243 * want to override it.
244 */ 244 */
245 memset(&geo, 0, sizeof(geo));
245 geo.start = get_start_sect(bdev); 246 geo.start = get_start_sect(bdev);
246 ret = disk->fops->getgeo(bdev, &geo); 247 ret = disk->fops->getgeo(bdev, &geo);
247 if (ret) 248 if (ret)
@@ -307,12 +308,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
307 ret = blkdev_reread_part(bdev); 308 ret = blkdev_reread_part(bdev);
308 break; 309 break;
309 case BLKGETSIZE: 310 case BLKGETSIZE:
310 size = bdev->bd_inode->i_size; 311 size = i_size_read(bdev->bd_inode);
311 if ((size >> 9) > ~0UL) 312 if ((size >> 9) > ~0UL)
312 return -EFBIG; 313 return -EFBIG;
313 return put_ulong(arg, size >> 9); 314 return put_ulong(arg, size >> 9);
314 case BLKGETSIZE64: 315 case BLKGETSIZE64:
315 return put_u64(arg, bdev->bd_inode->i_size); 316 return put_u64(arg, i_size_read(bdev->bd_inode));
316 case BLKTRACESTART: 317 case BLKTRACESTART:
317 case BLKTRACESTOP: 318 case BLKTRACESTOP:
318 case BLKTRACESETUP: 319 case BLKTRACESETUP:
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a8b5a10eb5b0..4f4230b79bb6 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
321 if (hdr->iovec_count) { 321 if (hdr->iovec_count) {
322 const int size = sizeof(struct sg_iovec) * hdr->iovec_count; 322 const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
323 size_t iov_data_len; 323 size_t iov_data_len;
324 struct sg_iovec *iov; 324 struct sg_iovec *sg_iov;
325 struct iovec *iov;
326 int i;
325 327
326 iov = kmalloc(size, GFP_KERNEL); 328 sg_iov = kmalloc(size, GFP_KERNEL);
327 if (!iov) { 329 if (!sg_iov) {
328 ret = -ENOMEM; 330 ret = -ENOMEM;
329 goto out; 331 goto out;
330 } 332 }
331 333
332 if (copy_from_user(iov, hdr->dxferp, size)) { 334 if (copy_from_user(sg_iov, hdr->dxferp, size)) {
333 kfree(iov); 335 kfree(sg_iov);
334 ret = -EFAULT; 336 ret = -EFAULT;
335 goto out; 337 goto out;
336 } 338 }
337 339
340 /*
341 * Sum up the vecs, making sure they don't overflow
342 */
343 iov = (struct iovec *) sg_iov;
344 iov_data_len = 0;
345 for (i = 0; i < hdr->iovec_count; i++) {
346 if (iov_data_len + iov[i].iov_len < iov_data_len) {
347 kfree(sg_iov);
348 ret = -EINVAL;
349 goto out;
350 }
351 iov_data_len += iov[i].iov_len;
352 }
353
338 /* SG_IO howto says that the shorter of the two wins */ 354 /* SG_IO howto says that the shorter of the two wins */
339 iov_data_len = iov_length((struct iovec *)iov,
340 hdr->iovec_count);
341 if (hdr->dxfer_len < iov_data_len) { 355 if (hdr->dxfer_len < iov_data_len) {
342 hdr->iovec_count = iov_shorten((struct iovec *)iov, 356 hdr->iovec_count = iov_shorten(iov,
343 hdr->iovec_count, 357 hdr->iovec_count,
344 hdr->dxfer_len); 358 hdr->dxfer_len);
345 iov_data_len = hdr->dxfer_len; 359 iov_data_len = hdr->dxfer_len;
346 } 360 }
347 361
348 ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count, 362 ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
349 iov_data_len, GFP_KERNEL); 363 iov_data_len, GFP_KERNEL);
350 kfree(iov); 364 kfree(sg_iov);
351 } else if (hdr->dxfer_len) 365 } else if (hdr->dxfer_len)
352 ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, 366 ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
353 GFP_KERNEL); 367 GFP_KERNEL);
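The added loop rejects iovecs whose lengths wrap the unsigned total before anything trusts it: with unsigned arithmetic, a + b overflowed exactly when the sum comes out smaller than an operand. A stdlib-only sketch of that check (helper name invented):

#include <stdio.h>
#include <stddef.h>

static int sum_lengths(const size_t *len, int n, size_t *total)
{
	size_t sum = 0;

	for (int i = 0; i < n; i++) {
		if (sum + len[i] < sum)	/* wrapped past SIZE_MAX */
			return -1;
		sum += len[i];
	}
	*total = sum;
	return 0;
}

int main(void)
{
	size_t ok[]  = { 4096, 512 };
	size_t bad[] = { (size_t)-1, 2 };
	size_t total;

	printf("ok:  %d\n", sum_lengths(ok, 2, &total));	/* 0  */
	printf("bad: %d\n", sum_lengths(bad, 2, &total));	/* -1 */
	return 0;
}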
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index de3078215fe6..75586f1f86e7 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -504,7 +504,6 @@ err:
504 504
505static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) 505static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
506{ 506{
507 kobject_put(&pcrypt->pinst->kobj);
508 free_cpumask_var(pcrypt->cb_cpumask->mask); 507 free_cpumask_var(pcrypt->cb_cpumask->mask);
509 kfree(pcrypt->cb_cpumask); 508 kfree(pcrypt->cb_cpumask);
510 509
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d050e073e570..3f91c01c217f 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2552,8 +2552,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
2552 * 2552 *
2553 * If door lock fails, always clear sdev->locked to 2553 * If door lock fails, always clear sdev->locked to
2554 * avoid this infinite loop. 2554 * avoid this infinite loop.
2555 *
2556 * This may happen before SCSI scan is complete. Make
2557 * sure qc->dev->sdev isn't NULL before dereferencing.
2555 */ 2558 */
2556 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) 2559 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
2557 qc->dev->sdev->locked = 0; 2560 qc->dev->sdev->locked = 0;
2558 2561
2559 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; 2562 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index eaf194138f21..6bd9425ba5ab 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -142,7 +142,7 @@ static int autospeed; /* Chip present which snoops speed changes */
142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ 142static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ 143static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
144 144
145#ifdef PATA_WINBOND_VLB_MODULE 145#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
146static int winbond = 1; /* Set to probe Winbond controllers, 146static int winbond = 1; /* Set to probe Winbond controllers,
147 give I/O port if non standard */ 147 give I/O port if non standard */
148#else 148#else
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 74b829817891..fa1b95a9a7ff 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -653,8 +653,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
653 653
654 ap = host->ports[i]; 654 ap = host->ports[i];
655 ocd = ap->dev->platform_data; 655 ocd = ap->dev->platform_data;
656
657 ocd = ap->dev->platform_data;
658 cf_port = ap->private_data; 656 cf_port = ap->private_data;
659 dma_int.u64 = 657 dma_int.u64 =
660 cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine)); 658 cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 541e18879965..528f6318ded1 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
180 BUG(); 180 BUG();
181 bio_endio(bio, -ENXIO); 181 bio_endio(bio, -ENXIO);
182 return 0; 182 return 0;
183 } else if (bio->bi_rw & REQ_HARDBARRIER) {
184 bio_endio(bio, -EOPNOTSUPP);
185 return 0;
186 } else if (bio->bi_io_vec == NULL) { 183 } else if (bio->bi_io_vec == NULL) {
187 printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); 184 printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
188 BUG(); 185 BUG();
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 2cc4dda46279..a67d0a611a8a 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -113,6 +113,8 @@ static struct board_type products[] = {
113 {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, 113 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
114 {0x40910E11, "Smart Array 6i", &SA5_access}, 114 {0x40910E11, "Smart Array 6i", &SA5_access},
115 {0x3225103C, "Smart Array P600", &SA5_access}, 115 {0x3225103C, "Smart Array P600", &SA5_access},
116 {0x3223103C, "Smart Array P800", &SA5_access},
117 {0x3234103C, "Smart Array P400", &SA5_access},
116 {0x3235103C, "Smart Array P400i", &SA5_access}, 118 {0x3235103C, "Smart Array P400i", &SA5_access},
117 {0x3211103C, "Smart Array E200i", &SA5_access}, 119 {0x3211103C, "Smart Array E200i", &SA5_access},
118 {0x3212103C, "Smart Array E200", &SA5_access}, 120 {0x3212103C, "Smart Array E200", &SA5_access},
@@ -3753,7 +3755,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
3753 for (i = 0; i < MAX_CONFIG_WAIT; i++) { 3755 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
3754 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) 3756 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
3755 break; 3757 break;
3756 msleep(10); 3758 usleep_range(10000, 20000);
3757 } 3759 }
3758} 3760}
3759 3761
@@ -3937,10 +3939,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3937 *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 3939 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3938 subsystem_vendor_id; 3940 subsystem_vendor_id;
3939 3941
3940 for (i = 0; i < ARRAY_SIZE(products); i++) { 3942 for (i = 0; i < ARRAY_SIZE(products); i++)
3941 if (*board_id == products[i].board_id) 3943 if (*board_id == products[i].board_id)
3942 return i; 3944 return i;
3943 }
3944 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", 3945 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
3945 *board_id); 3946 *board_id);
3946 return -ENODEV; 3947 return -ENODEV;
@@ -3971,18 +3972,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
3971 return -ENODEV; 3972 return -ENODEV;
3972} 3973}
3973 3974
3974static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h) 3975static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
3976 void __iomem *vaddr, int wait_for_ready)
3977#define BOARD_READY 1
3978#define BOARD_NOT_READY 0
3975{ 3979{
3976 int i; 3980 int i, iterations;
3977 u32 scratchpad; 3981 u32 scratchpad;
3978 3982
3979 for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) { 3983 if (wait_for_ready)
3980 scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 3984 iterations = CCISS_BOARD_READY_ITERATIONS;
3981 if (scratchpad == CCISS_FIRMWARE_READY) 3985 else
3982 return 0; 3986 iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
3987
3988 for (i = 0; i < iterations; i++) {
3989 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
3990 if (wait_for_ready) {
3991 if (scratchpad == CCISS_FIRMWARE_READY)
3992 return 0;
3993 } else {
3994 if (scratchpad != CCISS_FIRMWARE_READY)
3995 return 0;
3996 }
3983 msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); 3997 msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
3984 } 3998 }
3985 dev_warn(&h->pdev->dev, "board not ready, timed out.\n"); 3999 dev_warn(&pdev->dev, "board not ready, timed out.\n");
3986 return -ENODEV; 4000 return -ENODEV;
3987} 4001}
3988 4002
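cciss_wait_for_board_state() folds "wait until ready" and the new "wait until not ready" into one bounded poll of the scratchpad register. The shape of that pattern as a userspace sketch, with the register read stubbed out and the sleep elided:

#include <stdio.h>

#define FIRMWARE_READY 0xffff0000u
#define ITERATIONS 5

static unsigned int fake_scratchpad[] = { 0, 0, FIRMWARE_READY };

static unsigned int readl_stub(int tick)	/* pretend MMIO read */
{
	return fake_scratchpad[tick < 2 ? tick : 2];
}

static int wait_for_state(int wait_for_ready)
{
	for (int i = 0; i < ITERATIONS; i++) {
		unsigned int v = readl_stub(i);

		if (wait_for_ready ? (v == FIRMWARE_READY)
				   : (v != FIRMWARE_READY))
			return 0;	/* reached the desired state */
		/* msleep(poll interval) would go here */
	}
	return -1;			/* timed out */
}

int main(void)
{
	printf("ready: %d\n", wait_for_state(1));	/* 0 */
	printf("not-ready: %d\n", wait_for_state(0));	/* 0 */
	return 0;
}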
@@ -4031,6 +4045,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
4031static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h) 4045static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
4032{ 4046{
4033 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 4047 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
4048
4049 /* Limit commands in memory limited kdump scenario. */
4050 if (reset_devices && h->max_commands > 32)
4051 h->max_commands = 32;
4052
4034 if (h->max_commands < 16) { 4053 if (h->max_commands < 16) {
4035 dev_warn(&h->pdev->dev, "Controller reports " 4054 dev_warn(&h->pdev->dev, "Controller reports "
4036 "max supported commands of %d, an obvious lie. " 4055 "max supported commands of %d, an obvious lie. "
@@ -4148,7 +4167,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
4148 err = -ENOMEM; 4167 err = -ENOMEM;
4149 goto err_out_free_res; 4168 goto err_out_free_res;
4150 } 4169 }
4151 err = cciss_wait_for_board_ready(h); 4170 err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
4152 if (err) 4171 if (err)
4153 goto err_out_free_res; 4172 goto err_out_free_res;
4154 err = cciss_find_cfgtables(h); 4173 err = cciss_find_cfgtables(h);
@@ -4313,36 +4332,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
4313#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0) 4332#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
4314#define cciss_noop(p) cciss_message(p, 3, 0) 4333#define cciss_noop(p) cciss_message(p, 3, 0)
4315 4334
4316static __devinit int cciss_reset_msi(struct pci_dev *pdev)
4317{
4318/* the #defines are stolen from drivers/pci/msi.h. */
4319#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
4320#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
4321
4322 int pos;
4323 u16 control = 0;
4324
4325 pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
4326 if (pos) {
4327 pci_read_config_word(pdev, msi_control_reg(pos), &control);
4328 if (control & PCI_MSI_FLAGS_ENABLE) {
4329 dev_info(&pdev->dev, "resetting MSI\n");
4330 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
4331 }
4332 }
4333
4334 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
4335 if (pos) {
4336 pci_read_config_word(pdev, msi_control_reg(pos), &control);
4337 if (control & PCI_MSIX_FLAGS_ENABLE) {
4338 dev_info(&pdev->dev, "resetting MSI-X\n");
4339 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
4340 }
4341 }
4342
4343 return 0;
4344}
4345
4346static int cciss_controller_hard_reset(struct pci_dev *pdev, 4335static int cciss_controller_hard_reset(struct pci_dev *pdev,
4347 void * __iomem vaddr, bool use_doorbell) 4336 void * __iomem vaddr, bool use_doorbell)
4348{ 4337{
@@ -4397,17 +4386,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
4397 * states or using the doorbell register. */ 4386 * states or using the doorbell register. */
4398static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) 4387static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4399{ 4388{
4400 u16 saved_config_space[32];
4401 u64 cfg_offset; 4389 u64 cfg_offset;
4402 u32 cfg_base_addr; 4390 u32 cfg_base_addr;
4403 u64 cfg_base_addr_index; 4391 u64 cfg_base_addr_index;
4404 void __iomem *vaddr; 4392 void __iomem *vaddr;
4405 unsigned long paddr; 4393 unsigned long paddr;
4406 u32 misc_fw_support, active_transport; 4394 u32 misc_fw_support, active_transport;
4407 int rc, i; 4395 int rc;
4408 CfgTable_struct __iomem *cfgtable; 4396 CfgTable_struct __iomem *cfgtable;
4409 bool use_doorbell; 4397 bool use_doorbell;
4410 u32 board_id; 4398 u32 board_id;
4399 u16 command_register;
4411 4400
4412 /* For controllers as old a the p600, this is very nearly 4401 /* For controllers as old a the p600, this is very nearly
4413 * the same thing as 4402 * the same thing as
@@ -4417,14 +4406,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4417 * pci_set_power_state(pci_dev, PCI_D0); 4406 * pci_set_power_state(pci_dev, PCI_D0);
4418 * pci_restore_state(pci_dev); 4407 * pci_restore_state(pci_dev);
4419 * 4408 *
4420 * but we can't use these nice canned kernel routines on
4421 * kexec, because they also check the MSI/MSI-X state in PCI
4422 * configuration space and do the wrong thing when it is
4423 * set/cleared. Also, the pci_save/restore_state functions
4424 * violate the ordering requirements for restoring the
4425 * configuration space from the CCISS document (see the
4426 * comment below). So we roll our own ....
4427 *
4428 * For controllers newer than the P600, the pci power state 4409 * For controllers newer than the P600, the pci power state
4429 * method of resetting doesn't work so we have another way 4410 * method of resetting doesn't work so we have another way
4430 * using the doorbell register. 4411 * using the doorbell register.
@@ -4443,8 +4424,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4443 return -ENODEV; 4424 return -ENODEV;
4444 } 4425 }
4445 4426
4446 for (i = 0; i < 32; i++) 4427 /* Save the PCI command register */
4447 pci_read_config_word(pdev, 2*i, &saved_config_space[i]); 4428 pci_read_config_word(pdev, 4, &command_register);
4429 /* Turn the board off. This is so that later pci_restore_state()
4430 * won't turn the board on before the rest of config space is ready.
4431 */
4432 pci_disable_device(pdev);
4433 pci_save_state(pdev);
4448 4434
4449 /* find the first memory BAR, so we can find the cfg table */ 4435 /* find the first memory BAR, so we can find the cfg table */
4450 rc = cciss_pci_find_memory_BAR(pdev, &paddr); 4436 rc = cciss_pci_find_memory_BAR(pdev, &paddr);
@@ -4479,26 +4465,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
4479 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); 4465 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
4480 if (rc) 4466 if (rc)
4481 goto unmap_cfgtable; 4467 goto unmap_cfgtable;
4482 4468 pci_restore_state(pdev);
4483 /* Restore the PCI configuration space. The Open CISS 4469 rc = pci_enable_device(pdev);
4484 * Specification says, "Restore the PCI Configuration 4470 if (rc) {
4485 * Registers, offsets 00h through 60h. It is important to 4471 dev_warn(&pdev->dev, "failed to enable device.\n");
4486 * restore the command register, 16-bits at offset 04h, 4472 goto unmap_cfgtable;
4487 * last. Do not restore the configuration status register,
4488 * 16-bits at offset 06h." Note that the offset is 2*i.
4489 */
4490 for (i = 0; i < 32; i++) {
4491 if (i == 2 || i == 3)
4492 continue;
4493 pci_write_config_word(pdev, 2*i, saved_config_space[i]);
4494 } 4473 }
4495 wmb(); 4474 pci_write_config_word(pdev, 4, command_register);
4496 pci_write_config_word(pdev, 4, saved_config_space[2]);
4497 4475
4498 /* Some devices (notably the HP Smart Array 5i Controller) 4476 /* Some devices (notably the HP Smart Array 5i Controller)
4499 need a little pause here */ 4477 need a little pause here */
4500 msleep(CCISS_POST_RESET_PAUSE_MSECS); 4478 msleep(CCISS_POST_RESET_PAUSE_MSECS);
4501 4479
4480 /* Wait for board to become not ready, then ready. */
4481 dev_info(&pdev->dev, "Waiting for board to become ready.\n");
4482 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
4483 if (rc) /* Don't bail, might be E500, etc. which can't be reset */
4484 dev_warn(&pdev->dev,
4485 "failed waiting for board to become not ready\n");
4486 rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
4487 if (rc) {
4488 dev_warn(&pdev->dev,
4489 "failed waiting for board to become ready\n");
4490 goto unmap_cfgtable;
4491 }
4492 dev_info(&pdev->dev, "board ready.\n");
4493
4502 /* Controller should be in simple mode at this point. If it's not, 4494 /* Controller should be in simple mode at this point. If it's not,
4503 * It means we're on one of those controllers which doesn't support 4495 * It means we're on one of those controllers which doesn't support
4504 * the doorbell reset method and on which the PCI power management reset 4496 * the doorbell reset method and on which the PCI power management reset
@@ -4539,8 +4531,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
4539 return 0; /* just try to do the kdump anyhow. */ 4531 return 0; /* just try to do the kdump anyhow. */
4540 if (rc) 4532 if (rc)
4541 return -ENODEV; 4533 return -ENODEV;
4542 if (cciss_reset_msi(pdev))
4543 return -ENODEV;
4544 4534
4545 /* Now try to get the controller to respond to a no-op */ 4535 /* Now try to get the controller to respond to a no-op */
4546 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { 4536 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
@@ -4936,7 +4926,8 @@ static void __exit cciss_cleanup(void)
4936 } 4926 }
4937 } 4927 }
4938 kthread_stop(cciss_scan_thread); 4928 kthread_stop(cciss_scan_thread);
4939 remove_proc_entry("driver/cciss", NULL); 4929 if (proc_cciss)
4930 remove_proc_entry("driver/cciss", NULL);
4940 bus_unregister(&cciss_bus_type); 4931 bus_unregister(&cciss_bus_type);
4941} 4932}
4942 4933
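The point of the rewrite above is ordering: save the command register yourself, turn the board off, snapshot config space, reset, restore, re-enable, and only then rewrite the command register. A minimal sketch of that sequence, assuming a hard_reset() callback in place of cciss_controller_hard_reset():

/* Illustrative sketch, not part of the patch; hard_reset() is an
 * assumed stand-in for the controller-specific reset above. */
static int kdump_reset_sketch(struct pci_dev *pdev,
			      int (*hard_reset)(struct pci_dev *))
{
	u16 command_register;
	int rc;

	/* save the command register (PCI config offset 4) by hand ... */
	pci_read_config_word(pdev, 4, &command_register);
	/* ... and turn the board off before snapshotting config space, so a
	 * later pci_restore_state() cannot re-enable it half-restored */
	pci_disable_device(pdev);
	pci_save_state(pdev);

	rc = hard_reset(pdev);
	if (rc)
		return rc;

	/* restore config space, re-enable, and write the command register
	 * last, mirroring the sequence in the patch above */
	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	pci_write_config_word(pdev, 4, command_register);
	return 0;
}
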
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index ae340ffc8f81..4b8933d778f1 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -200,10 +200,14 @@ struct ctlr_info
  * the above.
  */
 #define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
 #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
 #define CCISS_BOARD_READY_ITERATIONS \
 	((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
 		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_BOARD_NOT_READY_ITERATIONS \
+	((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
 #define CCISS_POST_RESET_PAUSE_MSECS (3000)
 #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
 #define CCISS_POST_RESET_NOOP_RETRIES (12)
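The two ITERATIONS macros only convert a wait budget in seconds into a number of polls at the shared 100 ms interval. A sketch of the loop they are meant to feed, assuming a board_ready() helper that reads the controller's ready bit:

/* Sketch only; board_ready() is an assumed accessor, not a real
 * cciss function. */
static int wait_for_board_sketch(struct pci_dev *pdev, void __iomem *vaddr,
				 int (*board_ready)(void __iomem *),
				 int want_ready)
{
	int i;
	int iterations = want_ready ? CCISS_BOARD_READY_ITERATIONS
				    : CCISS_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		if (board_ready(vaddr) == want_ready)
			return 0;
		msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board did not reach expected state\n");
	return -ENODEV;
}
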
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ac04ef97eac2..ba95cba192be 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	init_completion(&md_io.event);
 	md_io.error = 0;
 
-	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-		rw |= REQ_HARDBARRIER;
+	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+		rw |= REQ_FUA;
 	rw |= REQ_UNPLUG | REQ_SYNC;
 
- retry:
 	bio = bio_alloc(GFP_NOIO, 1);
 	bio->bi_bdev = bdev->md_bdev;
 	bio->bi_sector = sector;
@@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	wait_for_completion(&md_io.event);
 	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
 
-	/* check for unsupported barrier op.
-	 * would rather check on EOPNOTSUPP, but that is not reliable.
-	 * don't try again for ANY return value != 0 */
-	if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
-		/* Try again with no barrier */
-		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
-		set_bit(MD_NO_BARRIER, &mdev->flags);
-		rw &= ~REQ_HARDBARRIER;
-		bio_put(bio);
-		goto retry;
-	}
 out:
 	bio_put(bio);
 	return ok;
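The substitution above is mechanical: where a meta-data write used to request REQ_HARDBARRIER, with a retry path for devices that reject it, it now asks for REQ_FUA and the whole retry disappears. A sketch of the resulting bio setup, with drbd's completion plumbing elided:

/* Sketch of the FUA-based meta-data write; not the actual drbd code.
 * md_fua_disabled mirrors test_bit(MD_NO_FUA, &mdev->flags). */
static struct bio *prepare_md_bio_sketch(struct block_device *md_bdev,
					 sector_t sector, struct page *page,
					 int rw, bool md_fua_disabled)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_bdev = md_bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, 512, 0);
	if ((rw & WRITE) && !md_fua_disabled)
		rw |= REQ_FUA;	/* forced unit access, no retry path needed */
	bio->bi_rw = rw | REQ_UNPLUG | REQ_SYNC;
	return bio;
}
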
@@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 	u32 xor_sum = 0;
 
 	if (!get_ldev(mdev)) {
-		dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
+		dev_err(DEV,
+			"disk is %s, cannot start al transaction (-%d +%d)\n",
+			drbd_disk_str(mdev->state.disk), evicted, new_enr);
 		complete(&((struct update_al_work *)w)->event);
 		return 1;
 	}
 	/* do we have to do a bitmap write, first?
 	 * TODO reduce maximum latency:
 	 * submit both bios, then wait for both,
-	 * instead of doing two synchronous sector writes. */
+	 * instead of doing two synchronous sector writes.
+	 * For now, we must not write the transaction,
+	 * if we cannot write out the bitmap of the evicted extent. */
 	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
 		drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
 
-	mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
+	/* The bitmap write may have failed, causing a state change. */
+	if (mdev->state.disk < D_INCONSISTENT) {
+		dev_err(DEV,
+			"disk is %s, cannot write al transaction (-%d +%d)\n",
+			drbd_disk_str(mdev->state.disk), evicted, new_enr);
+		complete(&((struct update_al_work *)w)->event);
+		put_ldev(mdev);
+		return 1;
+	}
+
+	mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
 	buffer = (struct al_transaction *)page_address(mdev->md_io_page);
 
 	buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
@@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
 	unsigned int enr;
 	unsigned long add = 0;
 	char ppb[10];
-	int i;
+	int i, tmp;
 
 	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
 
@@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
 		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
 		if (enr == LC_FREE)
 			continue;
-		add += drbd_bm_ALe_set_all(mdev, enr);
+		tmp = drbd_bm_ALe_set_all(mdev, enr);
+		dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
+		add += tmp;
 	}
 
 	lc_unlock(mdev->act_log);
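The new checks in w_al_write_transaction() follow one pattern: between taking the local-disk reference and touching the meta-data area, the disk state may have degraded, so each stage re-validates it before writing. Roughly, assuming a write_transaction() helper for the actual transaction write:

/* Sketch of the state re-validation pattern above; locking and the
 * write_transaction() helper are assumptions for illustration. */
static int al_transaction_sketch(struct drbd_conf *mdev, unsigned int evicted)
{
	if (!get_ldev(mdev))
		return 1;	/* no usable local disk at all */

	/* the bitmap write for the evicted extent may fail and change state */
	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
		drbd_bm_write_sect(mdev, evicted / AL_EXT_PER_BM_SECT);

	if (mdev->state.disk < D_INCONSISTENT) {
		put_ldev(mdev);	/* degraded meanwhile: do not write */
		return 1;
	}
	write_transaction(mdev);	/* assumed helper */
	put_ldev(mdev);
	return 1;
}
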
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9bdcf4393c0a..1ea1a34e78b2 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -114,11 +114,11 @@ struct drbd_conf;
 #define D_ASSERT(exp)	if (!(exp)) \
 	 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
 
 #define ERR_IF(exp) if (({				\
 	int _b = (exp) != 0;				\
-	if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n",	\
+	if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
 		__func__, #exp, __FILE__, __LINE__);	\
 	_b;						\
 	}))
 
 /* Defines to control fault insertion */
@@ -749,17 +749,12 @@ struct drbd_epoch {
 
 /* drbd_epoch flag bits */
 enum {
-	DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
-	DE_BARRIER_IN_NEXT_EPOCH_DONE,
-	DE_CONTAINS_A_BARRIER,
 	DE_HAVE_BARRIER_NUMBER,
-	DE_IS_FINISHING,
 };
 
 enum epoch_event {
 	EV_PUT,
 	EV_GOT_BARRIER_NR,
-	EV_BARRIER_DONE,
 	EV_BECAME_LAST,
 	EV_CLEANUP = 32, /* used as flag */
 };
@@ -801,11 +796,6 @@ enum {
 	__EE_CALL_AL_COMPLETE_IO,
 	__EE_MAY_SET_IN_SYNC,
 
-	/* This epoch entry closes an epoch using a barrier.
-	 * On sucessful completion, the epoch is released,
-	 * and the P_BARRIER_ACK send. */
-	__EE_IS_BARRIER,
-
 	/* In case a barrier failed,
 	 * we need to resubmit without the barrier flag. */
 	__EE_RESUBMITTED,
@@ -820,7 +810,6 @@ enum {
 };
 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
 #define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
-#define EE_IS_BARRIER          (1<<__EE_IS_BARRIER)
 #define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
 #define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
 #define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
@@ -843,16 +832,15 @@ enum {
 				 * Gets cleared when the state.conn
 				 * goes into C_CONNECTED state. */
 	WRITE_BM_AFTER_RESYNC,	/* A kmalloc() during resync failed */
-	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
 	CONSIDER_RESYNC,
 
-	MD_NO_BARRIER,		/* meta data device does not support barriers,
-				   so don't even try */
+	MD_NO_FUA,		/* user wants us not to use FUA/FLUSH on the meta data dev */
 	SUSPEND_IO,		/* suspend application io */
 	BITMAP_IO,		/* suspend application io;
 				   once no more io in flight, start bitmap io */
 	BITMAP_IO_QUEUED,	/* Started bitmap IO */
 	GO_DISKLESS,		/* Disk is being detached, on io-error or admin request. */
+	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
 	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
 	NET_CONGESTED,		/* The data socket is congested */
 
@@ -947,7 +935,6 @@ enum write_ordering_e {
 	WO_none,
 	WO_drain_io,
 	WO_bdev_flush,
-	WO_bio_barrier
 };
 
 struct fifo_buffer {
@@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
 extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
 extern void drbd_go_diskless(struct drbd_conf *mdev);
+extern void drbd_ldev_destroy(struct drbd_conf *mdev);
 
 
 /* Meta data layout
@@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
 	case EP_PASS_ON:
 		if (!forcedetach) {
 			if (__ratelimit(&drbd_ratelimit_state))
-				dev_err(DEV, "Local IO failed in %s."
-					"Passing error on...\n", where);
+				dev_err(DEV, "Local IO failed in %s.\n", where);
 			break;
 		}
 		/* NOTE fall through to detach case if forcedetach set */
 	case EP_DETACH:
 	case EP_CALL_HELPER:
+		set_bit(WAS_IO_ERROR, &mdev->flags);
 		if (mdev->state.disk > D_FAILED) {
 			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
-			dev_err(DEV, "Local IO failed in %s."
-				"Detaching...\n", where);
+			dev_err(DEV,
+				"Local IO failed in %s. Detaching...\n", where);
 		}
 		break;
 	}
@@ -1874,7 +1862,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
 static inline sector_t drbd_get_capacity(struct block_device *bdev)
 {
 	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
-	return bdev ? bdev->bd_inode->i_size >> 9 : 0;
+	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
 }
 
 /**
@@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
 	__release(local);
 	D_ASSERT(i >= 0);
 	if (i == 0) {
+		if (mdev->state.disk == D_DISKLESS)
+			/* even internal references gone, safe to destroy */
+			drbd_ldev_destroy(mdev);
 		if (mdev->state.disk == D_FAILED)
+			/* all application IO references gone. */
 			drbd_go_diskless(mdev);
 		wake_up(&mdev->misc_wait);
 	}
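put_ldev() now has two distinct zero-crossing actions: dropping the last reference while D_FAILED queues the diskless transition, and dropping it while already D_DISKLESS performs the final teardown. The decrement itself is the usual atomic pattern; a plain-form sketch:

/* Sketch of the two-stage teardown triggered from the refcount drop;
 * mirrors the put_ldev() logic above, nothing more. */
static void put_ldev_sketch(struct drbd_conf *mdev)
{
	if (atomic_dec_and_test(&mdev->local_cnt)) {
		if (mdev->state.disk == D_DISKLESS)
			drbd_ldev_destroy(mdev);	/* last ref, free for real */
		if (mdev->state.disk == D_FAILED)
			drbd_go_diskless(mdev);		/* queue D_FAILED -> D_DISKLESS */
		wake_up(&mdev->misc_wait);
	}
}
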
@@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat
 {
 	int io_allowed;
 
+	/* never get a reference while D_DISKLESS */
+	if (mdev->state.disk == D_DISKLESS)
+		return 0;
+
 	atomic_inc(&mdev->local_cnt);
 	io_allowed = (mdev->state.disk >= mins);
 	if (!io_allowed)
@@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
 	int r;
 
-	if (test_bit(MD_NO_BARRIER, &mdev->flags))
+	if (test_bit(MD_NO_FUA, &mdev->flags))
 		return;
 
 	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
 	if (r) {
-		set_bit(MD_NO_BARRIER, &mdev->flags);
+		set_bit(MD_NO_FUA, &mdev->flags);
 		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
 	}
 }
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 25c7a73c5062..6be5401d0e88 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
 	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
 		ns.conn = os.conn;
 
+	/* we cannot fail (again) if we already detached */
+	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+		ns.disk = D_DISKLESS;
+
+	/* if we are only D_ATTACHING yet,
+	 * we can (and should) go directly to D_DISKLESS. */
+	if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
+		ns.disk = D_DISKLESS;
+
 	/* After C_DISCONNECTING only C_STANDALONE may follow */
 	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
 		ns.conn = os.conn;
@@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev,
 	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
 		set_bit(DEVICE_DYING, &mdev->flags);
 
-	mdev->state.i = ns.i;
+	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
+	 * drbd_ldev_destroy() won't happen before our corresponding
+	 * after_state_ch works run, where we put_ldev again. */
+	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+		atomic_inc(&mdev->local_cnt);
+
+	mdev->state = ns;
 	wake_up(&mdev->misc_wait);
 	wake_up(&mdev->state_wait);
 
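The atomic_inc() above is the linchpin: entering D_FAILED or D_DISKLESS takes one extra local_cnt reference so the matching after_state_ch() work is guaranteed to run before the teardown that put_ldev() can trigger. In outline, with locking elided:

/* Sketch of pinning the backing device across a disk-state transition;
 * names follow the code above, this is not the full state machine. */
static void set_state_sketch(struct drbd_conf *mdev,
			     union drbd_state os, union drbd_state ns)
{
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);	/* pin until after_state_ch */

	mdev->state = ns;
	wake_up(&mdev->misc_wait);
	/* ... the after_state_ch() work later does the matching put_ldev() */
}
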
@@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 			drbd_uuid_new_current(mdev);
 			clear_bit(NEW_CUR_UUID, &mdev->flags);
-			drbd_md_sync(mdev);
 		}
 		spin_lock_irq(&mdev->req_lock);
 		_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
@@ -1365,63 +1381,64 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
 		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
 
-	/* first half of local IO error */
-	if (os.disk > D_FAILED && ns.disk == D_FAILED) {
-		enum drbd_io_error_p eh = EP_PASS_ON;
+	/* first half of local IO error, failure to attach,
+	 * or administrative detach */
+	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+		enum drbd_io_error_p eh;
+		int was_io_error;
+		/* corresponding get_ldev was in __drbd_set_state, to serialize
+		 * our cleanup here with the transition to D_DISKLESS,
+		 * so it is safe to dereference ldev here. */
+		eh = mdev->ldev->dc.on_io_error;
+		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+		/* current state still has to be D_FAILED,
+		 * there is only one way out: to D_DISKLESS,
+		 * and that may only happen after our put_ldev below. */
+		if (mdev->state.disk != D_FAILED)
+			dev_err(DEV,
+				"ASSERT FAILED: disk is %s during detach\n",
+				drbd_disk_str(mdev->state.disk));
 
 		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that my disk is broken.\n");
+			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
 		else
-			dev_err(DEV, "Sending state for drbd_io_error() failed\n");
+			dev_err(DEV, "Sending state for detaching disk failed\n");
 
 		drbd_rs_cancel_all(mdev);
 
-		if (get_ldev_if_state(mdev, D_FAILED)) {
-			eh = mdev->ldev->dc.on_io_error;
-			put_ldev(mdev);
-		}
-		if (eh == EP_CALL_HELPER)
+		/* In case we want to get something to stable storage still,
+		 * this may be the last chance.
+		 * Following put_ldev may transition to D_DISKLESS. */
+		drbd_md_sync(mdev);
+		put_ldev(mdev);
+
+		if (was_io_error && eh == EP_CALL_HELPER)
 			drbd_khelper(mdev, "local-io-error");
 	}
 
+	/* second half of local IO error, failure to attach,
+	 * or administrative detach,
+	 * after local_cnt references have reached zero again */
+	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+		/* We must still be diskless,
+		 * re-attach has to be serialized with this! */
+		if (mdev->state.disk != D_DISKLESS)
+			dev_err(DEV,
+				"ASSERT FAILED: disk is %s while going diskless\n",
+				drbd_disk_str(mdev->state.disk));
 
-	/* second half of local IO error handling,
-	 * after local_cnt references have reached zero: */
-	if (os.disk == D_FAILED && ns.disk == D_DISKLESS) {
-		mdev->rs_total = 0;
-		mdev->rs_failed = 0;
-		atomic_set(&mdev->rs_pending_cnt, 0);
-	}
-
-	if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
-		/* We must still be diskless,
-		 * re-attach has to be serialized with this! */
-		if (mdev->state.disk != D_DISKLESS)
-			dev_err(DEV,
-				"ASSERT FAILED: disk is %s while going diskless\n",
-				drbd_disk_str(mdev->state.disk));
+		mdev->rs_total = 0;
+		mdev->rs_failed = 0;
+		atomic_set(&mdev->rs_pending_cnt, 0);
 
-		/* we cannot assert local_cnt == 0 here, as get_ldev_if_state
-		 * will inc/dec it frequently. Since we became D_DISKLESS, no
-		 * one has touched the protected members anymore, though, so we
-		 * are safe to free them here. */
 		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that I detached my disk.\n");
+			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
 		else
-			dev_err(DEV, "Sending state for detach failed\n");
-
-		lc_destroy(mdev->resync);
-		mdev->resync = NULL;
-		lc_destroy(mdev->act_log);
-		mdev->act_log = NULL;
-		__no_warn(local,
-			drbd_free_bc(mdev->ldev);
-			mdev->ldev = NULL;);
-
-		if (mdev->md_io_tmpp) {
-			__free_page(mdev->md_io_tmpp);
-			mdev->md_io_tmpp = NULL;
-		}
+			dev_err(DEV, "Sending state for being diskless failed\n");
+		/* corresponding get_ldev in __drbd_set_state
+		 * this may finally trigger drbd_ldev_destroy. */
+		put_ldev(mdev);
 	}
 
 	/* Disks got bigger while they were detached */
@@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 
 	drbd_set_defaults(mdev);
 
-	/* for now, we do NOT yet support it,
-	 * even though we start some framework
-	 * to eventually support barriers */
-	set_bit(NO_BARRIER_SUPP, &mdev->flags);
-
 	atomic_set(&mdev->ap_bio_cnt, 0);
 	atomic_set(&mdev->ap_pending_cnt, 0);
 	atomic_set(&mdev->rs_pending_cnt, 0);
@@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 	drbd_thread_init(mdev, &mdev->asender, drbd_asender);
 
 	mdev->agreed_pro_version = PRO_VERSION_MAX;
-	mdev->write_ordering = WO_bio_barrier;
+	mdev->write_ordering = WO_bdev_flush;
 	mdev->resync_wenr = LC_FREE;
 }
 
@@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
 	D_ASSERT(list_empty(&mdev->resync_work.list));
 	D_ASSERT(list_empty(&mdev->unplug_work.list));
 	D_ASSERT(list_empty(&mdev->go_diskless.list));
-
 }
 
 
@@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
 
 	get_random_bytes(&val, sizeof(u64));
 	_drbd_uuid_set(mdev, UI_CURRENT, val);
+	/* get it to stable storage _now_ */
+	drbd_md_sync(mdev);
 }
 
 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
@@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 	return 1;
 }
 
+void drbd_ldev_destroy(struct drbd_conf *mdev)
+{
+	lc_destroy(mdev->resync);
+	mdev->resync = NULL;
+	lc_destroy(mdev->act_log);
+	mdev->act_log = NULL;
+	__no_warn(local,
+		drbd_free_bc(mdev->ldev);
+		mdev->ldev = NULL;);
+
+	if (mdev->md_io_tmpp) {
+		__free_page(mdev->md_io_tmpp);
+		mdev->md_io_tmpp = NULL;
+	}
+	clear_bit(GO_DISKLESS, &mdev->flags);
+}
+
 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
 	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
-	 * the protected members anymore, though, so in the after_state_ch work
-	 * it will be safe to free them. */
+	 * the protected members anymore, though, so once put_ldev reaches zero
+	 * again, it will be safe to free them. */
 	drbd_force_state(mdev, NS(disk, D_DISKLESS));
-	/* We need to wait for return of references checked out while we still
-	 * have been D_FAILED, though (drbd_md_sync, bitmap io). */
-	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
-
-	clear_bit(GO_DISKLESS, &mdev->flags);
 	return 1;
 }
 
@@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev)
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
 		drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
-	/* don't drbd_queue_work_front,
-	 * we need to serialize with the after_state_ch work
-	 * of the -> D_FAILED transition. */
 }
 
 /**
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 87925e97e613..29e5c70e4e26 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 		retcode = ERR_DISK_CONFIGURED;
 		goto fail;
 	}
+	/* It may just now have detached because of IO error.  Make sure
+	 * drbd_ldev_destroy is done already, we may end up here very fast,
+	 * e.g. if someone calls attach from the on-io-error handler,
+	 * to realize a "hot spare" feature (not that I'd recommend that) */
+	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
 	/* allocation not in the IO path, cqueue thread context */
 	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	/* Reset the "barriers don't work" bits here, then force meta data to
 	 * be written, to ensure we determine if barriers are supported. */
 	if (nbc->dc.no_md_flush)
-		set_bit(MD_NO_BARRIER, &mdev->flags);
+		set_bit(MD_NO_FUA, &mdev->flags);
 	else
-		clear_bit(MD_NO_BARRIER, &mdev->flags);
+		clear_bit(MD_NO_FUA, &mdev->flags);
 
 	/* Point of no return reached.
 	 * Devices and memory are no longer released by error cleanup below.
@@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	nbc = NULL;
 	resync_lru = NULL;
 
-	mdev->write_ordering = WO_bio_barrier;
-	drbd_bump_write_ordering(mdev, WO_bio_barrier);
+	mdev->write_ordering = WO_bdev_flush;
+	drbd_bump_write_ordering(mdev, WO_bdev_flush);
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
 		set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 force_diskless_dec:
 	put_ldev(mdev);
 force_diskless:
-	drbd_force_state(mdev, NS(disk, D_DISKLESS));
+	drbd_force_state(mdev, NS(disk, D_FAILED));
 	drbd_md_sync(mdev);
 release_bdev2_fail:
 	if (nbc)
@@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	return 0;
 }
 
+/* Detaching the disk is a process in multiple stages.  First we need to lock
+ * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then have we finally detached. */
 static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			  struct drbd_nl_cfg_reply *reply)
 {
+	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
 	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+	if (mdev->state.disk == D_DISKLESS)
+		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+	drbd_resume_io(mdev);
 	return 0;
 }
 
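drbd_nl_detach() now spells out the staging described in its comment: fence new and stuck IO first, request D_DISKLESS, then wait until every internal reference has drained before IO resumes. As a sequence sketch:

/* Sketch of the staged detach; mirrors drbd_nl_detach() above. */
static int detach_sketch(struct drbd_conf *mdev)
{
	drbd_suspend_io(mdev);		/* nobody stuck in drbd_al_begin_io */
	drbd_request_state(mdev, NS(disk, D_DISKLESS));
	if (mdev->state.disk == D_DISKLESS)
		/* wait for put_ldev() to return all internal references */
		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
	drbd_resume_io(mdev);
	return 0;
}
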
@@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 		drbd_uuid_new_current(mdev);
 		clear_bit(NEW_CUR_UUID, &mdev->flags);
-		drbd_md_sync(mdev);
 	}
 	drbd_suspend_io(mdev);
 	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index ad325c5d0ce1..7e6ac307e2de 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 		[WO_none] = 'n',
 		[WO_drain_io] = 'd',
 		[WO_bdev_flush] = 'f',
-		[WO_bio_barrier] = 'b',
 	};
 
 	seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index efd6169acf2f..d299fe9e78c8 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -49,11 +49,6 @@
 
 #include "drbd_vli.h"
 
-struct flush_work {
-	struct drbd_work w;
-	struct drbd_epoch *epoch;
-};
-
 enum finish_epoch {
 	FE_STILL_LIVE,
 	FE_DESTROYED,
@@ -66,16 +61,6 @@ static int drbd_do_auth(struct drbd_conf *mdev);
 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
 
-static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
-{
-	struct drbd_epoch *prev;
-	spin_lock(&mdev->epoch_lock);
-	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
-	if (prev == epoch || prev == mdev->current_epoch)
-		prev = NULL;
-	spin_unlock(&mdev->epoch_lock);
-	return prev;
-}
 
 #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
 
@@ -981,7 +966,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
 	return TRUE;
 }
 
-static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
+static void drbd_flush(struct drbd_conf *mdev)
 {
 	int rv;
 
@@ -997,24 +982,6 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d
 		}
 		put_ldev(mdev);
 	}
-
-	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-}
-
-static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
-	struct flush_work *fw = (struct flush_work *)w;
-	struct drbd_epoch *epoch = fw->epoch;
-
-	kfree(w);
-
-	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
-		drbd_flush_after_epoch(mdev, epoch);
-
-	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
-			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
-
-	return 1;
 }
 
 /**
@@ -1027,15 +994,13 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 					       struct drbd_epoch *epoch,
 					       enum epoch_event ev)
 {
-	int finish, epoch_size;
+	int epoch_size;
 	struct drbd_epoch *next_epoch;
-	int schedule_flush = 0;
 	enum finish_epoch rv = FE_STILL_LIVE;
 
 	spin_lock(&mdev->epoch_lock);
 	do {
 		next_epoch = NULL;
-		finish = 0;
 
 		epoch_size = atomic_read(&epoch->epoch_size);
 
@@ -1045,16 +1010,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 			break;
 		case EV_GOT_BARRIER_NR:
 			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
-
-			/* Special case: If we just switched from WO_bio_barrier to
-			   WO_bdev_flush we should not finish the current epoch */
-			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
-			    mdev->write_ordering != WO_bio_barrier &&
-			    epoch == mdev->current_epoch)
-				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
-			break;
-		case EV_BARRIER_DONE:
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
 			break;
 		case EV_BECAME_LAST:
 			/* nothing to do*/
@@ -1063,23 +1018,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 
 		if (epoch_size != 0 &&
 		    atomic_read(&epoch->active) == 0 &&
-		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
-		    epoch->list.prev == &mdev->current_epoch->list &&
-		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
-			/* Nearly all conditions are met to finish that epoch... */
-			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
-			    mdev->write_ordering == WO_none ||
-			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
-			    ev & EV_CLEANUP) {
-				finish = 1;
-				set_bit(DE_IS_FINISHING, &epoch->flags);
-			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
-				  mdev->write_ordering == WO_bio_barrier) {
-				atomic_inc(&epoch->active);
-				schedule_flush = 1;
-			}
-		}
-		if (finish) {
+		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
 			if (!(ev & EV_CLEANUP)) {
 				spin_unlock(&mdev->epoch_lock);
 				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
@@ -1102,6 +1041,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 				/* atomic_set(&epoch->active, 0); is already zero */
 				if (rv == FE_STILL_LIVE)
 					rv = FE_RECYCLED;
+				wake_up(&mdev->ee_wait);
 			}
 		}
 
@@ -1113,22 +1053,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 
 	spin_unlock(&mdev->epoch_lock);
 
-	if (schedule_flush) {
-		struct flush_work *fw;
-		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
-		if (fw) {
-			fw->w.cb = w_flush;
-			fw->epoch = epoch;
-			drbd_queue_work(&mdev->data.work, &fw->w);
-		} else {
-			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
-			/* That is not a recursion, only one level */
-			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-			drbd_may_finish_epoch(mdev, epoch, EV_PUT);
-		}
-	}
-
 	return rv;
 }
 
@@ -1144,19 +1068,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo)
 		[WO_none] = "none",
 		[WO_drain_io] = "drain",
 		[WO_bdev_flush] = "flush",
-		[WO_bio_barrier] = "barrier",
 	};
 
 	pwo = mdev->write_ordering;
 	wo = min(pwo, wo);
-	if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
-		wo = WO_bdev_flush;
 	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
 		wo = WO_drain_io;
 	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
 		wo = WO_none;
 	mdev->write_ordering = wo;
-	if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
+	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
 		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
 }
 
@@ -1192,7 +1113,7 @@ next_bio:
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	/* we special case some flags in the multi-bio case, see below
-	 * (REQ_UNPLUG, REQ_HARDBARRIER) */
+	 * (REQ_UNPLUG) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1226,11 +1147,6 @@ next_bio:
 			bio->bi_rw &= ~REQ_UNPLUG;
 
 		drbd_generic_make_request(mdev, fault_type, bio);
-
-		/* strip off REQ_HARDBARRIER,
-		 * unless it is the first or last bio */
-		if (bios && bios->bi_next)
-			bios->bi_rw &= ~REQ_HARDBARRIER;
 	} while (bios);
 	maybe_kick_lo(mdev);
 	return 0;
@@ -1244,45 +1160,9 @@ fail:
 	return -ENOMEM;
 }
 
-/**
- * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
- * @mdev:	DRBD device.
- * @w:		work object.
- * @cancel:	The connection will be closed anyways (unused in this callback)
- */
-int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
-{
-	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
-	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
-	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
-	   so that we can finish that epoch in drbd_may_finish_epoch().
-	   That is necessary if we already have a long chain of Epochs, before
-	   we realize that REQ_HARDBARRIER is actually not supported */
-
-	/* As long as the -ENOTSUPP on the barrier is reported immediately
-	   that will never trigger. If it is reported late, we will just
-	   print that warning and continue correctly for all future requests
-	   with WO_bdev_flush */
-	if (previous_epoch(mdev, e->epoch))
-		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
-
-	/* we still have a local reference,
-	 * get_ldev was done in receive_Data. */
-
-	e->w.cb = e_end_block;
-	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
-		/* drbd_submit_ee fails for one reason only:
-		 * if was not able to allocate sufficient bios.
-		 * requeue, try again later. */
-		e->w.cb = w_e_reissue;
-		drbd_queue_work(&mdev->data.work, &e->w);
-	}
-	return 1;
-}
-
 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-	int rv, issue_flush;
+	int rv;
 	struct p_barrier *p = &mdev->data.rbuf.barrier;
 	struct drbd_epoch *epoch;
 
@@ -1300,44 +1180,40 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
 	 * Therefore we must send the barrier_ack after the barrier request was
 	 * completed. */
 	switch (mdev->write_ordering) {
-	case WO_bio_barrier:
 	case WO_none:
 		if (rv == FE_RECYCLED)
 			return TRUE;
-		break;
+
+		/* receiver context, in the writeout path of the other node.
+		 * avoid potential distributed deadlock */
+		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+		if (epoch)
+			break;
+		else
+			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+			/* Fall through */
 
 	case WO_bdev_flush:
 	case WO_drain_io:
-		if (rv == FE_STILL_LIVE) {
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
-			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
-		}
-		if (rv == FE_RECYCLED)
-			return TRUE;
-
-		/* The asender will send all the ACKs and barrier ACKs out, since
-		   all EEs moved from the active_ee to the done_ee. We need to
-		   provide a new epoch object for the EEs that come in soon */
-		break;
-	}
-
-	/* receiver context, in the writeout path of the other node.
-	 * avoid potential distributed deadlock */
-	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
-	if (!epoch) {
-		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
-		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
 		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-		if (issue_flush) {
-			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
-			if (rv == FE_RECYCLED)
-				return TRUE;
+		drbd_flush(mdev);
+
+		if (atomic_read(&mdev->current_epoch->epoch_size)) {
+			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+			if (epoch)
+				break;
 		}
 
-		drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
+		epoch = mdev->current_epoch;
+		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
+
+		D_ASSERT(atomic_read(&epoch->active) == 0);
+		D_ASSERT(epoch->flags == 0);
 
 		return TRUE;
+	default:
+		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
+		return FALSE;
 	}
 
 	epoch->flags = 0;
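With barriers gone, receive_Barrier() closes an epoch in one of two ways: hand out a fresh epoch object when one can be allocated, or flush and sleep on ee_wait until epoch_size drains to zero (the wake_up() added in drbd_may_finish_epoch() is the counterpart). Reduced to a skeleton:

/* Sketch of the allocate-or-drain logic above; epoch installation is
 * elided, this is not the full receive_Barrier(). */
static int close_epoch_sketch(struct drbd_conf *mdev)
{
	struct drbd_epoch *epoch;

	drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	drbd_flush(mdev);

	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		/* GFP_NOIO: we are in the peer's writeout path */
		epoch = kmalloc(sizeof(*epoch), GFP_NOIO);
		if (epoch)
			return 0;	/* install the new epoch (elided) */
	}

	/* no new epoch object: reuse the current one once it is empty */
	epoch = mdev->current_epoch;
	wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
	return 0;
}
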
@@ -1652,15 +1528,8 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
 	sector_t sector = e->sector;
-	struct drbd_epoch *epoch;
 	int ok = 1, pcmd;
 
-	if (e->flags & EE_IS_BARRIER) {
-		epoch = previous_epoch(mdev, e->epoch);
-		if (epoch)
-			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
-	}
-
 	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
 		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
@@ -1817,27 +1686,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	e->epoch = mdev->current_epoch;
 	atomic_inc(&e->epoch->epoch_size);
 	atomic_inc(&e->epoch->active);
-
-	if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
-		struct drbd_epoch *epoch;
-		/* Issue a barrier if we start a new epoch, and the previous epoch
-		   was not a epoch containing a single request which already was
-		   a Barrier. */
-		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
-		if (epoch == e->epoch) {
-			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-			rw |= REQ_HARDBARRIER;
-			e->flags |= EE_IS_BARRIER;
-		} else {
-			if (atomic_read(&epoch->epoch_size) > 1 ||
-			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
-				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
-				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-				rw |= REQ_HARDBARRIER;
-				e->flags |= EE_IS_BARRIER;
-			}
-		}
-	}
 	spin_unlock(&mdev->epoch_lock);
 
 	dp_flags = be32_to_cpu(p->dp_flags);
@@ -1995,10 +1843,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 		break;
 	}
 
-	if (mdev->state.pdsk == D_DISKLESS) {
+	if (mdev->state.pdsk < D_INCONSISTENT) {
 		/* In case we have the only disk of the cluster, */
 		drbd_set_out_of_sync(mdev, e->sector, e->size);
 		e->flags |= EE_CALL_AL_COMPLETE_IO;
+		e->flags &= ~EE_MAY_SET_IN_SYNC;
 		drbd_al_begin_io(mdev, e->sector);
 	}
 
@@ -3362,7 +3211,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	if (ns.conn == C_MASK) {
 		ns.conn = C_CONNECTED;
 		if (mdev->state.disk == D_NEGOTIATING) {
-			drbd_force_state(mdev, NS(disk, D_DISKLESS));
+			drbd_force_state(mdev, NS(disk, D_FAILED));
 		} else if (peer_state.disk == D_NEGOTIATING) {
 			dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
 			peer_state.disk = D_DISKLESS;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9e91a2545fc8..11a75d32a2e2 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
 		if (!hlist_unhashed(&req->colision))
 			hlist_del(&req->colision);
 		else
-			D_ASSERT((s & RQ_NET_MASK) == 0);
+			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
 
 		/* for writes we need to do some extra housekeeping */
 		if (rw == WRITE)
@@ -813,7 +813,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 			mdev->state.conn >= C_CONNECTED));
 
 	if (!(local || remote) && !is_susp(mdev->state)) {
-		dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
+		if (__ratelimit(&drbd_ratelimit_state))
+			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
 		goto fail_free_complete;
 	}
 
@@ -942,12 +943,21 @@ allocate_barrier:
 	if (local) {
 		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
 
-		if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
-				     : rw == READ  ? DRBD_FAULT_DT_RD
-				     :               DRBD_FAULT_DT_RA))
+		/* State may have changed since we grabbed our reference on the
+		 * mdev->ldev member. Double check, and short-circuit to endio.
+		 * In case the last activity log transaction failed to get on
+		 * stable storage, and this is a WRITE, we may not even submit
+		 * this bio. */
+		if (get_ldev(mdev)) {
+			if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
+					     : rw == READ  ? DRBD_FAULT_DT_RD
+					     :               DRBD_FAULT_DT_RA))
+				bio_endio(req->private_bio, -EIO);
+			else
+				generic_make_request(req->private_bio);
+			put_ldev(mdev);
+		} else
 			bio_endio(req->private_bio, -EIO);
-		else
-			generic_make_request(req->private_bio);
 	}
 
 	/* we need to plug ALWAYS since we possibly need to kick lo_dev.
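The rewritten submit path treats the earlier local-disk check as advisory: it re-takes a reference with get_ldev() right before handing the bio down, and completes the bio with -EIO if the disk went away in the meantime. The shape of it:

/* Sketch of re-validating the local disk just before submission;
 * follows the drbd_make_request_common() change above. */
static void submit_local_sketch(struct drbd_conf *mdev, struct bio *bio)
{
	if (get_ldev(mdev)) {
		generic_make_request(bio);	/* disk still usable */
		put_ldev(mdev);
	} else {
		bio_endio(bio, -EIO);		/* detached under us */
	}
}
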
@@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 		return 0;
 	}
 
-	/* Reject barrier requests if we know the underlying device does
-	 * not support them.
-	 * XXX: Need to get this info from peer as well some how so we
-	 * XXX: reject if EITHER side/data/metadata area does not support them.
-	 *
-	 * because of those XXX, this is not yet enabled,
-	 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
-	 */
-	if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
-		/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
-
 	/*
 	 * what we "blindly" assume:
 	 */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 108d58015cd1..b0551ba7ad0c 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -102,12 +102,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
102 put_ldev(mdev); 102 put_ldev(mdev);
103} 103}
104 104
105static int is_failed_barrier(int ee_flags)
106{
107 return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
108 == (EE_IS_BARRIER|EE_WAS_ERROR);
109}
110
111/* writes on behalf of the partner, or resync writes, 105/* writes on behalf of the partner, or resync writes,
112 * "submitted" by the receiver, final stage. */ 106 * "submitted" by the receiver, final stage. */
113static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local) 107static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +113,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
119 int is_syncer_req; 113 int is_syncer_req;
120 int do_al_complete_io; 114 int do_al_complete_io;
121 115
122 /* if this is a failed barrier request, disable use of barriers,
123 * and schedule for resubmission */
124 if (is_failed_barrier(e->flags)) {
125 drbd_bump_write_ordering(mdev, WO_bdev_flush);
126 spin_lock_irqsave(&mdev->req_lock, flags);
127 list_del(&e->w.list);
128 e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
129 e->w.cb = w_e_reissue;
130 /* put_ldev actually happens below, once we come here again. */
131 __release(local);
132 spin_unlock_irqrestore(&mdev->req_lock, flags);
133 drbd_queue_work(&mdev->data.work, &e->w);
134 return;
135 }
136
137 D_ASSERT(e->block_id != ID_VACANT); 116 D_ASSERT(e->block_id != ID_VACANT);
138 117
139 /* after we moved e to done_ee, 118 /* after we moved e to done_ee,
@@ -925,7 +904,7 @@ out:
925 drbd_md_sync(mdev); 904 drbd_md_sync(mdev);
926 905
927 if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { 906 if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
928 dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n"); 907 dev_info(DEV, "Writing the whole bitmap\n");
929 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); 908 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
930 } 909 }
931 910
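
The is_failed_barrier() helper removed above is a compact example of the mask-and-compare idiom: AND with every bit of interest, then compare against the bits that must be set, so the bits that must be clear are tested in the same expression. A self-contained version with the same shape (the flag values are illustrative, not the drbd_int.h definitions):

#include <assert.h>

#define EE_IS_BARRIER	(1 << 0)
#define EE_WAS_ERROR	(1 << 1)
#define EE_RESUBMITTED	(1 << 2)

/* True iff IS_BARRIER and WAS_ERROR are set *and* RESUBMITTED is clear:
 * the mask keeps all three bits, the comparison pins two of them to 1
 * and, implicitly, the third to 0. */
static int is_failed_barrier(int ee_flags)
{
	return (ee_flags & (EE_IS_BARRIER | EE_WAS_ERROR | EE_RESUBMITTED))
		== (EE_IS_BARRIER | EE_WAS_ERROR);
}

int main(void)
{
	assert(is_failed_barrier(EE_IS_BARRIER | EE_WAS_ERROR));
	assert(!is_failed_barrier(EE_IS_BARRIER | EE_WAS_ERROR | EE_RESUBMITTED));
	assert(!is_failed_barrier(EE_WAS_ERROR));
	return 0;
}
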
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e5284ef65fa..7ea0bea2f7e3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
481 if (bio_rw(bio) == WRITE) { 481 if (bio_rw(bio) == WRITE) {
482 struct file *file = lo->lo_backing_file; 482 struct file *file = lo->lo_backing_file;
483 483
484 /* REQ_HARDBARRIER is deprecated */
485 if (bio->bi_rw & REQ_HARDBARRIER) {
486 ret = -EOPNOTSUPP;
487 goto out;
488 }
489
490 if (bio->bi_rw & REQ_FLUSH) { 484 if (bio->bi_rw & REQ_FLUSH) {
491 ret = vfs_fsync(file, 0); 485 ret = vfs_fsync(file, 0);
492 if (unlikely(ret && ret != -EINVAL)) { 486 if (unlikely(ret && ret != -EINVAL)) {
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 06e2812ba124..255035cfc88a 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -289,8 +289,6 @@ static int blkif_queue_request(struct request *req)
289 289
290 ring_req->operation = rq_data_dir(req) ? 290 ring_req->operation = rq_data_dir(req) ?
291 BLKIF_OP_WRITE : BLKIF_OP_READ; 291 BLKIF_OP_WRITE : BLKIF_OP_READ;
292 if (req->cmd_flags & REQ_HARDBARRIER)
293 ring_req->operation = BLKIF_OP_WRITE_BARRIER;
294 292
295 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); 293 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
296 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); 294 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index d120a5c1c093..ab3894f742c3 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -68,6 +68,9 @@ static struct usb_device_id btusb_table[] = {
68 /* Apple MacBookPro6,2 */ 68 /* Apple MacBookPro6,2 */
69 { USB_DEVICE(0x05ac, 0x8218) }, 69 { USB_DEVICE(0x05ac, 0x8218) },
70 70
71 /* Apple MacBookAir3,1, MacBookAir3,2 */
72 { USB_DEVICE(0x05ac, 0x821b) },
73
71 /* AVM BlueFRITZ! USB v2.0 */ 74 /* AVM BlueFRITZ! USB v2.0 */
72 { USB_DEVICE(0x057c, 0x3800) }, 75 { USB_DEVICE(0x057c, 0x3800) },
73 76
@@ -1029,6 +1032,8 @@ static int btusb_probe(struct usb_interface *intf,
1029 1032
1030 usb_set_intfdata(intf, data); 1033 usb_set_intfdata(intf, data);
1031 1034
1035 usb_enable_autosuspend(interface_to_usbdev(intf));
1036
1032 return 0; 1037 return 0;
1033} 1038}
1034 1039
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 6b6760ea2435..9272c38dd3c6 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1210,14 +1210,14 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
1210 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; 1210 unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
1211 u32 pte_flags; 1211 u32 pte_flags;
1212 1212
1213 if (type_mask == AGP_USER_UNCACHED_MEMORY) 1213 if (type_mask == AGP_USER_MEMORY)
1214 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; 1214 pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
1215 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { 1215 else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
1216 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; 1216 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
1217 if (gfdt) 1217 if (gfdt)
1218 pte_flags |= GEN6_PTE_GFDT; 1218 pte_flags |= GEN6_PTE_GFDT;
1219 } else { /* set 'normal'/'cached' to LLC by default */ 1219 } else { /* set 'normal'/'cached' to LLC by default */
1220 pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; 1220 pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
1221 if (gfdt) 1221 if (gfdt)
1222 pte_flags |= GEN6_PTE_GFDT; 1222 pte_flags |= GEN6_PTE_GFDT;
1223 } 1223 }
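
The intel-gtt.c hunk fixes two things at once: the first branch tested AGP_USER_UNCACHED_MEMORY although callers pass AGP_USER_MEMORY, and the LLC vs LLC_MLC encodings were swapped between the two cached cases. The corrected mapping, as a standalone sketch (constant values invented for the sketch; GFDT handling omitted):

#include <stdint.h>

enum agp_type {			/* illustrative, not the AGP header values */
	AGP_USER_MEMORY,
	AGP_USER_CACHED_MEMORY_LLC_MLC,
	AGP_USER_CACHED_MEMORY,
};

#define I810_PTE_VALID		(1u << 0)
#define GEN6_PTE_UNCACHED	(1u << 1)
#define GEN6_PTE_LLC		(1u << 2)
#define GEN6_PTE_LLC_MLC	(1u << 3)

/* Plain user memory maps to the uncached PTE encoding, the LLC_MLC
 * type to the LLC+MLC encoding, and everything else defaults to LLC. */
static uint32_t gen6_pte_flags(enum agp_type type_mask)
{
	if (type_mask == AGP_USER_MEMORY)
		return GEN6_PTE_UNCACHED | I810_PTE_VALID;
	if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
		return GEN6_PTE_LLC_MLC | I810_PTE_VALID;
	return GEN6_PTE_LLC | I810_PTE_VALID;	/* 'normal'/'cached' default */
}
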
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index b0a70461a12c..c0bd6f472c52 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1299,7 +1299,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
1299{ 1299{
1300 struct async_struct * info = tty->driver_data; 1300 struct async_struct * info = tty->driver_data;
1301 struct async_icount cprev, cnow; /* kernel counter temps */ 1301 struct async_icount cprev, cnow; /* kernel counter temps */
1302 struct serial_icounter_struct icount;
1303 void __user *argp = (void __user *)arg; 1302 void __user *argp = (void __user *)arg;
1304 unsigned long flags; 1303 unsigned long flags;
1305 1304
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index dd3f9b1f11b4..294d03e8c61a 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1828,7 +1828,6 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file,
1828 unsigned int cmd, unsigned long arg) 1828 unsigned int cmd, unsigned long arg)
1829{ 1829{
1830 struct port *port = tty->driver_data; 1830 struct port *port = tty->driver_data;
1831 void __user *argp = (void __user *)arg;
1832 int rval = -ENOIOCTLCMD; 1831 int rval = -ENOIOCTLCMD;
1833 1832
1834 DBG1("******** IOCTL, cmd: %d", cmd); 1833 DBG1("******** IOCTL, cmd: %d", cmd);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index bfc10f89d951..eaa41992fbe2 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2796,6 +2796,7 @@ static const struct tty_operations mgslpc_ops = {
2796 .hangup = mgslpc_hangup, 2796 .hangup = mgslpc_hangup,
2797 .tiocmget = tiocmget, 2797 .tiocmget = tiocmget,
2798 .tiocmset = tiocmset, 2798 .tiocmset = tiocmset,
2799 .get_icount = mgslpc_get_icount,
2799 .proc_fops = &mgslpc_proc_fops, 2800 .proc_fops = &mgslpc_proc_fops,
2800}; 2801};
2801 2802
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98f195a..f7af91cb273d 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
276 struct drm_crtc *tmp; 276 struct drm_crtc *tmp;
277 int crtc_mask = 1; 277 int crtc_mask = 1;
278 278
279 WARN(!crtc, "checking null crtc?"); 279 WARN(!crtc, "checking null crtc?\n");
280 280
281 dev = crtc->dev; 281 dev = crtc->dev;
282 282
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1a26217a530..a245d17165ae 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
240 .addr = DDC_ADDR, 240 .addr = DDC_ADDR,
241 .flags = I2C_M_RD, 241 .flags = I2C_M_RD,
242 .len = len, 242 .len = len,
243 .buf = buf + start, 243 .buf = buf,
244 } 244 }
245 }; 245 };
246 246
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
253static u8 * 253static u8 *
254drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 254drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
255{ 255{
256 int i, j = 0; 256 int i, j = 0, valid_extensions = 0;
257 u8 *block, *new; 257 u8 *block, *new;
258 258
259 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) 259 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
280 280
281 for (j = 1; j <= block[0x7e]; j++) { 281 for (j = 1; j <= block[0x7e]; j++) {
282 for (i = 0; i < 4; i++) { 282 for (i = 0; i < 4; i++) {
283 if (drm_do_probe_ddc_edid(adapter, block, j, 283 if (drm_do_probe_ddc_edid(adapter,
284 EDID_LENGTH)) 284 block + (valid_extensions + 1) * EDID_LENGTH,
285 j, EDID_LENGTH))
285 goto out; 286 goto out;
286 if (drm_edid_block_valid(block + j * EDID_LENGTH)) 287 if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
288 valid_extensions++;
287 break; 289 break;
290 }
288 } 291 }
289 if (i == 4) 292 if (i == 4)
290 goto carp; 293 dev_warn(connector->dev->dev,
294 "%s: Ignoring invalid EDID block %d.\n",
295 drm_get_connector_name(connector), j);
296 }
297
298 if (valid_extensions != block[0x7e]) {
299 block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
300 block[0x7e] = valid_extensions;
301 new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
302 if (!new)
303 goto out;
304 block = new;
291 } 305 }
292 306
293 return block; 307 return block;
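
The drm_edid.c change stops discarding the whole EDID when one extension block is bad: each candidate block is read into the slot just past the last *valid* block, so an invalid block is simply overwritten by the next read, and at the end the extension count (byte 0x7e) and the base-block checksum are patched before the buffer is shrunk. A standalone model of that compaction (read_block()/block_valid() are stand-ins for drm_do_probe_ddc_edid()/drm_edid_block_valid(); the kernel's four read retries per block are omitted):

#include <stdlib.h>

#define EDID_LENGTH 128

extern int read_block(unsigned char *dst, int blocknum);  /* 0 on success */
extern int block_valid(const unsigned char *blk);         /* nonzero if OK */

/* base points at EDID_LENGTH * (base[0x7e] + 1) bytes. */
unsigned char *edid_compact_extensions(unsigned char *base)
{
	int total = base[0x7e], valid = 0, j;

	for (j = 1; j <= total; j++) {
		unsigned char *dst = base + (valid + 1) * EDID_LENGTH;
		if (read_block(dst, j) == 0 && block_valid(dst))
			valid++;	/* keep it; the next read lands after it */
		/* an invalid block stays in place and is overwritten
		 * by the next attempt */
	}

	if (valid != total) {
		unsigned char *smaller;
		/* byte 0x7e is covered by the base-block checksum, so
		 * bump the checksum byte by the same amount the count
		 * drops (arithmetic wraps mod 256 on unsigned char) */
		base[EDID_LENGTH - 1] += total - valid;
		base[0x7e] = valid;
		smaller = realloc(base, (valid + 1) * EDID_LENGTH);
		if (smaller)
			base = smaller;
	}
	return base;
}
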
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd420760..80745f85902c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
44module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); 44module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
45 45
46unsigned int i915_powersave = 1; 46unsigned int i915_powersave = 1;
47module_param_named(powersave, i915_powersave, int, 0400); 47module_param_named(powersave, i915_powersave, int, 0600);
48 48
49unsigned int i915_lvds_downclock = 0; 49unsigned int i915_lvds_downclock = 0;
50module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 50module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285e..90414ae86afc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
1321 1321
1322#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1322#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1323#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1323#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1324#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1324 1325
1325#define PRIMARY_RINGBUFFER_SIZE (128*1024) 1326#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1326 1327
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b5..ef188e391406 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2172static int i915_ring_idle(struct drm_device *dev, 2172static int i915_ring_idle(struct drm_device *dev,
2173 struct intel_ring_buffer *ring) 2173 struct intel_ring_buffer *ring)
2174{ 2174{
2175 if (list_empty(&ring->gpu_write_list)) 2175 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2176 return 0; 2176 return 0;
2177 2177
2178 i915_gem_flush_ring(dev, NULL, ring, 2178 i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev)
2190 int ret; 2190 int ret;
2191 2191
2192 lists_empty = (list_empty(&dev_priv->mm.flushing_list) && 2192 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2193 list_empty(&dev_priv->render_ring.active_list) && 2193 list_empty(&dev_priv->mm.active_list));
2194 list_empty(&dev_priv->bsd_ring.active_list) &&
2195 list_empty(&dev_priv->blt_ring.active_list));
2196 if (lists_empty) 2194 if (lists_empty)
2197 return 0; 2195 return 0;
2198 2196
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
3108 * write domain 3106 * write domain
3109 */ 3107 */
3110 if (obj->write_domain && 3108 if (obj->write_domain &&
3111 obj->write_domain != obj->pending_read_domains) { 3109 (obj->write_domain != obj->pending_read_domains ||
3110 obj_priv->ring != ring)) {
3112 flush_domains |= obj->write_domain; 3111 flush_domains |= obj->write_domain;
3113 invalidate_domains |= 3112 invalidate_domains |=
3114 obj->pending_read_domains & ~obj->write_domain; 3113 obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
3497 return 0; 3496 return 0;
3498} 3497}
3499 3498
3499static int
3500i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
3501 struct drm_file *file,
3502 struct intel_ring_buffer *ring,
3503 struct drm_gem_object **objects,
3504 int count)
3505{
3506 struct drm_i915_private *dev_priv = dev->dev_private;
3507 int ret, i;
3508
3509 /* Zero the global flush/invalidate flags. These
3510 * will be modified as new domains are computed
3511 * for each object
3512 */
3513 dev->invalidate_domains = 0;
3514 dev->flush_domains = 0;
3515 dev_priv->mm.flush_rings = 0;
3516 for (i = 0; i < count; i++)
3517 i915_gem_object_set_to_gpu_domain(objects[i], ring);
3518
3519 if (dev->invalidate_domains | dev->flush_domains) {
3520#if WATCH_EXEC
3521 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3522 __func__,
3523 dev->invalidate_domains,
3524 dev->flush_domains);
3525#endif
3526 i915_gem_flush(dev, file,
3527 dev->invalidate_domains,
3528 dev->flush_domains,
3529 dev_priv->mm.flush_rings);
3530 }
3531
3532 for (i = 0; i < count; i++) {
3533 struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
3534 /* XXX replace with semaphores */
3535 if (obj->ring && ring != obj->ring) {
3536 ret = i915_gem_object_wait_rendering(&obj->base, true);
3537 if (ret)
3538 return ret;
3539 }
3540 }
3541
3542 return 0;
3543}
3544
3500/* Throttle our rendering by waiting until the ring has completed our requests 3545/* Throttle our rendering by waiting until the ring has completed our requests
3501 * emitted over 20 msec ago. 3546 * emitted over 20 msec ago.
3502 * 3547 *
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3757 goto err; 3802 goto err;
3758 } 3803 }
3759 3804
3760 /* Zero the global flush/invalidate flags. These 3805 ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
3761 * will be modified as new domains are computed 3806 object_list, args->buffer_count);
3762 * for each object 3807 if (ret)
3763 */ 3808 goto err;
3764 dev->invalidate_domains = 0;
3765 dev->flush_domains = 0;
3766 dev_priv->mm.flush_rings = 0;
3767
3768 for (i = 0; i < args->buffer_count; i++) {
3769 struct drm_gem_object *obj = object_list[i];
3770
3771 /* Compute new gpu domains and update invalidate/flush */
3772 i915_gem_object_set_to_gpu_domain(obj, ring);
3773 }
3774
3775 if (dev->invalidate_domains | dev->flush_domains) {
3776#if WATCH_EXEC
3777 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3778 __func__,
3779 dev->invalidate_domains,
3780 dev->flush_domains);
3781#endif
3782 i915_gem_flush(dev, file,
3783 dev->invalidate_domains,
3784 dev->flush_domains,
3785 dev_priv->mm.flush_rings);
3786 }
3787 3809
3788 for (i = 0; i < args->buffer_count; i++) { 3810 for (i = 0; i < args->buffer_count; i++) {
3789 struct drm_gem_object *obj = object_list[i]; 3811 struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4065,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4043 alignment = i915_gem_get_gtt_alignment(obj); 4065 alignment = i915_gem_get_gtt_alignment(obj);
4044 if (obj_priv->gtt_offset & (alignment - 1)) { 4066 if (obj_priv->gtt_offset & (alignment - 1)) {
4045 WARN(obj_priv->pin_count, 4067 WARN(obj_priv->pin_count,
4046 "bo is already pinned with incorrect alignment:" 4068 "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
4047 " offset=%x, req.alignment=%x\n",
4048 obj_priv->gtt_offset, alignment); 4069 obj_priv->gtt_offset, alignment);
4049 ret = i915_gem_object_unbind(obj); 4070 ret = i915_gem_object_unbind(obj);
4050 if (ret) 4071 if (ret)
@@ -4856,17 +4877,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4856 struct drm_file *file_priv) 4877 struct drm_file *file_priv)
4857{ 4878{
4858 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 4879 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4859 void *obj_addr; 4880 void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
4860 int ret; 4881 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4861 char __user *user_data;
4862 4882
4863 user_data = (char __user *) (uintptr_t) args->data_ptr; 4883 DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
4864 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4865 4884
4866 DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size); 4885 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4867 ret = copy_from_user(obj_addr, user_data, args->size); 4886 unsigned long unwritten;
4868 if (ret) 4887
4869 return -EFAULT; 4888 /* The physical object once assigned is fixed for the lifetime
4889 * of the obj, so we can safely drop the lock and continue
4890 * to access vaddr.
4891 */
4892 mutex_unlock(&dev->struct_mutex);
4893 unwritten = copy_from_user(vaddr, user_data, args->size);
4894 mutex_lock(&dev->struct_mutex);
4895 if (unwritten)
4896 return -EFAULT;
4897 }
4870 4898
4871 drm_agp_chipset_flush(dev); 4899 drm_agp_chipset_flush(dev);
4872 return 0; 4900 return 0;
@@ -4900,9 +4928,7 @@ i915_gpu_is_active(struct drm_device *dev)
4900 int lists_empty; 4928 int lists_empty;
4901 4929
4902 lists_empty = list_empty(&dev_priv->mm.flushing_list) && 4930 lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
4903 list_empty(&dev_priv->render_ring.active_list) && 4931 list_empty(&dev_priv->mm.active_list);
4904 list_empty(&dev_priv->bsd_ring.active_list) &&
4905 list_empty(&dev_priv->blt_ring.active_list);
4906 4932
4907 return !lists_empty; 4933 return !lists_empty;
4908} 4934}
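
The i915_gem_phys_pwrite() rework above first tries a copy that cannot fault while struct_mutex is held, and only on failure drops the lock for a normal, faultable copy_from_user(); that is safe because the physical backing is fixed for the object's lifetime. The shape of the pattern, as a userspace sketch (try_copy_nofault() and copy_may_fault() are invented stand-ins for __copy_from_user_inatomic_nocache() and copy_from_user()):

#include <pthread.h>
#include <stddef.h>

extern int try_copy_nofault(void *dst, const void *src, size_t n);
					/* 0 on success; may not fault/sleep */
extern size_t copy_may_fault(void *dst, const void *src, size_t n);
					/* returns bytes NOT copied */

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

int phys_pwrite(void *vaddr, const void *user_data, size_t n)
{
	int ret = 0;

	pthread_mutex_lock(&dev_lock);
	if (try_copy_nofault(vaddr, user_data, n) != 0) {
		/* vaddr stays valid without the lock (fixed backing),
		 * so drop it while the slow copy may fault and sleep */
		pthread_mutex_unlock(&dev_lock);
		if (copy_may_fault(vaddr, user_data, n))
			ret = -14;	/* -EFAULT */
		pthread_mutex_lock(&dev_lock);
	}
	pthread_mutex_unlock(&dev_lock);
	return ret;
}
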
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53fa..d8ae7d1d0cc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
165 165
166 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 166 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
167 list_empty(&dev_priv->mm.flushing_list) && 167 list_empty(&dev_priv->mm.flushing_list) &&
168 list_empty(&dev_priv->render_ring.active_list) && 168 list_empty(&dev_priv->mm.active_list));
169 list_empty(&dev_priv->bsd_ring.active_list) &&
170 list_empty(&dev_priv->blt_ring.active_list));
171 if (lists_empty) 169 if (lists_empty)
172 return -ENOSPC; 170 return -ENOSPC;
173 171
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
184 182
185 lists_empty = (list_empty(&dev_priv->mm.inactive_list) && 183 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
186 list_empty(&dev_priv->mm.flushing_list) && 184 list_empty(&dev_priv->mm.flushing_list) &&
187 list_empty(&dev_priv->render_ring.active_list) && 185 list_empty(&dev_priv->mm.active_list));
188 list_empty(&dev_priv->bsd_ring.active_list) &&
189 list_empty(&dev_priv->blt_ring.active_list));
190 BUG_ON(!lists_empty); 186 BUG_ON(!lists_empty);
191 187
192 return 0; 188 return 0;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d959..454c064f8ef7 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
862 /* Clock gating state */ 862 /* Clock gating state */
863 intel_init_clock_gating(dev); 863 intel_init_clock_gating(dev);
864 864
865 if (HAS_PCH_SPLIT(dev)) 865 if (HAS_PCH_SPLIT(dev)) {
866 ironlake_enable_drps(dev); 866 ironlake_enable_drps(dev);
867 intel_init_emon(dev);
868 }
867 869
868 /* Cache mode state */ 870 /* Cache mode state */
869 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 871 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b2..48d8fd686ea9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
1681 udelay(500); 1681 udelay(500);
1682} 1682}
1683 1683
1684static void intel_fdi_normal_train(struct drm_crtc *crtc)
1685{
1686 struct drm_device *dev = crtc->dev;
1687 struct drm_i915_private *dev_priv = dev->dev_private;
1688 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1689 int pipe = intel_crtc->pipe;
1690 u32 reg, temp;
1691
1692 /* enable normal train */
1693 reg = FDI_TX_CTL(pipe);
1694 temp = I915_READ(reg);
1695 temp &= ~FDI_LINK_TRAIN_NONE;
1696 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
1697 I915_WRITE(reg, temp);
1698
1699 reg = FDI_RX_CTL(pipe);
1700 temp = I915_READ(reg);
1701 if (HAS_PCH_CPT(dev)) {
1702 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1703 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
1704 } else {
1705 temp &= ~FDI_LINK_TRAIN_NONE;
1706 temp |= FDI_LINK_TRAIN_NONE;
1707 }
1708 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
1709
1710 /* wait one idle pattern time */
1711 POSTING_READ(reg);
1712 udelay(1000);
1713}
1714
1684/* The FDI link training functions for ILK/Ibexpeak. */ 1715/* The FDI link training functions for ILK/Ibexpeak. */
1685static void ironlake_fdi_link_train(struct drm_crtc *crtc) 1716static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1686{ 1717{
@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1767 1798
1768 DRM_DEBUG_KMS("FDI train done\n"); 1799 DRM_DEBUG_KMS("FDI train done\n");
1769 1800
1770 /* enable normal train */
1771 reg = FDI_TX_CTL(pipe);
1772 temp = I915_READ(reg);
1773 temp &= ~FDI_LINK_TRAIN_NONE;
1774 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
1775 I915_WRITE(reg, temp);
1776
1777 reg = FDI_RX_CTL(pipe);
1778 temp = I915_READ(reg);
1779 if (HAS_PCH_CPT(dev)) {
1780 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1781 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
1782 } else {
1783 temp &= ~FDI_LINK_TRAIN_NONE;
1784 temp |= FDI_LINK_TRAIN_NONE;
1785 }
1786 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
1787
1788 /* wait one idle pattern time */
1789 POSTING_READ(reg);
1790 udelay(1000);
1791} 1801}
1792 1802
1793static const int const snb_b_fdi_train_param [] = { 1803static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2090 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); 2100 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2091 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 2101 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
2092 2102
2103 intel_fdi_normal_train(crtc);
2104
2093 /* For PCH DP, enable TRANS_DP_CTL */ 2105 /* For PCH DP, enable TRANS_DP_CTL */
2094 if (HAS_PCH_CPT(dev) && 2106 if (HAS_PCH_CPT(dev) &&
2095 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2107 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
2200 udelay(100); 2212 udelay(100);
2201 2213
2202 /* Ironlake workaround, disable clock pointer after downing FDI */ 2214 /* Ironlake workaround, disable clock pointer after downing FDI */
2203 I915_WRITE(FDI_RX_CHICKEN(pipe), 2215 if (HAS_PCH_IBX(dev))
2204 I915_READ(FDI_RX_CHICKEN(pipe) & 2216 I915_WRITE(FDI_RX_CHICKEN(pipe),
2205 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); 2217 I915_READ(FDI_RX_CHICKEN(pipe) &
2218 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
2206 2219
2207 /* still set train pattern 1 */ 2220 /* still set train pattern 1 */
2208 reg = FDI_TX_CTL(pipe); 2221 reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev)
5581 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 5594 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
5582 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 5595 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
5583 MEMMODE_FSTART_SHIFT; 5596 MEMMODE_FSTART_SHIFT;
5584 fstart = fmax;
5585 5597
5586 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 5598 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
5587 PXVFREQ_PX_SHIFT; 5599 PXVFREQ_PX_SHIFT;
5588 5600
5589 dev_priv->fmax = fstart; /* IPS callback will increase this */ 5601 dev_priv->fmax = fmax; /* IPS callback will increase this */
5590 dev_priv->fstart = fstart; 5602 dev_priv->fstart = fstart;
5591 5603
5592 dev_priv->max_delay = fmax; 5604 dev_priv->max_delay = fstart;
5593 dev_priv->min_delay = fmin; 5605 dev_priv->min_delay = fmin;
5594 dev_priv->cur_delay = fstart; 5606 dev_priv->cur_delay = fstart;
5595 5607
5596 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, 5608 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
5597 fstart); 5609 fmax, fmin, fstart);
5598 5610
5599 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 5611 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
5600 5612
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b1..c8e005553310 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
1517 status = connector_status_connected; 1517 status = connector_status_connected;
1518 } 1518 }
1519 1519
1520 return bit; 1520 return status;
1521} 1521}
1522 1522
1523/** 1523/**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a8765..21551fe74541 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
296extern void intel_init_clock_gating(struct drm_device *dev); 296extern void intel_init_clock_gating(struct drm_device *dev);
297extern void ironlake_enable_drps(struct drm_device *dev); 297extern void ironlake_enable_drps(struct drm_device *dev);
298extern void ironlake_disable_drps(struct drm_device *dev); 298extern void ironlake_disable_drps(struct drm_device *dev);
299extern void intel_init_emon(struct drm_device *dev);
299 300
300extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 301extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
301 struct drm_gem_object *obj, 302 struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea9..4324a326f98e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
481 struct drm_device *dev = connector->dev; 481 struct drm_device *dev = connector->dev;
482 struct drm_display_mode *mode; 482 struct drm_display_mode *mode;
483 483
484 if (intel_lvds->edid) { 484 if (intel_lvds->edid)
485 drm_mode_connector_update_edid_property(connector,
486 intel_lvds->edid);
487 return drm_add_edid_modes(connector, intel_lvds->edid); 485 return drm_add_edid_modes(connector, intel_lvds->edid);
488 }
489 486
490 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); 487 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
491 if (mode == 0) 488 if (mode == 0)
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev)
939 */ 936 */
940 intel_lvds->edid = drm_get_edid(connector, 937 intel_lvds->edid = drm_get_edid(connector,
941 &dev_priv->gmbus[pin].adapter); 938 &dev_priv->gmbus[pin].adapter);
942 939 if (intel_lvds->edid) {
940 if (drm_add_edid_modes(connector,
941 intel_lvds->edid)) {
942 drm_mode_connector_update_edid_property(connector,
943 intel_lvds->edid);
944 } else {
945 kfree(intel_lvds->edid);
946 intel_lvds->edid = NULL;
947 }
948 }
943 if (!intel_lvds->edid) { 949 if (!intel_lvds->edid) {
944 /* Didn't get an EDID, so 950 /* Didn't get an EDID, so
945 * Set wide sync ranges so we get all modes 951 * Set wide sync ranges so we get all modes
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6b..9b0d9a867aea 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
512 return 0; 512 return 0;
513 513
514err_out: 514err_out:
515 iounmap(opregion->header); 515 iounmap(base);
516 return err; 516 return err;
517} 517}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219a..02ff0a481f47 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
946{ 946{
947 int uv_hscale = uv_hsubsampling(rec->flags); 947 int uv_hscale = uv_hsubsampling(rec->flags);
948 int uv_vscale = uv_vsubsampling(rec->flags); 948 int uv_vscale = uv_vsubsampling(rec->flags);
949 u32 stride_mask, depth, tmp; 949 u32 stride_mask;
950 int depth;
951 u32 tmp;
950 952
951 /* check src dimensions */ 953 /* check src dimensions */
952 if (IS_845G(dev) || IS_I830(dev)) { 954 if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae2..b83306f9244b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev,
177 177
178 I915_WRITE_CTL(ring, 178 I915_WRITE_CTL(ring,
179 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) 179 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
180 | RING_NO_REPORT | RING_VALID); 180 | RING_REPORT_64K | RING_VALID);
181 181
182 head = I915_READ_HEAD(ring) & HEAD_ADDR; 182 head = I915_READ_HEAD(ring) & HEAD_ADDR;
183 /* If the head is still not zero, the ring is dead */ 183 /* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
654 i915_gem_object_unpin(ring->gem_object); 654 i915_gem_object_unpin(ring->gem_object);
655 drm_gem_object_unreference(ring->gem_object); 655 drm_gem_object_unreference(ring->gem_object);
656 ring->gem_object = NULL; 656 ring->gem_object = NULL;
657
658 if (ring->cleanup)
659 ring->cleanup(ring);
660
657 cleanup_status_page(dev, ring); 661 cleanup_status_page(dev, ring);
658} 662}
659 663
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
688{ 692{
689 unsigned long end; 693 unsigned long end;
690 drm_i915_private_t *dev_priv = dev->dev_private; 694 drm_i915_private_t *dev_priv = dev->dev_private;
695 u32 head;
696
697 head = intel_read_status_page(ring, 4);
698 if (head) {
699 ring->head = head & HEAD_ADDR;
700 ring->space = ring->head - (ring->tail + 8);
701 if (ring->space < 0)
702 ring->space += ring->size;
703 if (ring->space >= n)
704 return 0;
705 }
691 706
692 trace_i915_ring_wait_begin (dev); 707 trace_i915_ring_wait_begin (dev);
693 end = jiffies + 3 * HZ; 708 end = jiffies + 3 * HZ;
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
854 /* do nothing */ 869 /* do nothing */
855} 870}
856 871
872
873/* Workaround for some stepping of SNB,
874 * each time when BLT engine ring tail moved,
875 * the first command in the ring to be parsed
876 * should be MI_BATCH_BUFFER_START
877 */
878#define NEED_BLT_WORKAROUND(dev) \
879 (IS_GEN6(dev) && (dev->pdev->revision < 8))
880
881static inline struct drm_i915_gem_object *
882to_blt_workaround(struct intel_ring_buffer *ring)
883{
884 return ring->private;
885}
886
887static int blt_ring_init(struct drm_device *dev,
888 struct intel_ring_buffer *ring)
889{
890 if (NEED_BLT_WORKAROUND(dev)) {
891 struct drm_i915_gem_object *obj;
892 u32 __iomem *ptr;
893 int ret;
894
895 obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
896 if (obj == NULL)
897 return -ENOMEM;
898
899 ret = i915_gem_object_pin(&obj->base, 4096);
900 if (ret) {
901 drm_gem_object_unreference(&obj->base);
902 return ret;
903 }
904
905 ptr = kmap(obj->pages[0]);
906 iowrite32(MI_BATCH_BUFFER_END, ptr);
907 iowrite32(MI_NOOP, ptr+1);
908 kunmap(obj->pages[0]);
909
910 ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
911 if (ret) {
912 i915_gem_object_unpin(&obj->base);
913 drm_gem_object_unreference(&obj->base);
914 return ret;
915 }
916
917 ring->private = obj;
918 }
919
920 return init_ring_common(dev, ring);
921}
922
923static void blt_ring_begin(struct drm_device *dev,
924 struct intel_ring_buffer *ring,
925 int num_dwords)
926{
927 if (ring->private) {
928 intel_ring_begin(dev, ring, num_dwords+2);
929 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
930 intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
931 } else
932 intel_ring_begin(dev, ring, 4);
933}
934
935static void blt_ring_flush(struct drm_device *dev,
936 struct intel_ring_buffer *ring,
937 u32 invalidate_domains,
938 u32 flush_domains)
939{
940 blt_ring_begin(dev, ring, 4);
941 intel_ring_emit(dev, ring, MI_FLUSH_DW);
942 intel_ring_emit(dev, ring, 0);
943 intel_ring_emit(dev, ring, 0);
944 intel_ring_emit(dev, ring, 0);
945 intel_ring_advance(dev, ring);
946}
947
948static u32
949blt_ring_add_request(struct drm_device *dev,
950 struct intel_ring_buffer *ring,
951 u32 flush_domains)
952{
953 u32 seqno = i915_gem_get_seqno(dev);
954
955 blt_ring_begin(dev, ring, 4);
956 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
957 intel_ring_emit(dev, ring,
958 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
959 intel_ring_emit(dev, ring, seqno);
960 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
961 intel_ring_advance(dev, ring);
962
963 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
964 return seqno;
965}
966
967static void blt_ring_cleanup(struct intel_ring_buffer *ring)
968{
969 if (!ring->private)
970 return;
971
972 i915_gem_object_unpin(ring->private);
973 drm_gem_object_unreference(ring->private);
974 ring->private = NULL;
975}
976
857static const struct intel_ring_buffer gen6_blt_ring = { 977static const struct intel_ring_buffer gen6_blt_ring = {
858 .name = "blt ring", 978 .name = "blt ring",
859 .id = RING_BLT, 979 .id = RING_BLT,
860 .mmio_base = BLT_RING_BASE, 980 .mmio_base = BLT_RING_BASE,
861 .size = 32 * PAGE_SIZE, 981 .size = 32 * PAGE_SIZE,
862 .init = init_ring_common, 982 .init = blt_ring_init,
863 .write_tail = ring_write_tail, 983 .write_tail = ring_write_tail,
864 .flush = gen6_ring_flush, 984 .flush = blt_ring_flush,
865 .add_request = ring_add_request, 985 .add_request = blt_ring_add_request,
866 .get_seqno = ring_status_page_get_seqno, 986 .get_seqno = ring_status_page_get_seqno,
867 .user_irq_get = blt_ring_get_user_irq, 987 .user_irq_get = blt_ring_get_user_irq,
868 .user_irq_put = blt_ring_put_user_irq, 988 .user_irq_put = blt_ring_put_user_irq,
869 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, 989 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
990 .cleanup = blt_ring_cleanup,
870}; 991};
871 992
872int intel_init_render_ring_buffer(struct drm_device *dev) 993int intel_init_render_ring_buffer(struct drm_device *dev)
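
The blt_ring_* additions above work around early-SNB steppings that require the first command parsed after every BLT tail move to be MI_BATCH_BUFFER_START: a 4KiB object holding just MI_BATCH_BUFFER_END + MI_NOOP is pinned once at init, and blt_ring_begin() silently reserves two extra dwords to emit a batch-start pointing at it ahead of each real sequence. A stripped-down model (the opcode value is a placeholder, not the real MI_* encoding):

#include <stdint.h>
#include <stddef.h>

#define MI_BATCH_BUFFER_START	0x31000000u	/* placeholder encoding */

struct ring {
	uint32_t *cmds;
	size_t tail;
	uint32_t wa_batch_offset;	/* 0 = workaround not needed */
};

static void ring_emit(struct ring *r, uint32_t dw)
{
	r->cmds[r->tail++] = dw;
}

/* Counterpart of blt_ring_begin(): callers ask for n dwords; when the
 * workaround object exists, two more are reserved and the prefix is
 * emitted so the parser always sees a batch start first. */
static void blt_begin(struct ring *r, int n)
{
	(void)n;	/* a real ring waits for n (+2) dwords of space here */
	if (r->wa_batch_offset) {
		ring_emit(r, MI_BATCH_BUFFER_START);
		ring_emit(r, r->wa_batch_offset);
	}
}
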
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e5764..3126c2681983 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
63 struct drm_i915_gem_execbuffer2 *exec, 63 struct drm_i915_gem_execbuffer2 *exec,
64 struct drm_clip_rect *cliprects, 64 struct drm_clip_rect *cliprects,
65 uint64_t exec_offset); 65 uint64_t exec_offset);
66 void (*cleanup)(struct intel_ring_buffer *ring);
66 67
67 /** 68 /**
68 * List of objects currently involved in rendering from the 69 * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct intel_ring_buffer {
98 99
99 wait_queue_head_t irq_queue; 100 wait_queue_head_t irq_queue;
100 drm_local_map_t map; 101 drm_local_map_t map;
102
103 void *private;
101}; 104};
102 105
103static inline u32 106static inline u32
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f12a5b3ec050..488c36c8f5e6 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2033,7 +2033,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
2033 u32 grbm_int_cntl = 0; 2033 u32 grbm_int_cntl = 0;
2034 2034
2035 if (!rdev->irq.installed) { 2035 if (!rdev->irq.installed) {
2036 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 2036 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2037 return -EINVAL; 2037 return -EINVAL;
2038 } 2038 }
2039 /* don't enable anything if the ih is disabled */ 2039 /* don't enable anything if the ih is disabled */
@@ -2295,6 +2295,7 @@ restart_ih:
2295 case 0: /* D1 vblank */ 2295 case 0: /* D1 vblank */
2296 if (disp_int & LB_D1_VBLANK_INTERRUPT) { 2296 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2297 drm_handle_vblank(rdev->ddev, 0); 2297 drm_handle_vblank(rdev->ddev, 0);
2298 rdev->pm.vblank_sync = true;
2298 wake_up(&rdev->irq.vblank_queue); 2299 wake_up(&rdev->irq.vblank_queue);
2299 disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2300 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2300 DRM_DEBUG("IH: D1 vblank\n"); 2301 DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2317,7 @@ restart_ih:
2316 case 0: /* D2 vblank */ 2317 case 0: /* D2 vblank */
2317 if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 2318 if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
2318 drm_handle_vblank(rdev->ddev, 1); 2319 drm_handle_vblank(rdev->ddev, 1);
2320 rdev->pm.vblank_sync = true;
2319 wake_up(&rdev->irq.vblank_queue); 2321 wake_up(&rdev->irq.vblank_queue);
2320 disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; 2322 disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
2321 DRM_DEBUG("IH: D2 vblank\n"); 2323 DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2339,7 @@ restart_ih:
2337 case 0: /* D3 vblank */ 2339 case 0: /* D3 vblank */
2338 if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 2340 if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
2339 drm_handle_vblank(rdev->ddev, 2); 2341 drm_handle_vblank(rdev->ddev, 2);
2342 rdev->pm.vblank_sync = true;
2340 wake_up(&rdev->irq.vblank_queue); 2343 wake_up(&rdev->irq.vblank_queue);
2341 disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; 2344 disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
2342 DRM_DEBUG("IH: D3 vblank\n"); 2345 DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2361,7 @@ restart_ih:
2358 case 0: /* D4 vblank */ 2361 case 0: /* D4 vblank */
2359 if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 2362 if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
2360 drm_handle_vblank(rdev->ddev, 3); 2363 drm_handle_vblank(rdev->ddev, 3);
2364 rdev->pm.vblank_sync = true;
2361 wake_up(&rdev->irq.vblank_queue); 2365 wake_up(&rdev->irq.vblank_queue);
2362 disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; 2366 disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
2363 DRM_DEBUG("IH: D4 vblank\n"); 2367 DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2383,7 @@ restart_ih:
2379 case 0: /* D5 vblank */ 2383 case 0: /* D5 vblank */
2380 if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 2384 if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
2381 drm_handle_vblank(rdev->ddev, 4); 2385 drm_handle_vblank(rdev->ddev, 4);
2386 rdev->pm.vblank_sync = true;
2382 wake_up(&rdev->irq.vblank_queue); 2387 wake_up(&rdev->irq.vblank_queue);
2383 disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; 2388 disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
2384 DRM_DEBUG("IH: D5 vblank\n"); 2389 DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2405,7 @@ restart_ih:
2400 case 0: /* D6 vblank */ 2405 case 0: /* D6 vblank */
2401 if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 2406 if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
2402 drm_handle_vblank(rdev->ddev, 5); 2407 drm_handle_vblank(rdev->ddev, 5);
2408 rdev->pm.vblank_sync = true;
2403 wake_up(&rdev->irq.vblank_queue); 2409 wake_up(&rdev->irq.vblank_queue);
2404 disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; 2410 disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
2405 DRM_DEBUG("IH: D6 vblank\n"); 2411 DRM_DEBUG("IH: D6 vblank\n");
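
The six identical evergreen.c hunks all add the same handshake: the vblank interrupt sets rdev->pm.vblank_sync before waking sleepers on the vblank wait queue, so power-management code can sleep until a real vblank has been observed. The userspace analogue of flag-plus-waitqueue is a condition variable (the names below are invented for the sketch):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t pm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pm_cond = PTHREAD_COND_INITIALIZER;
static bool vblank_sync;

void vblank_irq(void)		/* producer: the interrupt handler */
{
	pthread_mutex_lock(&pm_lock);
	vblank_sync = true;	/* set the flag before waking */
	pthread_cond_broadcast(&pm_cond);
	pthread_mutex_unlock(&pm_lock);
}

void wait_for_vblank(void)	/* consumer: reclocking code */
{
	pthread_mutex_lock(&pm_lock);
	while (!vblank_sync)	/* re-check: wakeups can be spurious */
		pthread_cond_wait(&pm_cond, &pm_lock);
	vblank_sync = false;
	pthread_mutex_unlock(&pm_lock);
}
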
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0e8f28a68927..8e10aa9f74b0 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
442 int r; 442 int r;
443 443
444 if (rdev->gart.table.ram.ptr) { 444 if (rdev->gart.table.ram.ptr) {
445 WARN(1, "R100 PCI GART already initialized.\n"); 445 WARN(1, "R100 PCI GART already initialized\n");
446 return 0; 446 return 0;
447 } 447 }
448 /* Initialize common gart structure */ 448 /* Initialize common gart structure */
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev)
516 uint32_t tmp = 0; 516 uint32_t tmp = 0;
517 517
518 if (!rdev->irq.installed) { 518 if (!rdev->irq.installed) {
519 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 519 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
520 WREG32(R_000040_GEN_INT_CNTL, 0); 520 WREG32(R_000040_GEN_INT_CNTL, 0);
521 return -EINVAL; 521 return -EINVAL;
522 } 522 }
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 34527e600fe9..cde1d3480d93 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
91 int r; 91 int r;
92 92
93 if (rdev->gart.table.vram.robj) { 93 if (rdev->gart.table.vram.robj) {
94 WARN(1, "RV370 PCIE GART already initialized.\n"); 94 WARN(1, "RV370 PCIE GART already initialized\n");
95 return 0; 95 return 0;
96 } 96 }
97 /* Initialize common gart structure */ 97 /* Initialize common gart structure */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 33952a12f0a3..0f806cc7dc75 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev)
97{ 97{
98 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> 98 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
99 ASIC_T_SHIFT; 99 ASIC_T_SHIFT;
100 u32 actual_temp = 0;
101 100
102 if ((temp >> 7) & 1) 101 return temp * 1000;
103 actual_temp = 0;
104 else
105 actual_temp = (temp >> 1) & 0xff;
106
107 return actual_temp * 1000;
108} 102}
109 103
110void r600_pm_get_dynpm_state(struct radeon_device *rdev) 104void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
919 int r; 913 int r;
920 914
921 if (rdev->gart.table.vram.robj) { 915 if (rdev->gart.table.vram.robj) {
922 WARN(1, "R600 PCIE GART already initialized.\n"); 916 WARN(1, "R600 PCIE GART already initialized\n");
923 return 0; 917 return 0;
924 } 918 }
925 /* Initialize common gart structure */ 919 /* Initialize common gart structure */
@@ -2995,7 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev)
2995 u32 hdmi1, hdmi2; 2989 u32 hdmi1, hdmi2;
2996 2990
2997 if (!rdev->irq.installed) { 2991 if (!rdev->irq.installed) {
2998 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 2992 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2999 return -EINVAL; 2993 return -EINVAL;
3000 } 2994 }
3001 /* don't enable anything if the ih is disabled */ 2995 /* don't enable anything if the ih is disabled */
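
The rv6xx_get_temp() fix above drops the bogus extra shifting and masking of the already-extracted ASIC_T field and simply scales it, since the thermal interface reports millidegrees Celsius. In isolation (the mask and shift values are placeholders for this sketch, not the register spec):

#include <stdint.h>

#define ASIC_T_SHIFT	16		/* placeholder field position */
#define ASIC_T_MASK	(0xffu << ASIC_T_SHIFT)

/* Extract the temperature field once and convert degrees C to the
 * millidegrees C that hwmon-style consumers expect. */
static uint32_t rv6xx_temp_millicelsius(uint32_t thermal_status)
{
	uint32_t temp = (thermal_status & ASIC_T_MASK) >> ASIC_T_SHIFT;
	return temp * 1000;
}
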
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 04cac7ec9039..87ead090c7d5 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -526,8 +526,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
526 if (crev < 2) 526 if (crev < 2)
527 return false; 527 return false;
528 528
529 router.valid = false;
530
531 obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); 529 obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
532 path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) 530 path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
533 (ctx->bios + data_offset + 531 (ctx->bios + data_offset +
@@ -624,6 +622,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
624 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 622 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
625 continue; 623 continue;
626 624
625 router.ddc_valid = false;
626 router.cd_valid = false;
627 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { 627 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
628 uint8_t grph_obj_id, grph_obj_num, grph_obj_type; 628 uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
629 629
@@ -647,9 +647,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
647 usDeviceTag)); 647 usDeviceTag));
648 648
649 } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { 649 } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
650 router.valid = false;
651 for (k = 0; k < router_obj->ucNumberOfObjects; k++) { 650 for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
652 u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID); 651 u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
653 if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { 652 if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
654 ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) 653 ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
655 (ctx->bios + data_offset + 654 (ctx->bios + data_offset +
@@ -657,6 +656,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
657 ATOM_I2C_RECORD *i2c_record; 656 ATOM_I2C_RECORD *i2c_record;
658 ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; 657 ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
659 ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; 658 ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
659 ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
660 ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = 660 ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
661 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) 661 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
662 (ctx->bios + data_offset + 662 (ctx->bios + data_offset +
@@ -690,10 +690,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
690 case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: 690 case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
691 ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) 691 ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
692 record; 692 record;
693 router.valid = true; 693 router.ddc_valid = true;
694 router.mux_type = ddc_path->ucMuxType; 694 router.ddc_mux_type = ddc_path->ucMuxType;
695 router.mux_control_pin = ddc_path->ucMuxControlPin; 695 router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
696 router.mux_state = ddc_path->ucMuxState[enum_id]; 696 router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
697 break;
698 case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
699 cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
700 record;
701 router.cd_valid = true;
702 router.cd_mux_type = cd_path->ucMuxType;
703 router.cd_mux_control_pin = cd_path->ucMuxControlPin;
704 router.cd_mux_state = cd_path->ucMuxState[enum_id];
697 break; 705 break;
698 } 706 }
699 record = (ATOM_COMMON_RECORD_HEADER *) 707 record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +868,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
860 size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; 868 size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
861 struct radeon_router router; 869 struct radeon_router router;
862 870
863 router.valid = false; 871 router.ddc_valid = false;
872 router.cd_valid = false;
864 873
865 bios_connectors = kzalloc(bc_size, GFP_KERNEL); 874 bios_connectors = kzalloc(bc_size, GFP_KERNEL);
866 if (!bios_connectors) 875 if (!bios_connectors)
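
The radeon_atombios.c changes split the single router "valid" flag into independent DDC and clock/data (CD) paths, each with its own mux settings, and reset both once per display path instead of once per table; the k-vs-j index fix in the router object loop rides along. The resulting state is easiest to see as a struct (field layout sketched from the hunk, not copied from radeon_mode.h):

#include <stdbool.h>
#include <stdint.h>

/* One router chip can mux the DDC lines and the clock/data lines
 * independently, so each path carries its own validity and mux state. */
struct radeon_router_state {
	bool ddc_valid;
	uint8_t ddc_mux_type, ddc_mux_control_pin, ddc_mux_state;
	bool cd_valid;
	uint8_t cd_mux_type, cd_mux_control_pin, cd_mux_state;
};

/* Reset both paths before parsing each connector path, as the hunk now
 * does inside the per-connector loop. */
static void router_reset(struct radeon_router_state *r)
{
	r->ddc_valid = false;
	r->cd_valid = false;
}
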
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4dac4b0a02ee..fe6c74780f18 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
183 continue; 183 continue;
184 184
185 if (priority == true) { 185 if (priority == true) {
186 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); 186 DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
187 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); 187 DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
188 conflict->status = connector_status_disconnected; 188 conflict->status = connector_status_disconnected;
189 radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); 189 radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
190 } else { 190 } else {
191 DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); 191 DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
192 DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict)); 192 DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
193 current_status = connector_status_disconnected; 193 current_status = connector_status_disconnected;
194 } 194 }
195 break; 195 break;
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
432 mode->vdisplay == native_mode->vdisplay) { 432 mode->vdisplay == native_mode->vdisplay) {
433 *native_mode = *mode; 433 *native_mode = *mode;
434 drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); 434 drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
435 DRM_INFO("Determined LVDS native mode details from EDID\n"); 435 DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
436 break; 436 break;
437 } 437 }
438 } 438 }
439 } 439 }
440 if (!native_mode->clock) { 440 if (!native_mode->clock) {
441 DRM_INFO("No LVDS native mode details, disabling RMX\n"); 441 DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
442 radeon_encoder->rmx_type = RMX_OFF; 442 radeon_encoder->rmx_type = RMX_OFF;
443 } 443 }
444} 444}
@@ -1116,7 +1116,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1116 radeon_connector->shared_ddc = true; 1116 radeon_connector->shared_ddc = true;
1117 shared_ddc = true; 1117 shared_ddc = true;
1118 } 1118 }
1119 if (radeon_connector->router_bus && router->valid && 1119 if (radeon_connector->router_bus && router->ddc_valid &&
1120 (radeon_connector->router.router_id == router->router_id)) { 1120 (radeon_connector->router.router_id == router->router_id)) {
1121 radeon_connector->shared_ddc = false; 1121 radeon_connector->shared_ddc = false;
1122 shared_ddc = false; 1122 shared_ddc = false;
@@ -1136,7 +1136,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1136 radeon_connector->connector_object_id = connector_object_id; 1136 radeon_connector->connector_object_id = connector_object_id;
1137 radeon_connector->hpd = *hpd; 1137 radeon_connector->hpd = *hpd;
1138 radeon_connector->router = *router; 1138 radeon_connector->router = *router;
1139 if (router->valid) { 1139 if (router->ddc_valid || router->cd_valid) {
1140 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); 1140 radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
1141 if (!radeon_connector->router_bus) 1141 if (!radeon_connector->router_bus)
1142 goto failed; 1142 goto failed;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0383631da69c..1df4dc6c063c 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev)
315 radeon_connector->ddc_bus->rec.en_data_reg, 315 radeon_connector->ddc_bus->rec.en_data_reg,
316 radeon_connector->ddc_bus->rec.y_clk_reg, 316 radeon_connector->ddc_bus->rec.y_clk_reg,
317 radeon_connector->ddc_bus->rec.y_data_reg); 317 radeon_connector->ddc_bus->rec.y_data_reg);
318 if (radeon_connector->router_bus) 318 if (radeon_connector->router.ddc_valid)
319 DRM_INFO(" DDC Router 0x%x/0x%x\n", 319 DRM_INFO(" DDC Router 0x%x/0x%x\n",
320 radeon_connector->router.mux_control_pin, 320 radeon_connector->router.ddc_mux_control_pin,
321 radeon_connector->router.mux_state); 321 radeon_connector->router.ddc_mux_state);
322 if (radeon_connector->router.cd_valid)
323 DRM_INFO(" Clock/Data Router 0x%x/0x%x\n",
324 radeon_connector->router.cd_mux_control_pin,
325 radeon_connector->router.cd_mux_state);
322 } else { 326 } else {
323 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || 327 if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
324 connector->connector_type == DRM_MODE_CONNECTOR_DVII || 328 connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
398 int ret = 0; 402 int ret = 0;
399 403
400 /* on hw with routers, select right port */ 404 /* on hw with routers, select right port */
401 if (radeon_connector->router.valid) 405 if (radeon_connector->router.ddc_valid)
402 radeon_router_select_port(radeon_connector); 406 radeon_router_select_ddc_port(radeon_connector);
403 407
404 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 408 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
405 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { 409 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector)
432 int ret = 0; 436 int ret = 0;
433 437
434 /* on hw with routers, select right port */ 438 /* on hw with routers, select right port */
435 if (radeon_connector->router.valid) 439 if (radeon_connector->router.ddc_valid)
436 radeon_router_select_port(radeon_connector); 440 radeon_router_select_ddc_port(radeon_connector);
437 441
438 if (!radeon_connector->ddc_bus) 442 if (!radeon_connector->ddc_bus)
439 return -1; 443 return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ae58b6849a2e..f678257c42e6 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1520,6 +1520,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
1520static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) 1520static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1521{ 1521{
1522 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1522 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1523 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1523 1524
1524 if (radeon_encoder->active_device & 1525 if (radeon_encoder->active_device &
1525 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { 1526 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1532,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1531 radeon_atom_output_lock(encoder, true); 1532 radeon_atom_output_lock(encoder, true);
1532 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1533 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1533 1534
1535 /* select the clock/data port if it uses a router */
1536 if (connector) {
1537 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1538 if (radeon_connector->router.cd_valid)
1539 radeon_router_select_cd_port(radeon_connector);
1540 }
1541
1534 /* this is needed for the pll/ss setup to work correctly in some cases */ 1542 /* this is needed for the pll/ss setup to work correctly in some cases */
1535 atombios_set_encoder_crtc_source(encoder); 1543 atombios_set_encoder_crtc_source(encoder);
1536} 1544}
@@ -1547,6 +1555,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1547 struct radeon_device *rdev = dev->dev_private; 1555 struct radeon_device *rdev = dev->dev_private;
1548 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1556 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1549 struct radeon_encoder_atom_dig *dig; 1557 struct radeon_encoder_atom_dig *dig;
1558
1559 /* check for pre-DCE3 cards with shared encoders;
1560 * can't really use the links individually, so don't disable
1561 * the encoder if it's in use by another connector
1562 */
1563 if (!ASIC_IS_DCE3(rdev)) {
1564 struct drm_encoder *other_encoder;
1565 struct radeon_encoder *other_radeon_encoder;
1566
1567 list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
1568 other_radeon_encoder = to_radeon_encoder(other_encoder);
1569 if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
1570 drm_helper_encoder_in_use(other_encoder))
1571 goto disable_done;
1572 }
1573 }
1574
1550 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1575 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1551 1576
1552 switch (radeon_encoder->encoder_id) { 1577 switch (radeon_encoder->encoder_id) {
@@ -1586,6 +1611,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1586 break; 1611 break;
1587 } 1612 }
1588 1613
1614disable_done:
1589 if (radeon_encoder_is_digital(encoder)) { 1615 if (radeon_encoder_is_digital(encoder)) {
1590 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) 1616 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
1591 r600_hdmi_disable(encoder); 1617 r600_hdmi_disable(encoder);
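The hunk above makes radeon_atom_encoder_disable() bail out on pre-DCE3 parts when another connector is still driving the same physical encoder block. A minimal userspace sketch of that guard follows; the types and names here are illustrative stand-ins, not the driver's own structures.

#include <stdbool.h>
#include <stdio.h>

struct encoder {
    int hw_id;      /* id of the underlying hardware block */
    bool in_use;    /* another connector is driving it */
};

/* Disable 'self' only if no other active encoder shares its hw block. */
static void disable_encoder(struct encoder *self, struct encoder *all, int n)
{
    for (int i = 0; i < n; i++) {
        struct encoder *other = &all[i];
        if (other != self && other->hw_id == self->hw_id && other->in_use) {
            printf("hw %d shared and busy, skipping disable\n", self->hw_id);
            return;
        }
    }
    printf("hw %d idle, disabling\n", self->hw_id);
}

int main(void)
{
    struct encoder encoders[] = { { 1, true }, { 1, true }, { 2, false } };

    disable_encoder(&encoders[0], encoders, 3); /* skipped: [1] shares hw 1 */
    disable_encoder(&encoders[2], encoders, 3); /* disabled: no other user */
    return 0;
}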
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 216392d0353b..daacb281dfaf 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -240,7 +240,8 @@ retry:
240 */ 240 */
241 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { 241 if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
242 /* good news we believe it's a lockup */ 242 /* good news we believe it's a lockup */
243 WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); 243 WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
244 fence->seq, seq);
244 /* FIXME: what should we do ? marking everyone 245 /* FIXME: what should we do ? marking everyone
245 * as signaled for now 246 * as signaled for now
246 */ 247 */
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6a13ee38a5b9..0cfbba02c4d0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
53 }; 53 };
54 54
55 /* on hw with routers, select right port */ 55 /* on hw with routers, select right port */
56 if (radeon_connector->router.valid) 56 if (radeon_connector->router.ddc_valid)
57 radeon_router_select_port(radeon_connector); 57 radeon_router_select_ddc_port(radeon_connector);
58 58
59 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); 59 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
60 if (ret == 2) 60 if (ret == 2)
@@ -1084,26 +1084,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
1084 addr, val); 1084 addr, val);
1085} 1085}
1086 1086
1087/* router switching */ 1087/* ddc router switching */
1088void radeon_router_select_port(struct radeon_connector *radeon_connector) 1088void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
1089{ 1089{
1090 u8 val; 1090 u8 val;
1091 1091
1092 if (!radeon_connector->router.valid) 1092 if (!radeon_connector->router.ddc_valid)
1093 return; 1093 return;
1094 1094
1095 radeon_i2c_get_byte(radeon_connector->router_bus, 1095 radeon_i2c_get_byte(radeon_connector->router_bus,
1096 radeon_connector->router.i2c_addr, 1096 radeon_connector->router.i2c_addr,
1097 0x3, &val); 1097 0x3, &val);
1098 val &= radeon_connector->router.mux_control_pin; 1098 val &= ~radeon_connector->router.ddc_mux_control_pin;
1099 radeon_i2c_put_byte(radeon_connector->router_bus, 1099 radeon_i2c_put_byte(radeon_connector->router_bus,
1100 radeon_connector->router.i2c_addr, 1100 radeon_connector->router.i2c_addr,
1101 0x3, val); 1101 0x3, val);
1102 radeon_i2c_get_byte(radeon_connector->router_bus, 1102 radeon_i2c_get_byte(radeon_connector->router_bus,
1103 radeon_connector->router.i2c_addr, 1103 radeon_connector->router.i2c_addr,
1104 0x1, &val); 1104 0x1, &val);
1105 val &= radeon_connector->router.mux_control_pin; 1105 val &= ~radeon_connector->router.ddc_mux_control_pin;
1106 val |= radeon_connector->router.mux_state; 1106 val |= radeon_connector->router.ddc_mux_state;
1107 radeon_i2c_put_byte(radeon_connector->router_bus,
1108 radeon_connector->router.i2c_addr,
1109 0x1, val);
1110}
1111
1112/* clock/data router switching */
1113void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
1114{
1115 u8 val;
1116
1117 if (!radeon_connector->router.cd_valid)
1118 return;
1119
1120 radeon_i2c_get_byte(radeon_connector->router_bus,
1121 radeon_connector->router.i2c_addr,
1122 0x3, &val);
1123 val &= ~radeon_connector->router.cd_mux_control_pin;
1124 radeon_i2c_put_byte(radeon_connector->router_bus,
1125 radeon_connector->router.i2c_addr,
1126 0x3, val);
1127 radeon_i2c_get_byte(radeon_connector->router_bus,
1128 radeon_connector->router.i2c_addr,
1129 0x1, &val);
1130 val &= ~radeon_connector->router.cd_mux_control_pin;
1131 val |= radeon_connector->router.cd_mux_state;
1107 radeon_i2c_put_byte(radeon_connector->router_bus, 1132 radeon_i2c_put_byte(radeon_connector->router_bus,
1108 radeon_connector->router.i2c_addr, 1133 radeon_connector->router.i2c_addr,
1109 0x1, val); 1134 0x1, val);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92457163d070..680f57644e86 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -401,13 +401,19 @@ struct radeon_hpd {
401}; 401};
402 402
403struct radeon_router { 403struct radeon_router {
404 bool valid;
405 u32 router_id; 404 u32 router_id;
406 struct radeon_i2c_bus_rec i2c_info; 405 struct radeon_i2c_bus_rec i2c_info;
407 u8 i2c_addr; 406 u8 i2c_addr;
408 u8 mux_type; 407 /* i2c mux */
409 u8 mux_control_pin; 408 bool ddc_valid;
410 u8 mux_state; 409 u8 ddc_mux_type;
410 u8 ddc_mux_control_pin;
411 u8 ddc_mux_state;
412 /* clock/data mux */
413 bool cd_valid;
414 u8 cd_mux_type;
415 u8 cd_mux_control_pin;
416 u8 cd_mux_state;
411}; 417};
412 418
413struct radeon_connector { 419struct radeon_connector {
@@ -488,7 +494,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
488 u8 slave_addr, 494 u8 slave_addr,
489 u8 addr, 495 u8 addr,
490 u8 val); 496 u8 val);
491extern void radeon_router_select_port(struct radeon_connector *radeon_connector); 497extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
498extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
492extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 499extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
493extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 500extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
494 501
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d7ab91416410..8eb183466015 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
102 type = ttm_bo_type_device; 102 type = ttm_bo_type_device;
103 } 103 }
104 *bo_ptr = NULL; 104 *bo_ptr = NULL;
105
106retry:
105 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); 107 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
106 if (bo == NULL) 108 if (bo == NULL)
107 return -ENOMEM; 109 return -ENOMEM;
@@ -109,8 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
109 bo->gobj = gobj; 111 bo->gobj = gobj;
110 bo->surface_reg = -1; 112 bo->surface_reg = -1;
111 INIT_LIST_HEAD(&bo->list); 113 INIT_LIST_HEAD(&bo->list);
112
113retry:
114 radeon_ttm_placement_from_domain(bo, domain); 114 radeon_ttm_placement_from_domain(bo, domain);
115 /* Kernel allocations are uninterruptible */ 115 /* Kernel allocations are uninterruptible */
116 mutex_lock(&rdev->vram_mutex); 116 mutex_lock(&rdev->vram_mutex);
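The radeon_object.c hunk moves the retry: label above the kzalloc() because the failure path inside the placement loop frees the buffer object; jumping back with the stale pointer would be a use-after-free. A sketch of that shape, with the allocation and placement functions reduced to stand-ins:

#include <stdlib.h>
#include <stdio.h>

struct bo { int domain; };

/* Stand-in: fails for domain 1, freeing the object as TTM does on error. */
static int try_place(struct bo *bo)
{
    if (bo->domain == 1) {
        free(bo);                  /* object is gone after a failed placement */
        return -1;
    }
    return 0;
}

static struct bo *bo_create(int domain, int fallback)
{
    struct bo *bo;

retry:                             /* allocate INSIDE the loop: the failure */
    bo = malloc(sizeof(*bo));      /* path above freed the previous object  */
    if (!bo)
        return NULL;
    bo->domain = domain;

    if (try_place(bo)) {
        if (domain == fallback)
            return NULL;           /* no more domains to try */
        domain = fallback;
        goto retry;
    }
    return bo;
}

int main(void)
{
    struct bo *bo = bo_create(1, 2);      /* domain 1 fails, 2 succeeds */
    printf("placed in domain %d\n", bo ? bo->domain : -1);
    free(bo);
    return 0;
}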
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fe95bb35317e..01c2c736a1da 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
689 gtt = container_of(backend, struct radeon_ttm_backend, backend); 689 gtt = container_of(backend, struct radeon_ttm_backend, backend);
690 gtt->offset = bo_mem->start << PAGE_SHIFT; 690 gtt->offset = bo_mem->start << PAGE_SHIFT;
691 if (!gtt->num_pages) { 691 if (!gtt->num_pages) {
692 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); 692 WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
693 gtt->num_pages, bo_mem, backend);
693 } 694 }
694 r = radeon_gart_bind(gtt->rdev, gtt->offset, 695 r = radeon_gart_bind(gtt->rdev, gtt->offset,
695 gtt->num_pages, gtt->pages); 696 gtt->num_pages, gtt->pages);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index f683e51a2a06..5512e4e5e636 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev)
78 int r; 78 int r;
79 79
80 if (rdev->gart.table.ram.ptr) { 80 if (rdev->gart.table.ram.ptr) {
81 WARN(1, "RS400 GART already initialized.\n"); 81 WARN(1, "RS400 GART already initialized\n");
82 return 0; 82 return 0;
83 } 83 }
84 /* Check gart size */ 84 /* Check gart size */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b091a1f6fa4e..f1c6e02c2e6b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev)
375 int r; 375 int r;
376 376
377 if (rdev->gart.table.vram.robj) { 377 if (rdev->gart.table.vram.robj) {
378 WARN(1, "RS600 GART already initialized.\n"); 378 WARN(1, "RS600 GART already initialized\n");
379 return 0; 379 return 0;
380 } 380 }
381 /* Initialize common gart structure */ 381 /* Initialize common gart structure */
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev)
505 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); 505 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
506 506
507 if (!rdev->irq.installed) { 507 if (!rdev->irq.installed) {
508 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); 508 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
509 WREG32(R_000040_GEN_INT_CNTL, 0); 509 WREG32(R_000040_GEN_INT_CNTL, 0);
510 return -EINVAL; 510 return -EINVAL;
511 } 511 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a1cb783c7131..3ca77dc03915 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@
27/* 27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30/* Notes:
31 *
32 * We store bo pointer in drm_mm_node struct so we know which bo own a
33 * specific node. There is no protection on the pointer, thus to make
34 * sure things don't go berserk you have to access this pointer while
35 * holding the global lru lock and make sure anytime you free a node you
36 * reset the pointer to NULL.
37 */
38 30
39#include "ttm/ttm_module.h" 31#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h" 32#include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
45#include <linux/mm.h> 37#include <linux/mm.h>
46#include <linux/file.h> 38#include <linux/file.h>
47#include <linux/module.h> 39#include <linux/module.h>
40#include <asm/atomic.h>
48 41
49#define TTM_ASSERT_LOCKED(param) 42#define TTM_ASSERT_LOCKED(param)
50#define TTM_DEBUG(fmt, arg...) 43#define TTM_DEBUG(fmt, arg...)
@@ -452,6 +445,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
452 ttm_bo_mem_put(bo, &bo->mem); 445 ttm_bo_mem_put(bo, &bo->mem);
453 446
454 atomic_set(&bo->reserved, 0); 447 atomic_set(&bo->reserved, 0);
448
449 /*
450 * Make processes trying to reserve really pick it up.
451 */
452 smp_mb__after_atomic_dec();
455 wake_up_all(&bo->event_queue); 453 wake_up_all(&bo->event_queue);
456} 454}
457 455
@@ -460,7 +458,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
460 struct ttm_bo_device *bdev = bo->bdev; 458 struct ttm_bo_device *bdev = bo->bdev;
461 struct ttm_bo_global *glob = bo->glob; 459 struct ttm_bo_global *glob = bo->glob;
462 struct ttm_bo_driver *driver; 460 struct ttm_bo_driver *driver;
463 void *sync_obj; 461 void *sync_obj = NULL;
464 void *sync_obj_arg; 462 void *sync_obj_arg;
465 int put_count; 463 int put_count;
466 int ret; 464 int ret;
@@ -495,17 +493,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
495 spin_lock(&glob->lru_lock); 493 spin_lock(&glob->lru_lock);
496 } 494 }
497queue: 495queue:
498 sync_obj = bo->sync_obj;
499 sync_obj_arg = bo->sync_obj_arg;
500 driver = bdev->driver; 496 driver = bdev->driver;
497 if (bo->sync_obj)
498 sync_obj = driver->sync_obj_ref(bo->sync_obj);
499 sync_obj_arg = bo->sync_obj_arg;
501 500
502 kref_get(&bo->list_kref); 501 kref_get(&bo->list_kref);
503 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 502 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
504 spin_unlock(&glob->lru_lock); 503 spin_unlock(&glob->lru_lock);
505 spin_unlock(&bo->lock); 504 spin_unlock(&bo->lock);
506 505
507 if (sync_obj) 506 if (sync_obj) {
508 driver->sync_obj_flush(sync_obj, sync_obj_arg); 507 driver->sync_obj_flush(sync_obj, sync_obj_arg);
508 driver->sync_obj_unref(&sync_obj);
509 }
509 schedule_delayed_work(&bdev->wq, 510 schedule_delayed_work(&bdev->wq,
510 ((HZ / 100) < 1) ? 1 : HZ / 100); 511 ((HZ / 100) < 1) ? 1 : HZ / 100);
511} 512}
@@ -822,7 +823,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
822 bool no_wait_gpu) 823 bool no_wait_gpu)
823{ 824{
824 struct ttm_bo_device *bdev = bo->bdev; 825 struct ttm_bo_device *bdev = bo->bdev;
825 struct ttm_bo_global *glob = bdev->glob;
826 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 826 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
827 int ret; 827 int ret;
828 828
@@ -832,12 +832,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
832 return ret; 832 return ret;
833 if (mem->mm_node) 833 if (mem->mm_node)
834 break; 834 break;
835 spin_lock(&glob->lru_lock);
836 if (list_empty(&man->lru)) {
837 spin_unlock(&glob->lru_lock);
838 break;
839 }
840 spin_unlock(&glob->lru_lock);
841 ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 835 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
842 no_wait_reserve, no_wait_gpu); 836 no_wait_reserve, no_wait_gpu);
843 if (unlikely(ret != 0)) 837 if (unlikely(ret != 0))
@@ -1125,35 +1119,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
1125int ttm_bo_check_placement(struct ttm_buffer_object *bo, 1119int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1126 struct ttm_placement *placement) 1120 struct ttm_placement *placement)
1127{ 1121{
1128 int i; 1122 BUG_ON((placement->fpfn || placement->lpfn) &&
1123 (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1129 1124
1130 if (placement->fpfn || placement->lpfn) {
1131 if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
1132 printk(KERN_ERR TTM_PFX "Page number range to small "
1133 "Need %lu pages, range is [%u, %u]\n",
1134 bo->mem.num_pages, placement->fpfn,
1135 placement->lpfn);
1136 return -EINVAL;
1137 }
1138 }
1139 for (i = 0; i < placement->num_placement; i++) {
1140 if (!capable(CAP_SYS_ADMIN)) {
1141 if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
1142 printk(KERN_ERR TTM_PFX "Need to be root to "
1143 "modify NO_EVICT status.\n");
1144 return -EINVAL;
1145 }
1146 }
1147 }
1148 for (i = 0; i < placement->num_busy_placement; i++) {
1149 if (!capable(CAP_SYS_ADMIN)) {
1150 if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
1151 printk(KERN_ERR TTM_PFX "Need to be root to "
1152 "modify NO_EVICT status.\n");
1153 return -EINVAL;
1154 }
1155 }
1156 }
1157 return 0; 1125 return 0;
1158} 1126}
1159 1127
@@ -1176,6 +1144,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1176 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1144 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1177 if (num_pages == 0) { 1145 if (num_pages == 0) {
1178 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); 1146 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1147 if (destroy)
1148 (*destroy)(bo);
1149 else
1150 kfree(bo);
1179 return -EINVAL; 1151 return -EINVAL;
1180 } 1152 }
1181 bo->destroy = destroy; 1153 bo->destroy = destroy;
@@ -1369,18 +1341,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1369 int ret = -EINVAL; 1341 int ret = -EINVAL;
1370 struct ttm_mem_type_manager *man; 1342 struct ttm_mem_type_manager *man;
1371 1343
1372 if (type >= TTM_NUM_MEM_TYPES) { 1344 BUG_ON(type >= TTM_NUM_MEM_TYPES);
1373 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1374 return ret;
1375 }
1376
1377 man = &bdev->man[type]; 1345 man = &bdev->man[type];
1378 if (man->has_type) { 1346 BUG_ON(man->has_type);
1379 printk(KERN_ERR TTM_PFX
1380 "Memory manager already initialized for type %d\n",
1381 type);
1382 return ret;
1383 }
1384 1347
1385 ret = bdev->driver->init_mem_type(bdev, type, man); 1348 ret = bdev->driver->init_mem_type(bdev, type, man);
1386 if (ret) 1349 if (ret)
@@ -1389,13 +1352,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1389 1352
1390 ret = 0; 1353 ret = 0;
1391 if (type != TTM_PL_SYSTEM) { 1354 if (type != TTM_PL_SYSTEM) {
1392 if (!p_size) {
1393 printk(KERN_ERR TTM_PFX
1394 "Zero size memory manager type %d\n",
1395 type);
1396 return ret;
1397 }
1398
1399 ret = (*man->func->init)(man, p_size); 1355 ret = (*man->func->init)(man, p_size);
1400 if (ret) 1356 if (ret)
1401 return ret; 1357 return ret;
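The ttm_bo_cleanup_refs_or_queue() change above stops using bo->sync_obj directly after the locks are dropped; it takes its own reference under the lock and releases it after the flush, so a concurrent release cannot pull the object out from under the flush. A userspace sketch of the rule with a plain refcount; the sync-object API here is invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sync_obj { int refcount; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct sync_obj *shared;           /* protected by 'lock' */

static struct sync_obj *sync_ref(struct sync_obj *s) { s->refcount++; return s; }

static void sync_unref(struct sync_obj **s)
{
    if (--(*s)->refcount == 0)
        free(*s);
    *s = NULL;
}

static void flush_then_queue(void)
{
    struct sync_obj *sync = NULL;

    pthread_mutex_lock(&lock);
    if (shared)
        sync = sync_ref(shared);          /* own reference, taken under lock */
    pthread_mutex_unlock(&lock);

    /* 'shared' may be dropped by another thread now; our reference keeps
     * the object alive while we use it outside the lock. */
    if (sync) {
        printf("flushing, refcount=%d\n", sync->refcount);
        sync_unref(&sync);
    }
}

int main(void)
{
    shared = malloc(sizeof(*shared));
    shared->refcount = 1;
    flush_then_queue();
    sync_unref(&shared);
    return 0;
}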
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 7410c190c891..038e947d00f9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,6 +1,6 @@
1/************************************************************************** 1/**************************************************************************
2 * 2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA 3 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved. 4 * All Rights Reserved.
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,20 +31,29 @@
31#include "ttm/ttm_module.h" 31#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h" 32#include "ttm/ttm_bo_driver.h"
33#include "ttm/ttm_placement.h" 33#include "ttm/ttm_placement.h"
34#include <linux/jiffies.h> 34#include "drm_mm.h"
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/sched.h> 36#include <linux/spinlock.h>
37#include <linux/mm.h>
38#include <linux/file.h>
39#include <linux/module.h> 37#include <linux/module.h>
40 38
39/**
40 * Currently we use a spinlock for the lock, but a mutex *may* be
41 * more appropriate to reduce scheduling latency if the range manager
42 * ends up with very fragmented allocation patterns.
43 */
44
45struct ttm_range_manager {
46 struct drm_mm mm;
47 spinlock_t lock;
48};
49
41static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, 50static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
42 struct ttm_buffer_object *bo, 51 struct ttm_buffer_object *bo,
43 struct ttm_placement *placement, 52 struct ttm_placement *placement,
44 struct ttm_mem_reg *mem) 53 struct ttm_mem_reg *mem)
45{ 54{
46 struct ttm_bo_global *glob = man->bdev->glob; 55 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
47 struct drm_mm *mm = man->priv; 56 struct drm_mm *mm = &rman->mm;
48 struct drm_mm_node *node = NULL; 57 struct drm_mm_node *node = NULL;
49 unsigned long lpfn; 58 unsigned long lpfn;
50 int ret; 59 int ret;
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
57 if (unlikely(ret)) 66 if (unlikely(ret))
58 return ret; 67 return ret;
59 68
60 spin_lock(&glob->lru_lock); 69 spin_lock(&rman->lock);
61 node = drm_mm_search_free_in_range(mm, 70 node = drm_mm_search_free_in_range(mm,
62 mem->num_pages, mem->page_alignment, 71 mem->num_pages, mem->page_alignment,
63 placement->fpfn, lpfn, 1); 72 placement->fpfn, lpfn, 1);
64 if (unlikely(node == NULL)) { 73 if (unlikely(node == NULL)) {
65 spin_unlock(&glob->lru_lock); 74 spin_unlock(&rman->lock);
66 return 0; 75 return 0;
67 } 76 }
68 node = drm_mm_get_block_atomic_range(node, mem->num_pages, 77 node = drm_mm_get_block_atomic_range(node, mem->num_pages,
69 mem->page_alignment, 78 mem->page_alignment,
70 placement->fpfn, 79 placement->fpfn,
71 lpfn); 80 lpfn);
72 spin_unlock(&glob->lru_lock); 81 spin_unlock(&rman->lock);
73 } while (node == NULL); 82 } while (node == NULL);
74 83
75 mem->mm_node = node; 84 mem->mm_node = node;
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
80static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, 89static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
81 struct ttm_mem_reg *mem) 90 struct ttm_mem_reg *mem)
82{ 91{
83 struct ttm_bo_global *glob = man->bdev->glob; 92 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
84 93
85 if (mem->mm_node) { 94 if (mem->mm_node) {
86 spin_lock(&glob->lru_lock); 95 spin_lock(&rman->lock);
87 drm_mm_put_block(mem->mm_node); 96 drm_mm_put_block(mem->mm_node);
88 spin_unlock(&glob->lru_lock); 97 spin_unlock(&rman->lock);
89 mem->mm_node = NULL; 98 mem->mm_node = NULL;
90 } 99 }
91} 100}
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
93static int ttm_bo_man_init(struct ttm_mem_type_manager *man, 102static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
94 unsigned long p_size) 103 unsigned long p_size)
95{ 104{
96 struct drm_mm *mm; 105 struct ttm_range_manager *rman;
97 int ret; 106 int ret;
98 107
99 mm = kzalloc(sizeof(*mm), GFP_KERNEL); 108 rman = kzalloc(sizeof(*rman), GFP_KERNEL);
100 if (!mm) 109 if (!rman)
101 return -ENOMEM; 110 return -ENOMEM;
102 111
103 ret = drm_mm_init(mm, 0, p_size); 112 ret = drm_mm_init(&rman->mm, 0, p_size);
104 if (ret) { 113 if (ret) {
105 kfree(mm); 114 kfree(rman);
106 return ret; 115 return ret;
107 } 116 }
108 117
109 man->priv = mm; 118 spin_lock_init(&rman->lock);
119 man->priv = rman;
110 return 0; 120 return 0;
111} 121}
112 122
113static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) 123static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
114{ 124{
115 struct ttm_bo_global *glob = man->bdev->glob; 125 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
116 struct drm_mm *mm = man->priv; 126 struct drm_mm *mm = &rman->mm;
117 int ret = 0;
118 127
119 spin_lock(&glob->lru_lock); 128 spin_lock(&rman->lock);
120 if (drm_mm_clean(mm)) { 129 if (drm_mm_clean(mm)) {
121 drm_mm_takedown(mm); 130 drm_mm_takedown(mm);
122 kfree(mm); 131 spin_unlock(&rman->lock);
132 kfree(rman);
123 man->priv = NULL; 133 man->priv = NULL;
124 } else 134 return 0;
125 ret = -EBUSY; 135 }
126 spin_unlock(&glob->lru_lock); 136 spin_unlock(&rman->lock);
127 return ret; 137 return -EBUSY;
128} 138}
129 139
130static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, 140static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
131 const char *prefix) 141 const char *prefix)
132{ 142{
133 struct ttm_bo_global *glob = man->bdev->glob; 143 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
134 struct drm_mm *mm = man->priv;
135 144
136 spin_lock(&glob->lru_lock); 145 spin_lock(&rman->lock);
137 drm_mm_debug_table(mm, prefix); 146 drm_mm_debug_table(&rman->mm, prefix);
138 spin_unlock(&glob->lru_lock); 147 spin_unlock(&rman->lock);
139} 148}
140 149
141const struct ttm_mem_type_manager_func ttm_bo_manager_func = { 150const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
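The range-manager rewrite above gives each memory-type manager its own spinlock instead of borrowing the device-global LRU lock, so allocator traffic no longer serializes with LRU traversal. A minimal sketch of that ownership change, with a pthread mutex standing in for the kernel spinlock and the allocator reduced to a counter:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Each manager owns its allocator state AND the lock protecting it,
 * instead of reaching into a global lock shared with unrelated lists. */
struct range_manager {
    pthread_mutex_t lock;
    long free_pages;
};

static struct range_manager *rman_init(long pages)
{
    struct range_manager *rman = calloc(1, sizeof(*rman));
    if (!rman)
        return NULL;
    pthread_mutex_init(&rman->lock, NULL);
    rman->free_pages = pages;
    return rman;
}

static int rman_get(struct range_manager *rman, long pages)
{
    int ret = -1;
    pthread_mutex_lock(&rman->lock);
    if (rman->free_pages >= pages) {
        rman->free_pages -= pages;
        ret = 0;
    }
    pthread_mutex_unlock(&rman->lock);
    return ret;
}

int main(void)
{
    struct range_manager *vram = rman_init(1024);
    printf("alloc 256: %d, left %ld\n", rman_get(vram, 256), vram->free_pages);
    free(vram);
    return 0;
}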
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a7bab87a548b..af789dc869b9 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
440 return ret; 440 return ret;
441 441
442 ret = be->func->bind(be, bo_mem); 442 ret = be->func->bind(be, bo_mem);
443 if (ret) { 443 if (unlikely(ret != 0))
444 printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
445 return ret; 444 return ret;
446 }
447 445
448 ttm->state = tt_bound; 446 ttm->state = tt_bound;
449 447
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 9b5b4d9dd62c..3e038a394c51 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
235 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - 235 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
236 first_pfn + 1; 236 first_pfn + 1;
237 237
238 if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) 238 vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
239 if (NULL == vsg->pages)
239 return -ENOMEM; 240 return -ENOMEM;
240 memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
241 down_read(&current->mm->mmap_sem); 241 down_read(&current->mm->mmap_sem);
242 ret = get_user_pages(current, current->mm, 242 ret = get_user_pages(current, current->mm,
243 (unsigned long)xfer->mem_addr, 243 (unsigned long)xfer->mem_addr,
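The via_dmablit change replaces vmalloc() plus a manual memset() with vzalloc(), which allocates and zeroes in a single call and cannot forget the clear. The same shape in userspace C is malloc+memset versus calloc:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    size_t n = 128;

    /* before: allocate, then zero by hand */
    int *a = malloc(n * sizeof(*a));
    if (!a)
        return 1;
    memset(a, 0, n * sizeof(*a));

    /* after: one call that allocates and zeroes together */
    int *b = calloc(n, sizeof(*b));
    if (!b) {
        free(a);
        return 1;
    }

    printf("a[0]=%d b[0]=%d\n", a[0], b[0]);
    free(a);
    free(b);
    return 0;
}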
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 51d9f9f1d7f2..76954e3528c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
691 691
692 fence_rep.error = ret; 692 fence_rep.error = ret;
693 fence_rep.fence_seq = (uint64_t) sequence; 693 fence_rep.fence_seq = (uint64_t) sequence;
694 fence_rep.pad64 = 0;
694 695
695 user_fence_rep = (struct drm_vmw_fence_rep __user *) 696 user_fence_rep = (struct drm_vmw_fence_rep __user *)
696 (unsigned long)arg->fence_rep; 697 (unsigned long)arg->fence_rep;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87c6e6156d7d..cceeb42789b6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
720 &vmw_vram_ne_placement, 720 &vmw_vram_ne_placement,
721 false, &vmw_dmabuf_bo_free); 721 false, &vmw_dmabuf_bo_free);
722 vmw_overlay_resume_all(dev_priv); 722 vmw_overlay_resume_all(dev_priv);
723 if (unlikely(ret != 0))
724 vfbs->buffer = NULL;
723 725
724 return ret; 726 return ret;
725} 727}
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
730 struct vmw_framebuffer_surface *vfbs = 732 struct vmw_framebuffer_surface *vfbs =
731 vmw_framebuffer_to_vfbs(&vfb->base); 733 vmw_framebuffer_to_vfbs(&vfb->base);
732 734
735 if (unlikely(vfbs->buffer == NULL))
736 return 0;
737
733 bo = &vfbs->buffer->base; 738 bo = &vfbs->buffer->base;
734 ttm_bo_unref(&bo); 739 ttm_bo_unref(&bo);
735 vfbs->buffer = NULL; 740 vfbs->buffer = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a01c47ddb5bc..29113c9b26a8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
557 return -EINVAL; 557 return -EINVAL;
558 } 558 }
559 559
560 dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv)); 560 dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
561 561
562 if (!dev_priv->ldu_priv) 562 if (!dev_priv->ldu_priv)
563 return -ENOMEM; 563 return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index df2036ed18d5..f1a52f9e7298 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
585 return -ENOSYS; 585 return -ENOSYS;
586 } 586 }
587 587
588 overlay = kmalloc(GFP_KERNEL, sizeof(*overlay)); 588 overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
589 if (!overlay) 589 if (!overlay)
590 return -ENOMEM; 590 return -ENOMEM;
591 591
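Both vmwgfx hunks above fix the same bug: kmalloc() takes (size, flags), and passing GFP_KERNEL as the size allocates a buffer whose length is whatever the flag's numeric value happens to be, with no compiler diagnostic because both parameters are plain integers. A sketch of why it stays silent; the flag value and names are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define GFP_KERNEL 0xd0u   /* illustrative value; the real flag is opaque */

/* Stand-in with kmalloc's parameter order: size first, flags second. */
static void *my_kmalloc(size_t size, unsigned flags)
{
    (void)flags;
    return malloc(size);
}

struct overlay { char payload[4096]; };

int main(void)
{
    /* swapped: compiles fine, but allocates 0xd0 bytes for a 4096-byte
     * struct -- a silent out-of-bounds once 'payload' is used */
    struct overlay *bad = my_kmalloc(GFP_KERNEL, sizeof(struct overlay));

    /* fixed order */
    struct overlay *good = my_kmalloc(sizeof(struct overlay), GFP_KERNEL);

    printf("bad got %u bytes, good got %zu\n",
           GFP_KERNEL, sizeof(struct overlay));
    free(bad);
    free(good);
    return 0;
}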
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 742c423567cf..0e1edd7311ff 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -3,6 +3,9 @@ config STUB_POULSBO
3 depends on PCI 3 depends on PCI
4 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled 4 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
5 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 5 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
6 select VIDEO_OUTPUT_CONTROL if ACPI
7 select BACKLIGHT_CLASS_DEVICE if ACPI
8 select INPUT if ACPI
6 select ACPI_VIDEO if ACPI 9 select ACPI_VIDEO if ACPI
7 help 10 help
8 Choose this option if you have a system that has Intel GMA500 11 Choose this option if you have a system that has Intel GMA500
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 1e4c21fc1a89..86d822aa9bbf 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -178,11 +178,13 @@ static int ad7414_probe(struct i2c_client *client,
178{ 178{
179 struct ad7414_data *data; 179 struct ad7414_data *data;
180 int conf; 180 int conf;
181 int err = 0; 181 int err;
182 182
183 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | 183 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
184 I2C_FUNC_SMBUS_READ_WORD_DATA)) 184 I2C_FUNC_SMBUS_READ_WORD_DATA)) {
185 err = -EOPNOTSUPP;
185 goto exit; 186 goto exit;
187 }
186 188
187 data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL); 189 data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL);
188 if (!data) { 190 if (!data) {
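The ad7414 fix sets err to a real error code before jumping to the cleanup label; previously err was initialized to 0, so a probe that bailed out at the functionality check reported success. (The adt7470 hunk below fixes the same class of bug with PTR_ERR().) A sketch of the goto-cleanup idiom with an explicit code on every exit path; the functions here are stubs:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int check_functionality(void) { return 0; }   /* stub: unsupported */

static int probe(void)
{
    int err;                     /* deliberately NOT initialized to 0 */
    char *data = NULL;

    if (!check_functionality()) {
        err = -EOPNOTSUPP;       /* every goto gets an explicit code */
        goto exit;
    }

    data = malloc(64);
    if (!data) {
        err = -ENOMEM;
        goto exit;
    }

    err = 0;                     /* only the success path returns 0 */
    free(data);
exit:
    return err;
}

int main(void)
{
    printf("probe() = %d\n", probe());   /* -EOPNOTSUPP, not a bogus 0 */
    return 0;
}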
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 9e775717abb7..87d92a56a939 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1286,8 +1286,10 @@ static int adt7470_probe(struct i2c_client *client,
1286 init_completion(&data->auto_update_stop); 1286 init_completion(&data->auto_update_stop);
1287 data->auto_update = kthread_run(adt7470_update_thread, client, 1287 data->auto_update = kthread_run(adt7470_update_thread, client,
1288 dev_name(data->hwmon_dev)); 1288 dev_name(data->hwmon_dev));
1289 if (IS_ERR(data->auto_update)) 1289 if (IS_ERR(data->auto_update)) {
1290 err = PTR_ERR(data->auto_update);
1290 goto exit_unregister; 1291 goto exit_unregister;
1292 }
1291 1293
1292 return 0; 1294 return 0;
1293 1295
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index aa701a183707..f141a1de519c 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -376,10 +376,6 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
376 } 376 }
377 } 377 }
378 378
379 err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
380 if (err)
381 goto err_free_gpio;
382
383 fan_data->num_ctrl = num_ctrl; 379 fan_data->num_ctrl = num_ctrl;
384 fan_data->ctrl = ctrl; 380 fan_data->ctrl = ctrl;
385 fan_data->num_speed = pdata->num_speed; 381 fan_data->num_speed = pdata->num_speed;
@@ -391,6 +387,10 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
391 goto err_free_gpio; 387 goto err_free_gpio;
392 } 388 }
393 389
390 err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
391 if (err)
392 goto err_free_gpio;
393
394 return 0; 394 return 0;
395 395
396err_free_gpio: 396err_free_gpio:
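The gpio-fan reorder moves sysfs_create_group() to after the fan_data fields are filled in: the moment the attribute group exists, userspace can invoke its show/store handlers, so everything they read must already be complete. A sketch of that publish-last rule, with invented names:

#include <stdio.h>

struct fan_data {
    int num_ctrl;
    int speed;
};

static struct fan_data *published;   /* set only when safe to read */

/* Stand-in for a sysfs 'show' handler that may run immediately. */
static void show_speed(void)
{
    if (published)
        printf("speed=%d (num_ctrl=%d)\n", published->speed,
               published->num_ctrl);
}

int main(void)
{
    static struct fan_data fan;

    /* 1. fill in every field ... */
    fan.num_ctrl = 2;
    fan.speed = 1200;

    /* 2. ... THEN publish; readers never see a half-built struct */
    published = &fan;
    show_speed();
    return 0;
}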
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d092ef9291da..7f26ca6ecf75 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -74,6 +74,7 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
74 * dev->event_lock held and interrupts disabled. 74 * dev->event_lock held and interrupts disabled.
75 */ 75 */
76static void input_pass_event(struct input_dev *dev, 76static void input_pass_event(struct input_dev *dev,
77 struct input_handler *src_handler,
77 unsigned int type, unsigned int code, int value) 78 unsigned int type, unsigned int code, int value)
78{ 79{
79 struct input_handler *handler; 80 struct input_handler *handler;
@@ -92,6 +93,15 @@ static void input_pass_event(struct input_dev *dev,
92 continue; 93 continue;
93 94
94 handler = handle->handler; 95 handler = handle->handler;
96
97 /*
98 * If this is the handler that injected this
99 * particular event we want to skip it to avoid
100 * filters firing again and again.
101 */
102 if (handler == src_handler)
103 continue;
104
95 if (!handler->filter) { 105 if (!handler->filter) {
96 if (filtered) 106 if (filtered)
97 break; 107 break;
@@ -121,7 +131,7 @@ static void input_repeat_key(unsigned long data)
121 if (test_bit(dev->repeat_key, dev->key) && 131 if (test_bit(dev->repeat_key, dev->key) &&
122 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { 132 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
123 133
124 input_pass_event(dev, EV_KEY, dev->repeat_key, 2); 134 input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
125 135
126 if (dev->sync) { 136 if (dev->sync) {
127 /* 137 /*
@@ -130,7 +140,7 @@ static void input_repeat_key(unsigned long data)
130 * Otherwise assume that the driver will send 140 * Otherwise assume that the driver will send
131 * SYN_REPORT once it's done. 141 * SYN_REPORT once it's done.
132 */ 142 */
133 input_pass_event(dev, EV_SYN, SYN_REPORT, 1); 143 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
134 } 144 }
135 145
136 if (dev->rep[REP_PERIOD]) 146 if (dev->rep[REP_PERIOD])
@@ -163,6 +173,7 @@ static void input_stop_autorepeat(struct input_dev *dev)
163#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) 173#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
164 174
165static int input_handle_abs_event(struct input_dev *dev, 175static int input_handle_abs_event(struct input_dev *dev,
176 struct input_handler *src_handler,
166 unsigned int code, int *pval) 177 unsigned int code, int *pval)
167{ 178{
168 bool is_mt_event; 179 bool is_mt_event;
@@ -206,13 +217,15 @@ static int input_handle_abs_event(struct input_dev *dev,
206 /* Flush pending "slot" event */ 217 /* Flush pending "slot" event */
207 if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { 218 if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
208 input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); 219 input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
209 input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot); 220 input_pass_event(dev, src_handler,
221 EV_ABS, ABS_MT_SLOT, dev->slot);
210 } 222 }
211 223
212 return INPUT_PASS_TO_HANDLERS; 224 return INPUT_PASS_TO_HANDLERS;
213} 225}
214 226
215static void input_handle_event(struct input_dev *dev, 227static void input_handle_event(struct input_dev *dev,
228 struct input_handler *src_handler,
216 unsigned int type, unsigned int code, int value) 229 unsigned int type, unsigned int code, int value)
217{ 230{
218 int disposition = INPUT_IGNORE_EVENT; 231 int disposition = INPUT_IGNORE_EVENT;
@@ -265,7 +278,8 @@ static void input_handle_event(struct input_dev *dev,
265 278
266 case EV_ABS: 279 case EV_ABS:
267 if (is_event_supported(code, dev->absbit, ABS_MAX)) 280 if (is_event_supported(code, dev->absbit, ABS_MAX))
268 disposition = input_handle_abs_event(dev, code, &value); 281 disposition = input_handle_abs_event(dev, src_handler,
282 code, &value);
269 283
270 break; 284 break;
271 285
@@ -323,7 +337,7 @@ static void input_handle_event(struct input_dev *dev,
323 dev->event(dev, type, code, value); 337 dev->event(dev, type, code, value);
324 338
325 if (disposition & INPUT_PASS_TO_HANDLERS) 339 if (disposition & INPUT_PASS_TO_HANDLERS)
326 input_pass_event(dev, type, code, value); 340 input_pass_event(dev, src_handler, type, code, value);
327} 341}
328 342
329/** 343/**
@@ -352,7 +366,7 @@ void input_event(struct input_dev *dev,
352 366
353 spin_lock_irqsave(&dev->event_lock, flags); 367 spin_lock_irqsave(&dev->event_lock, flags);
354 add_input_randomness(type, code, value); 368 add_input_randomness(type, code, value);
355 input_handle_event(dev, type, code, value); 369 input_handle_event(dev, NULL, type, code, value);
356 spin_unlock_irqrestore(&dev->event_lock, flags); 370 spin_unlock_irqrestore(&dev->event_lock, flags);
357 } 371 }
358} 372}
@@ -382,7 +396,8 @@ void input_inject_event(struct input_handle *handle,
382 rcu_read_lock(); 396 rcu_read_lock();
383 grab = rcu_dereference(dev->grab); 397 grab = rcu_dereference(dev->grab);
384 if (!grab || grab == handle) 398 if (!grab || grab == handle)
385 input_handle_event(dev, type, code, value); 399 input_handle_event(dev, handle->handler,
400 type, code, value);
386 rcu_read_unlock(); 401 rcu_read_unlock();
387 402
388 spin_unlock_irqrestore(&dev->event_lock, flags); 403 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -595,10 +610,10 @@ static void input_dev_release_keys(struct input_dev *dev)
595 for (code = 0; code <= KEY_MAX; code++) { 610 for (code = 0; code <= KEY_MAX; code++) {
596 if (is_event_supported(code, dev->keybit, KEY_MAX) && 611 if (is_event_supported(code, dev->keybit, KEY_MAX) &&
597 __test_and_clear_bit(code, dev->key)) { 612 __test_and_clear_bit(code, dev->key)) {
598 input_pass_event(dev, EV_KEY, code, 0); 613 input_pass_event(dev, NULL, EV_KEY, code, 0);
599 } 614 }
600 } 615 }
601 input_pass_event(dev, EV_SYN, SYN_REPORT, 1); 616 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
602 } 617 }
603} 618}
604 619
@@ -873,9 +888,9 @@ int input_set_keycode(struct input_dev *dev,
873 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && 888 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
874 __test_and_clear_bit(old_keycode, dev->key)) { 889 __test_and_clear_bit(old_keycode, dev->key)) {
875 890
876 input_pass_event(dev, EV_KEY, old_keycode, 0); 891 input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
877 if (dev->sync) 892 if (dev->sync)
878 input_pass_event(dev, EV_SYN, SYN_REPORT, 1); 893 input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
879 } 894 }
880 895
881 out: 896 out:
@@ -1565,8 +1580,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1565 } \ 1580 } \
1566 } while (0) 1581 } while (0)
1567 1582
1568#ifdef CONFIG_PM 1583static void input_dev_toggle(struct input_dev *dev, bool activate)
1569static void input_dev_reset(struct input_dev *dev, bool activate)
1570{ 1584{
1571 if (!dev->event) 1585 if (!dev->event)
1572 return; 1586 return;
@@ -1580,12 +1594,44 @@ static void input_dev_reset(struct input_dev *dev, bool activate)
1580 } 1594 }
1581} 1595}
1582 1596
1597/**
1598 * input_reset_device() - reset/restore the state of input device
1599 * @dev: input device whose state needs to be reset
1600 *
1601 * This function tries to reset the state of an opened input device and
1602 * bring the internal state and the hardware state in sync with each other.
1603 * We mark all keys as released, restore LED state, repeat rate, etc.
1604 */
1605void input_reset_device(struct input_dev *dev)
1606{
1607 mutex_lock(&dev->mutex);
1608
1609 if (dev->users) {
1610 input_dev_toggle(dev, true);
1611
1612 /*
1613 * Keys that have been pressed at suspend time are unlikely
1614 * to be still pressed when we resume.
1615 */
1616 spin_lock_irq(&dev->event_lock);
1617 input_dev_release_keys(dev);
1618 spin_unlock_irq(&dev->event_lock);
1619 }
1620
1621 mutex_unlock(&dev->mutex);
1622}
1623EXPORT_SYMBOL(input_reset_device);
1624
1625#ifdef CONFIG_PM
1583static int input_dev_suspend(struct device *dev) 1626static int input_dev_suspend(struct device *dev)
1584{ 1627{
1585 struct input_dev *input_dev = to_input_dev(dev); 1628 struct input_dev *input_dev = to_input_dev(dev);
1586 1629
1587 mutex_lock(&input_dev->mutex); 1630 mutex_lock(&input_dev->mutex);
1588 input_dev_reset(input_dev, false); 1631
1632 if (input_dev->users)
1633 input_dev_toggle(input_dev, false);
1634
1589 mutex_unlock(&input_dev->mutex); 1635 mutex_unlock(&input_dev->mutex);
1590 1636
1591 return 0; 1637 return 0;
@@ -1595,18 +1641,7 @@ static int input_dev_resume(struct device *dev)
1595{ 1641{
1596 struct input_dev *input_dev = to_input_dev(dev); 1642 struct input_dev *input_dev = to_input_dev(dev);
1597 1643
1598 mutex_lock(&input_dev->mutex); 1644 input_reset_device(input_dev);
1599 input_dev_reset(input_dev, true);
1600
1601 /*
1602 * Keys that have been pressed at suspend time are unlikely
1603 * to be still pressed when we resume.
1604 */
1605 spin_lock_irq(&input_dev->event_lock);
1606 input_dev_release_keys(input_dev);
1607 spin_unlock_irq(&input_dev->event_lock);
1608
1609 mutex_unlock(&input_dev->mutex);
1610 1645
1611 return 0; 1646 return 0;
1612} 1647}
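The input core change threads the originating handler through the dispatch path so an event injected by a filter is not fed back into that same filter, which could otherwise fire again and again. A condensed sketch of the loop with the skip; the handler list is reduced to an array and the names are illustrative:

#include <stdio.h>

struct handler {
    const char *name;
    int is_filter;
};

static struct handler handlers[] = {
    { "filter-a", 1 },
    { "evdev",    0 },
};

/* Pass an event to every handler except the one that injected it. */
static void pass_event(struct handler *src, int code)
{
    for (unsigned i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
        struct handler *h = &handlers[i];

        if (h == src)    /* don't feed the event back to its origin */
            continue;
        printf("%s gets event %d\n", h->name, code);
    }
}

int main(void)
{
    pass_event(NULL, 30);          /* hardware event: everyone sees it */
    pass_event(&handlers[0], 31);  /* injected by filter-a: it is skipped */
    return 0;
}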
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index b92d1cd5cba1..af45d275f686 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -4,7 +4,7 @@
4 * I2C QWERTY Keypad and IO Expander 4 * I2C QWERTY Keypad and IO Expander
5 * Bugs: Enter bugs at http://blackfin.uclinux.org/ 5 * Bugs: Enter bugs at http://blackfin.uclinux.org/
6 * 6 *
7 * Copyright (C) 2008-2009 Analog Devices Inc. 7 * Copyright (C) 2008-2010 Analog Devices Inc.
8 * Licensed under the GPL-2 or later. 8 * Licensed under the GPL-2 or later.
9 */ 9 */
10 10
@@ -24,29 +24,6 @@
24 24
25#include <linux/i2c/adp5588.h> 25#include <linux/i2c/adp5588.h>
26 26
27 /* Configuration Register1 */
28#define AUTO_INC (1 << 7)
29#define GPIEM_CFG (1 << 6)
30#define OVR_FLOW_M (1 << 5)
31#define INT_CFG (1 << 4)
32#define OVR_FLOW_IEN (1 << 3)
33#define K_LCK_IM (1 << 2)
34#define GPI_IEN (1 << 1)
35#define KE_IEN (1 << 0)
36
37/* Interrupt Status Register */
38#define CMP2_INT (1 << 5)
39#define CMP1_INT (1 << 4)
40#define OVR_FLOW_INT (1 << 3)
41#define K_LCK_INT (1 << 2)
42#define GPI_INT (1 << 1)
43#define KE_INT (1 << 0)
44
45/* Key Lock and Event Counter Register */
46#define K_LCK_EN (1 << 6)
47#define LCK21 0x30
48#define KEC 0xF
49
50/* Key Event Register xy */ 27/* Key Event Register xy */
51#define KEY_EV_PRESSED (1 << 7) 28#define KEY_EV_PRESSED (1 << 7)
52#define KEY_EV_MASK (0x7F) 29#define KEY_EV_MASK (0x7F)
@@ -55,10 +32,6 @@
55 32
56#define KEYP_MAX_EVENT 10 33#define KEYP_MAX_EVENT 10
57 34
58#define MAXGPIO 18
59#define ADP_BANK(offs) ((offs) >> 3)
60#define ADP_BIT(offs) (1u << ((offs) & 0x7))
61
62/* 35/*
63 * Early pre 4.0 Silicon required to delay readout by at least 25ms, 36 * Early pre 4.0 Silicon required to delay readout by at least 25ms,
64 * since the Event Counter Register updated 25ms after the interrupt 37 * since the Event Counter Register updated 25ms after the interrupt
@@ -75,7 +48,7 @@ struct adp5588_kpad {
75 const struct adp5588_gpi_map *gpimap; 48 const struct adp5588_gpi_map *gpimap;
76 unsigned short gpimapsize; 49 unsigned short gpimapsize;
77#ifdef CONFIG_GPIOLIB 50#ifdef CONFIG_GPIOLIB
78 unsigned char gpiomap[MAXGPIO]; 51 unsigned char gpiomap[ADP5588_MAXGPIO];
79 bool export_gpio; 52 bool export_gpio;
80 struct gpio_chip gc; 53 struct gpio_chip gc;
81 struct mutex gpio_lock; /* Protect cached dir, dat_out */ 54 struct mutex gpio_lock; /* Protect cached dir, dat_out */
@@ -103,8 +76,8 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val)
103static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off) 76static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
104{ 77{
105 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 78 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
106 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 79 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
107 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 80 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
108 81
109 return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit); 82 return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
110} 83}
@@ -113,8 +86,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
113 unsigned off, int val) 86 unsigned off, int val)
114{ 87{
115 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 88 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
116 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 89 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
117 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 90 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
118 91
119 mutex_lock(&kpad->gpio_lock); 92 mutex_lock(&kpad->gpio_lock);
120 93
@@ -132,8 +105,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
132static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off) 105static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
133{ 106{
134 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 107 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
135 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 108 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
136 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 109 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
137 int ret; 110 int ret;
138 111
139 mutex_lock(&kpad->gpio_lock); 112 mutex_lock(&kpad->gpio_lock);
@@ -150,8 +123,8 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
150 unsigned off, int val) 123 unsigned off, int val)
151{ 124{
152 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 125 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
153 unsigned int bank = ADP_BANK(kpad->gpiomap[off]); 126 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
154 unsigned int bit = ADP_BIT(kpad->gpiomap[off]); 127 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
155 int ret; 128 int ret;
156 129
157 mutex_lock(&kpad->gpio_lock); 130 mutex_lock(&kpad->gpio_lock);
@@ -176,7 +149,7 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip,
176static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad, 149static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
177 const struct adp5588_kpad_platform_data *pdata) 150 const struct adp5588_kpad_platform_data *pdata)
178{ 151{
179 bool pin_used[MAXGPIO]; 152 bool pin_used[ADP5588_MAXGPIO];
180 int n_unused = 0; 153 int n_unused = 0;
181 int i; 154 int i;
182 155
@@ -191,7 +164,7 @@ static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
191 for (i = 0; i < kpad->gpimapsize; i++) 164 for (i = 0; i < kpad->gpimapsize; i++)
192 pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true; 165 pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true;
193 166
194 for (i = 0; i < MAXGPIO; i++) 167 for (i = 0; i < ADP5588_MAXGPIO; i++)
195 if (!pin_used[i]) 168 if (!pin_used[i])
196 kpad->gpiomap[n_unused++] = i; 169 kpad->gpiomap[n_unused++] = i;
197 170
@@ -234,7 +207,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad)
234 return error; 207 return error;
235 } 208 }
236 209
237 for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { 210 for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
238 kpad->dat_out[i] = adp5588_read(kpad->client, 211 kpad->dat_out[i] = adp5588_read(kpad->client,
239 GPIO_DAT_OUT1 + i); 212 GPIO_DAT_OUT1 + i);
240 kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i); 213 kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i);
@@ -318,11 +291,11 @@ static void adp5588_work(struct work_struct *work)
318 291
319 status = adp5588_read(client, INT_STAT); 292 status = adp5588_read(client, INT_STAT);
320 293
321 if (status & OVR_FLOW_INT) /* Unlikely and should never happen */ 294 if (status & ADP5588_OVR_FLOW_INT) /* Unlikely and should never happen */
322 dev_err(&client->dev, "Event Overflow Error\n"); 295 dev_err(&client->dev, "Event Overflow Error\n");
323 296
324 if (status & KE_INT) { 297 if (status & ADP5588_KE_INT) {
325 ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC; 298 ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & ADP5588_KEC;
326 if (ev_cnt) { 299 if (ev_cnt) {
327 adp5588_report_events(kpad, ev_cnt); 300 adp5588_report_events(kpad, ev_cnt);
328 input_sync(kpad->input); 301 input_sync(kpad->input);
@@ -360,7 +333,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
360 if (pdata->en_keylock) { 333 if (pdata->en_keylock) {
361 ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1); 334 ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1);
362 ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2); 335 ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2);
363 ret |= adp5588_write(client, KEY_LCK_EC_STAT, K_LCK_EN); 336 ret |= adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN);
364 } 337 }
365 338
366 for (i = 0; i < KEYP_MAX_EVENT; i++) 339 for (i = 0; i < KEYP_MAX_EVENT; i++)
@@ -384,7 +357,7 @@ static int __devinit adp5588_setup(struct i2c_client *client)
384 } 357 }
385 358
386 if (gpio_data) { 359 if (gpio_data) {
387 for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { 360 for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
388 int pull_mask = gpio_data->pullup_dis_mask; 361 int pull_mask = gpio_data->pullup_dis_mask;
389 362
390 ret |= adp5588_write(client, GPIO_PULL1 + i, 363 ret |= adp5588_write(client, GPIO_PULL1 + i,
@@ -392,11 +365,14 @@ static int __devinit adp5588_setup(struct i2c_client *client)
392 } 365 }
393 } 366 }
394 367
395 ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT | 368 ret |= adp5588_write(client, INT_STAT,
396 OVR_FLOW_INT | K_LCK_INT | 369 ADP5588_CMP2_INT | ADP5588_CMP1_INT |
397 GPI_INT | KE_INT); /* Status is W1C */ 370 ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT |
371 ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */
398 372
399 ret |= adp5588_write(client, CFG, INT_CFG | OVR_FLOW_IEN | KE_IEN); 373 ret |= adp5588_write(client, CFG, ADP5588_INT_CFG |
374 ADP5588_OVR_FLOW_IEN |
375 ADP5588_KE_IEN);
400 376
401 if (ret < 0) { 377 if (ret < 0) {
402 dev_err(&client->dev, "Write Error\n"); 378 dev_err(&client->dev, "Write Error\n");
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index d358ef8623f4..11478eb2c27d 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -63,6 +63,10 @@ static bool atkbd_extra;
63module_param_named(extra, atkbd_extra, bool, 0); 63module_param_named(extra, atkbd_extra, bool, 0);
64MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAccess, EzKey and similar keyboards"); 64MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAccess, EzKey and similar keyboards");
65 65
66static bool atkbd_terminal;
67module_param_named(terminal, atkbd_terminal, bool, 0);
68MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
69
66/* 70/*
67 * Scancode to keycode tables. These are just the default setting, and 71 * Scancode to keycode tables. These are just the default setting, and
68 * are loadable via a userland utility. 72 * are loadable via a userland utility.
@@ -136,7 +140,8 @@ static const unsigned short atkbd_unxlate_table[128] = {
136#define ATKBD_CMD_ENABLE 0x00f4 140#define ATKBD_CMD_ENABLE 0x00f4
137#define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */ 141#define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */
138#define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */ 142#define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */
139#define ATKBD_CMD_SETALL_MBR 0x00fa 143#define ATKBD_CMD_SETALL_MB 0x00f8 /* Set all keys to give break codes */
144#define ATKBD_CMD_SETALL_MBR 0x00fa /* ... and repeat */
140#define ATKBD_CMD_RESET_BAT 0x02ff 145#define ATKBD_CMD_RESET_BAT 0x02ff
141#define ATKBD_CMD_RESEND 0x00fe 146#define ATKBD_CMD_RESEND 0x00fe
142#define ATKBD_CMD_EX_ENABLE 0x10ea 147#define ATKBD_CMD_EX_ENABLE 0x10ea
@@ -764,6 +769,11 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
764 } 769 }
765 } 770 }
766 771
772 if (atkbd_terminal) {
773 ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MB);
774 return 3;
775 }
776
767 if (target_set != 3) 777 if (target_set != 3)
768 return 2; 778 return 2;
769 779
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 4b42ffc0532a..d1583aea1721 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -127,14 +127,6 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
127 idev->id.product = 0x0001; 127 idev->id.product = 0x0001;
128 idev->id.version = 0x0100; 128 idev->id.version = 0x0100;
129 129
130 input_set_drvdata(idev, lp);
131
132 ret = input_register_device(idev);
133 if (ret) {
134 dev_err(&client->dev, "input_register_device() failed\n");
135 goto fail_register;
136 }
137
138 lp->laststate = read_state(lp); 130 lp->laststate = read_state(lp);
139 131
140 ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler, 132 ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
@@ -142,16 +134,21 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2
142 DRV_NAME, lp); 134 DRV_NAME, lp);
143 if (ret) { 135 if (ret) {
144 dev_err(&client->dev, "IRQ %d is not free\n", client->irq); 136 dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
145 goto fail_irq; 137 goto fail_free_device;
138 }
139
140 ret = input_register_device(idev);
141 if (ret) {
142 dev_err(&client->dev, "input_register_device() failed\n");
143 goto fail_free_irq;
146 } 144 }
147 145
148 i2c_set_clientdata(client, lp); 146 i2c_set_clientdata(client, lp);
149 return 0; 147 return 0;
150 148
151 fail_irq: 149 fail_free_irq:
152 input_unregister_device(idev); 150 free_irq(client->irq, lp);
153 fail_register: 151 fail_free_device:
154 input_set_drvdata(idev, NULL);
155 input_free_device(idev); 152 input_free_device(idev);
156 fail_allocate: 153 fail_allocate:
157 kfree(lp); 154 kfree(lp);
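The pcf8574 fix registers the input device last and makes each error label undo exactly what succeeded before it: free_irq() for a requested IRQ, input_free_device() for an allocated-but-unregistered device. (The acecad hunk below adds the same kind of missing step, fail3, for its URB.) A sketch of the ladder, with the resources reduced to stubs:

#include <stdio.h>
#include <stdlib.h>

static int request_irq_stub(void)  { return 0; }
static void free_irq_stub(void)    { puts("freed irq"); }
static int register_dev_stub(void) { return -1; }  /* force the error path */

static int probe(void)
{
    char *dev = malloc(32);
    if (!dev)
        goto fail_alloc;

    if (request_irq_stub())
        goto fail_free_device;

    if (register_dev_stub())
        goto fail_free_irq;        /* IRQ is live, so it must be freed */

    return 0;

fail_free_irq:
    free_irq_stub();               /* undo in exact reverse order */
fail_free_device:
    free(dev);
fail_alloc:
    return -1;
}

int main(void)
{
    printf("probe() = %d\n", probe());
    return 0;
}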
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ed7ad7416b24..a5475b577086 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -351,6 +351,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
351 }, 351 },
352 }, 352 },
353 { 353 {
354 /*
355 * Most (all?) VAIOs do not have external PS/2 ports,
356 * nor do they implement active multiplexing properly,
357 * and MUX discovery usually messes up the keyboard/touchpad.
358 */
359 .matches = {
360 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
361 DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
362 },
363 },
364 {
354 /* Amoi M636/A737 */ 365 /* Amoi M636/A737 */
355 .matches = { 366 .matches = {
356 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), 367 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index aea9a9399a36..d94f7e9aa997 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -229,12 +229,13 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
229 229
230 err = input_register_device(acecad->input); 230 err = input_register_device(acecad->input);
231 if (err) 231 if (err)
232 goto fail2; 232 goto fail3;
233 233
234 usb_set_intfdata(intf, acecad); 234 usb_set_intfdata(intf, acecad);
235 235
236 return 0; 236 return 0;
237 237
238 fail3: usb_free_urb(acecad->irq);
238 fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma); 239 fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma);
239 fail1: input_free_device(input_dev); 240 fail1: input_free_device(input_dev);
240 kfree(acecad); 241 kfree(acecad);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index cc2a88d5192f..77b8fd20cd90 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -10,7 +10,7 @@ menuconfig NEW_LEDS
10if NEW_LEDS 10if NEW_LEDS
11 11
12config LEDS_CLASS 12config LEDS_CLASS
13 tristate "LED Class Support" 13 bool "LED Class Support"
14 help 14 help
15 This option enables the led sysfs class in /sys/class/leds. You'll 15 This option enables the led sysfs class in /sys/class/leds. You'll
16 need this to do anything useful with LEDs. If unsure, say N. 16 need this to do anything useful with LEDs. If unsure, say N.
@@ -176,6 +176,24 @@ config LEDS_LP3944
176 To compile this driver as a module, choose M here: the 176 To compile this driver as a module, choose M here: the
177 module will be called leds-lp3944. 177 module will be called leds-lp3944.
178 178
179config LEDS_LP5521
180 tristate "LED Support for N.S. LP5521 LED driver chip"
181 depends on LEDS_CLASS && I2C
182 help
183 If you say yes here you get support for the National Semiconductor
184	  LP5521 LED driver. It is a 3-channel chip with programmable engines.
185	  The driver provides direct control via the LED class and an
186	  interface for programming the engines.
187
188config LEDS_LP5523
189 tristate "LED Support for N.S. LP5523 LED driver chip"
190 depends on LEDS_CLASS && I2C
191 help
192 If you say yes here you get support for the National Semiconductor
193	  LP5523 LED driver. It is a 9-channel chip with programmable engines.
194	  The driver provides direct control via the LED class and an
195	  interface for programming the engines.
196
179config LEDS_CLEVO_MAIL 197config LEDS_CLEVO_MAIL
180 tristate "Mail LED on Clevo notebook" 198 tristate "Mail LED on Clevo notebook"
181 depends on X86 && SERIO_I8042 && DMI 199 depends on X86 && SERIO_I8042 && DMI
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 9c96db40ef6d..aae6989ff6b6 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -23,6 +23,8 @@ obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
23obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o 23obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
24obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o 24obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
25obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o 25obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
26obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
27obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
26obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o 28obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
27obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o 29obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
28obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 30obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 260660076507..211e21f34bd5 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -81,6 +81,79 @@ static struct device_attribute led_class_attrs[] = {
81 __ATTR_NULL, 81 __ATTR_NULL,
82}; 82};
83 83
84static void led_timer_function(unsigned long data)
85{
86 struct led_classdev *led_cdev = (void *)data;
87 unsigned long brightness;
88 unsigned long delay;
89
90 if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
91 led_set_brightness(led_cdev, LED_OFF);
92 return;
93 }
94
95 brightness = led_get_brightness(led_cdev);
96 if (!brightness) {
97 /* Time to switch the LED on. */
98 brightness = led_cdev->blink_brightness;
99 delay = led_cdev->blink_delay_on;
100 } else {
101 /* Store the current brightness value to be able
102 * to restore it when the delay_off period is over.
103 */
104 led_cdev->blink_brightness = brightness;
105 brightness = LED_OFF;
106 delay = led_cdev->blink_delay_off;
107 }
108
109 led_set_brightness(led_cdev, brightness);
110
111 mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
112}
113
114static void led_stop_software_blink(struct led_classdev *led_cdev)
115{
116 /* deactivate previous settings */
117 del_timer_sync(&led_cdev->blink_timer);
118 led_cdev->blink_delay_on = 0;
119 led_cdev->blink_delay_off = 0;
120}
121
122static void led_set_software_blink(struct led_classdev *led_cdev,
123 unsigned long delay_on,
124 unsigned long delay_off)
125{
126 int current_brightness;
127
128 current_brightness = led_get_brightness(led_cdev);
129 if (current_brightness)
130 led_cdev->blink_brightness = current_brightness;
131 if (!led_cdev->blink_brightness)
132 led_cdev->blink_brightness = led_cdev->max_brightness;
133
134 if (delay_on == led_cdev->blink_delay_on &&
135 delay_off == led_cdev->blink_delay_off)
136 return;
137
138 led_stop_software_blink(led_cdev);
139
140 led_cdev->blink_delay_on = delay_on;
141 led_cdev->blink_delay_off = delay_off;
142
143 /* never on - don't blink */
144 if (!delay_on)
145 return;
146
147 /* never off - just set to brightness */
148 if (!delay_off) {
149 led_set_brightness(led_cdev, led_cdev->blink_brightness);
150 return;
151 }
152
153 mod_timer(&led_cdev->blink_timer, jiffies + 1);
154}
155
156
84/** 157/**
85 * led_classdev_suspend - suspend an led_classdev. 158 * led_classdev_suspend - suspend an led_classdev.
86 * @led_cdev: the led_classdev to suspend. 159 * @led_cdev: the led_classdev to suspend.
@@ -148,6 +221,10 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
148 221
149 led_update_brightness(led_cdev); 222 led_update_brightness(led_cdev);
150 223
224 init_timer(&led_cdev->blink_timer);
225 led_cdev->blink_timer.function = led_timer_function;
226 led_cdev->blink_timer.data = (unsigned long)led_cdev;
227
151#ifdef CONFIG_LEDS_TRIGGERS 228#ifdef CONFIG_LEDS_TRIGGERS
152 led_trigger_set_default(led_cdev); 229 led_trigger_set_default(led_cdev);
153#endif 230#endif
@@ -157,7 +234,6 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
157 234
158 return 0; 235 return 0;
159} 236}
160
161EXPORT_SYMBOL_GPL(led_classdev_register); 237EXPORT_SYMBOL_GPL(led_classdev_register);
162 238
163/** 239/**
@@ -175,6 +251,9 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
175 up_write(&led_cdev->trigger_lock); 251 up_write(&led_cdev->trigger_lock);
176#endif 252#endif
177 253
254 /* Stop blinking */
255 led_brightness_set(led_cdev, LED_OFF);
256
178 device_unregister(led_cdev->dev); 257 device_unregister(led_cdev->dev);
179 258
180 down_write(&leds_list_lock); 259 down_write(&leds_list_lock);
@@ -183,6 +262,30 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
183} 262}
184EXPORT_SYMBOL_GPL(led_classdev_unregister); 263EXPORT_SYMBOL_GPL(led_classdev_unregister);
185 264
265void led_blink_set(struct led_classdev *led_cdev,
266 unsigned long *delay_on,
267 unsigned long *delay_off)
268{
269 if (led_cdev->blink_set &&
270 led_cdev->blink_set(led_cdev, delay_on, delay_off))
271 return;
272
273 /* blink with 1 Hz as default if nothing specified */
274 if (!*delay_on && !*delay_off)
275 *delay_on = *delay_off = 500;
276
277 led_set_software_blink(led_cdev, *delay_on, *delay_off);
278}
279EXPORT_SYMBOL(led_blink_set);
280
281void led_brightness_set(struct led_classdev *led_cdev,
282 enum led_brightness brightness)
283{
284 led_stop_software_blink(led_cdev);
285 led_cdev->brightness_set(led_cdev, brightness);
286}
287EXPORT_SYMBOL(led_brightness_set);
288
186static int __init leds_init(void) 289static int __init leds_init(void)
187{ 290{
188 leds_class = class_create(THIS_MODULE, "leds"); 291 leds_class = class_create(THIS_MODULE, "leds");
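
The led-class.c hunks above add a generic software-blink fallback: led_blink_set() first offers the timing to the driver's own blink_set() hook and only then falls back to the blink_timer installed in led_classdev_register(), while led_brightness_set() cancels any software blink before writing the level. A minimal caller sketch using only the interfaces added above (my_led and the 500 ms delays are illustrative, not part of this patch):

#include <linux/leds.h>

static void my_led_start_blink(struct led_classdev *my_led)
{
	unsigned long delay_on = 500;	/* ms */
	unsigned long delay_off = 500;	/* ms */

	/* passing 0/0 instead would request the 1 Hz default above */
	led_blink_set(my_led, &delay_on, &delay_off);
}

static void my_led_stop_blink(struct led_classdev *my_led)
{
	/* also stops the software blink timer before writing */
	led_brightness_set(my_led, LED_OFF);
}
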
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index f1c00db88b5e..c41eb6180c9c 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -113,7 +113,7 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
113 if (led_cdev->trigger->deactivate) 113 if (led_cdev->trigger->deactivate)
114 led_cdev->trigger->deactivate(led_cdev); 114 led_cdev->trigger->deactivate(led_cdev);
115 led_cdev->trigger = NULL; 115 led_cdev->trigger = NULL;
116 led_set_brightness(led_cdev, LED_OFF); 116 led_brightness_set(led_cdev, LED_OFF);
117 } 117 }
118 if (trigger) { 118 if (trigger) {
119 write_lock_irqsave(&trigger->leddev_list_lock, flags); 119 write_lock_irqsave(&trigger->leddev_list_lock, flags);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index ea57e05d08f3..4d9fa38d9ff6 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -316,7 +316,7 @@ static struct of_platform_driver of_gpio_leds_driver = {
316 316
317static int __init gpio_led_init(void) 317static int __init gpio_led_init(void)
318{ 318{
319 int ret; 319 int ret = 0;
320 320
321#ifdef CONFIG_LEDS_GPIO_PLATFORM 321#ifdef CONFIG_LEDS_GPIO_PLATFORM
322 ret = platform_driver_register(&gpio_led_driver); 322 ret = platform_driver_register(&gpio_led_driver);
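
The one-line leds-gpio change closes an uninitialized-return hole: with CONFIG_LEDS_GPIO_PLATFORM disabled, nothing assigned ret before gpio_led_init() returned it. A stripped-down sketch of the hazard (CONFIG_EXAMPLE_PATH and do_register() are stand-ins):

#ifdef CONFIG_EXAMPLE_PATH
static int do_register(void) { return 0; }
#endif

static int init_example(void)
{
	int ret = 0;	/* without "= 0", garbage is returned when the
			 * #ifdef body below is compiled out */

#ifdef CONFIG_EXAMPLE_PATH
	ret = do_register();
#endif
	return ret;
}
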
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
new file mode 100644
index 000000000000..3782f31f06d2
--- /dev/null
+++ b/drivers/leds/leds-lp5521.c
@@ -0,0 +1,821 @@
1/*
2 * LP5521 LED chip driver.
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/i2c.h>
26#include <linux/mutex.h>
27#include <linux/gpio.h>
28#include <linux/interrupt.h>
29#include <linux/delay.h>
30#include <linux/ctype.h>
31#include <linux/spinlock.h>
32#include <linux/wait.h>
33#include <linux/leds.h>
34#include <linux/leds-lp5521.h>
35#include <linux/workqueue.h>
36#include <linux/slab.h>
37
38#define LP5521_PROGRAM_LENGTH 32 /* in bytes */
39
40#define LP5521_MAX_LEDS 3 /* Maximum number of LEDs */
41#define LP5521_MAX_ENGINES 3 /* Maximum number of engines */
42
43#define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */
44#define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */
45
46#define LP5521_CMD_LOAD 0x15 /* 00010101 */
47#define LP5521_CMD_RUN 0x2a /* 00101010 */
48#define LP5521_CMD_DIRECT 0x3f /* 00111111 */
49#define LP5521_CMD_DISABLED 0x00 /* 00000000 */
50
51/* Registers */
52#define LP5521_REG_ENABLE 0x00
53#define LP5521_REG_OP_MODE 0x01
54#define LP5521_REG_R_PWM 0x02
55#define LP5521_REG_G_PWM 0x03
56#define LP5521_REG_B_PWM 0x04
57#define LP5521_REG_R_CURRENT 0x05
58#define LP5521_REG_G_CURRENT 0x06
59#define LP5521_REG_B_CURRENT 0x07
60#define LP5521_REG_CONFIG 0x08
61#define LP5521_REG_R_CHANNEL_PC 0x09
62#define LP5521_REG_G_CHANNEL_PC 0x0A
63#define LP5521_REG_B_CHANNEL_PC 0x0B
64#define LP5521_REG_STATUS 0x0C
65#define LP5521_REG_RESET 0x0D
66#define LP5521_REG_GPO 0x0E
67#define LP5521_REG_R_PROG_MEM 0x10
68#define LP5521_REG_G_PROG_MEM 0x30
69#define LP5521_REG_B_PROG_MEM 0x50
70
71#define LP5521_PROG_MEM_BASE LP5521_REG_R_PROG_MEM
72#define LP5521_PROG_MEM_SIZE 0x20
73
74/* Base register to set LED current */
75#define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT
76
77/* Base register to set the brightness */
78#define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM
79
80/* Bits in ENABLE register */
81#define LP5521_MASTER_ENABLE 0x40 /* Chip master enable */
82#define LP5521_LOGARITHMIC_PWM 0x80 /* Logarithmic PWM adjustment */
83#define LP5521_EXEC_RUN 0x2A
84
85/* Bits in CONFIG register */
86#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */
87#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */
88#define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */
89#define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */
90#define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */
91#define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */
92#define LP5521_R_TO_BATT 4 /* R out: 0 = CP, 1 = Vbat */
93#define LP5521_CLK_SRC_EXT 0 /* Ext-clk source (CLK_32K) */
94#define LP5521_CLK_INT 1 /* Internal clock */
95#define LP5521_CLK_AUTO 2 /* Automatic clock selection */
96
97/* Status */
98#define LP5521_EXT_CLK_USED 0x08
99
100struct lp5521_engine {
101 const struct attribute_group *attributes;
102 int id;
103 u8 mode;
104 u8 prog_page;
105 u8 engine_mask;
106};
107
108struct lp5521_led {
109 int id;
110 u8 chan_nr;
111 u8 led_current;
112 u8 max_current;
113 struct led_classdev cdev;
114 struct work_struct brightness_work;
115 u8 brightness;
116};
117
118struct lp5521_chip {
119 struct lp5521_platform_data *pdata;
120 struct mutex lock; /* Serialize control */
121 struct i2c_client *client;
122 struct lp5521_engine engines[LP5521_MAX_ENGINES];
123 struct lp5521_led leds[LP5521_MAX_LEDS];
124 u8 num_channels;
125 u8 num_leds;
126};
127
128#define cdev_to_led(c) container_of(c, struct lp5521_led, cdev)
129#define engine_to_lp5521(eng) container_of((eng), struct lp5521_chip, \
130 engines[(eng)->id - 1])
131#define led_to_lp5521(led) container_of((led), struct lp5521_chip, \
132 leds[(led)->id])
133
134static void lp5521_led_brightness_work(struct work_struct *work);
135
136static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
137{
138 return i2c_smbus_write_byte_data(client, reg, value);
139}
140
141static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
142{
143 s32 ret;
144
145 ret = i2c_smbus_read_byte_data(client, reg);
146 if (ret < 0)
147 return -EIO;
148
149 *buf = ret;
150 return 0;
151}
152
153static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
154{
155 struct lp5521_chip *chip = engine_to_lp5521(engine);
156 struct i2c_client *client = chip->client;
157 int ret;
158 u8 engine_state;
159
160	/* Only transitions between RUN and DIRECT mode are handled here */
161 if (mode == LP5521_CMD_LOAD)
162 return 0;
163
164 if (mode == LP5521_CMD_DISABLED)
165 mode = LP5521_CMD_DIRECT;
166
167 ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);
168
169 /* set mode only for this engine */
170 engine_state &= ~(engine->engine_mask);
171 mode &= engine->engine_mask;
172 engine_state |= mode;
173 ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
174
175 return ret;
176}
177
178static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
179{
180 struct lp5521_chip *chip = engine_to_lp5521(eng);
181 struct i2c_client *client = chip->client;
182 int ret;
183 int addr;
184 u8 mode;
185
186 /* move current engine to direct mode and remember the state */
187 ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
188 usleep_range(1000, 10000);
189 ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
190
191	/* For loading, move all the engines to load mode */
192 lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
193 usleep_range(1000, 10000);
194 lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
195 usleep_range(1000, 10000);
196
197 addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
198 i2c_smbus_write_i2c_block_data(client,
199 addr,
200 LP5521_PROG_MEM_SIZE,
201 pattern);
202
203 ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
204 return ret;
205}
206
207static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
208{
209 return lp5521_write(chip->client,
210 LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
211 curr);
212}
213
214static void lp5521_init_engine(struct lp5521_chip *chip,
215 const struct attribute_group *attr_group)
216{
217 int i;
218 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
219 chip->engines[i].id = i + 1;
220 chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
221 chip->engines[i].prog_page = i;
222 chip->engines[i].attributes = &attr_group[i];
223 }
224}
225
226static int lp5521_configure(struct i2c_client *client,
227 const struct attribute_group *attr_group)
228{
229 struct lp5521_chip *chip = i2c_get_clientdata(client);
230 int ret;
231
232 lp5521_init_engine(chip, attr_group);
233
234 lp5521_write(client, LP5521_REG_RESET, 0xff);
235
236 usleep_range(10000, 20000);
237
238 /* Set all PWMs to direct control mode */
239 ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
240
241 /* Enable auto-powersave, set charge pump to auto, red to battery */
242 ret |= lp5521_write(client, LP5521_REG_CONFIG,
243 LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
244
245	/* Initialize all channels' PWM to zero -> leds off */
246 ret |= lp5521_write(client, LP5521_REG_R_PWM, 0);
247 ret |= lp5521_write(client, LP5521_REG_G_PWM, 0);
248 ret |= lp5521_write(client, LP5521_REG_B_PWM, 0);
249
250	/* Engines are set to run state when OP_MODE enables them */
251 ret |= lp5521_write(client, LP5521_REG_ENABLE,
252 LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM |
253 LP5521_EXEC_RUN);
254 /* enable takes 500us */
255 usleep_range(500, 20000);
256
257 return ret;
258}
259
260static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf)
261{
262 int ret;
263 u8 status;
264
265 ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status);
266 if (ret < 0)
267 return ret;
268
269 /* Check that ext clock is really in use if requested */
270 if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT)
271 if ((status & LP5521_EXT_CLK_USED) == 0)
272 return -EIO;
273 return 0;
274}
275
276static void lp5521_set_brightness(struct led_classdev *cdev,
277 enum led_brightness brightness)
278{
279 struct lp5521_led *led = cdev_to_led(cdev);
280 led->brightness = (u8)brightness;
281 schedule_work(&led->brightness_work);
282}
283
284static void lp5521_led_brightness_work(struct work_struct *work)
285{
286 struct lp5521_led *led = container_of(work,
287 struct lp5521_led,
288 brightness_work);
289 struct lp5521_chip *chip = led_to_lp5521(led);
290 struct i2c_client *client = chip->client;
291
292 mutex_lock(&chip->lock);
293 lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr,
294 led->brightness);
295 mutex_unlock(&chip->lock);
296}
297
298/* Detect the chip by setting its ENABLE register and reading it back. */
299static int lp5521_detect(struct i2c_client *client)
300{
301 int ret;
302 u8 buf;
303
304 ret = lp5521_write(client, LP5521_REG_ENABLE,
305 LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM);
306 if (ret)
307 return ret;
308 usleep_range(1000, 10000);
309 ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
310 if (ret)
311 return ret;
312 if (buf != (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM))
313 return -ENODEV;
314
315 return 0;
316}
317
318/* Set engine mode and create appropriate sysfs attributes, if required. */
319static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
320{
321 struct lp5521_chip *chip = engine_to_lp5521(engine);
322 struct i2c_client *client = chip->client;
323 struct device *dev = &client->dev;
324 int ret = 0;
325
326	/* if already in that mode, do nothing (except for run) */
327 if (mode == engine->mode && mode != LP5521_CMD_RUN)
328 return 0;
329
330 if (mode == LP5521_CMD_RUN) {
331 ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
332 } else if (mode == LP5521_CMD_LOAD) {
333 lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
334 lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
335
336 ret = sysfs_create_group(&dev->kobj, engine->attributes);
337 if (ret)
338 return ret;
339 } else if (mode == LP5521_CMD_DISABLED) {
340 lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
341 }
342
343 /* remove load attribute from sysfs if not in load mode */
344 if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
345 sysfs_remove_group(&dev->kobj, engine->attributes);
346
347 engine->mode = mode;
348
349 return ret;
350}
351
352static int lp5521_do_store_load(struct lp5521_engine *engine,
353 const char *buf, size_t len)
354{
355 struct lp5521_chip *chip = engine_to_lp5521(engine);
356 struct i2c_client *client = chip->client;
357 int ret, nrchars, offset = 0, i = 0;
358 char c[3];
359 unsigned cmd;
360 u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
361
362 while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
363		/* separate sscanfs because the field width works only for %s */
364 ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
365 ret = sscanf(c, "%2x", &cmd);
366 if (ret != 1)
367 goto fail;
368 pattern[i] = (u8)cmd;
369
370 offset += nrchars;
371 i++;
372 }
373
374	/* Each instruction is 16 bits long. Check that the length is even */
375 if (i % 2)
376 goto fail;
377
378 mutex_lock(&chip->lock);
379 ret = lp5521_load_program(engine, pattern);
380 mutex_unlock(&chip->lock);
381
382 if (ret) {
383 dev_err(&client->dev, "failed loading pattern\n");
384 return ret;
385 }
386
387 return len;
388fail:
389 dev_err(&client->dev, "wrong pattern format\n");
390 return -EINVAL;
391}
392
393static ssize_t store_engine_load(struct device *dev,
394 struct device_attribute *attr,
395 const char *buf, size_t len, int nr)
396{
397 struct i2c_client *client = to_i2c_client(dev);
398 struct lp5521_chip *chip = i2c_get_clientdata(client);
399 return lp5521_do_store_load(&chip->engines[nr - 1], buf, len);
400}
401
402#define store_load(nr) \
403static ssize_t store_engine##nr##_load(struct device *dev, \
404 struct device_attribute *attr, \
405 const char *buf, size_t len) \
406{ \
407 return store_engine_load(dev, attr, buf, len, nr); \
408}
409store_load(1)
410store_load(2)
411store_load(3)
412
413static ssize_t show_engine_mode(struct device *dev,
414 struct device_attribute *attr,
415 char *buf, int nr)
416{
417 struct i2c_client *client = to_i2c_client(dev);
418 struct lp5521_chip *chip = i2c_get_clientdata(client);
419 switch (chip->engines[nr - 1].mode) {
420 case LP5521_CMD_RUN:
421 return sprintf(buf, "run\n");
422 case LP5521_CMD_LOAD:
423 return sprintf(buf, "load\n");
424 case LP5521_CMD_DISABLED:
425 return sprintf(buf, "disabled\n");
426 default:
427 return sprintf(buf, "disabled\n");
428 }
429}
430
431#define show_mode(nr) \
432static ssize_t show_engine##nr##_mode(struct device *dev, \
433 struct device_attribute *attr, \
434 char *buf) \
435{ \
436 return show_engine_mode(dev, attr, buf, nr); \
437}
438show_mode(1)
439show_mode(2)
440show_mode(3)
441
442static ssize_t store_engine_mode(struct device *dev,
443 struct device_attribute *attr,
444 const char *buf, size_t len, int nr)
445{
446 struct i2c_client *client = to_i2c_client(dev);
447 struct lp5521_chip *chip = i2c_get_clientdata(client);
448 struct lp5521_engine *engine = &chip->engines[nr - 1];
449 mutex_lock(&chip->lock);
450
451 if (!strncmp(buf, "run", 3))
452 lp5521_set_mode(engine, LP5521_CMD_RUN);
453 else if (!strncmp(buf, "load", 4))
454 lp5521_set_mode(engine, LP5521_CMD_LOAD);
455 else if (!strncmp(buf, "disabled", 8))
456 lp5521_set_mode(engine, LP5521_CMD_DISABLED);
457
458 mutex_unlock(&chip->lock);
459 return len;
460}
461
462#define store_mode(nr) \
463static ssize_t store_engine##nr##_mode(struct device *dev, \
464 struct device_attribute *attr, \
465 const char *buf, size_t len) \
466{ \
467 return store_engine_mode(dev, attr, buf, len, nr); \
468}
469store_mode(1)
470store_mode(2)
471store_mode(3)
472
473static ssize_t show_max_current(struct device *dev,
474 struct device_attribute *attr,
475 char *buf)
476{
477 struct led_classdev *led_cdev = dev_get_drvdata(dev);
478 struct lp5521_led *led = cdev_to_led(led_cdev);
479
480 return sprintf(buf, "%d\n", led->max_current);
481}
482
483static ssize_t show_current(struct device *dev,
484 struct device_attribute *attr,
485 char *buf)
486{
487 struct led_classdev *led_cdev = dev_get_drvdata(dev);
488 struct lp5521_led *led = cdev_to_led(led_cdev);
489
490 return sprintf(buf, "%d\n", led->led_current);
491}
492
493static ssize_t store_current(struct device *dev,
494 struct device_attribute *attr,
495 const char *buf, size_t len)
496{
497 struct led_classdev *led_cdev = dev_get_drvdata(dev);
498 struct lp5521_led *led = cdev_to_led(led_cdev);
499 struct lp5521_chip *chip = led_to_lp5521(led);
500 ssize_t ret;
501 unsigned long curr;
502
503 if (strict_strtoul(buf, 0, &curr))
504 return -EINVAL;
505
506 if (curr > led->max_current)
507 return -EINVAL;
508
509 mutex_lock(&chip->lock);
510 ret = lp5521_set_led_current(chip, led->id, curr);
511 mutex_unlock(&chip->lock);
512
513 if (ret < 0)
514 return ret;
515
516 led->led_current = (u8)curr;
517
518 return len;
519}
520
521static ssize_t lp5521_selftest(struct device *dev,
522 struct device_attribute *attr,
523 char *buf)
524{
525 struct i2c_client *client = to_i2c_client(dev);
526 struct lp5521_chip *chip = i2c_get_clientdata(client);
527 int ret;
528
529 mutex_lock(&chip->lock);
530 ret = lp5521_run_selftest(chip, buf);
531 mutex_unlock(&chip->lock);
532 return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
533}
534
535/* led class device attributes */
536static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
537static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
538
539static struct attribute *lp5521_led_attributes[] = {
540 &dev_attr_led_current.attr,
541 &dev_attr_max_current.attr,
542 NULL,
543};
544
545static struct attribute_group lp5521_led_attribute_group = {
546 .attrs = lp5521_led_attributes
547};
548
549/* device attributes */
550static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
551 show_engine1_mode, store_engine1_mode);
552static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
553 show_engine2_mode, store_engine2_mode);
554static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
555 show_engine3_mode, store_engine3_mode);
556static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
557static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
558static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
559static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
560
561static struct attribute *lp5521_attributes[] = {
562 &dev_attr_engine1_mode.attr,
563 &dev_attr_engine2_mode.attr,
564 &dev_attr_engine3_mode.attr,
565 &dev_attr_selftest.attr,
566 NULL
567};
568
569static struct attribute *lp5521_engine1_attributes[] = {
570 &dev_attr_engine1_load.attr,
571 NULL
572};
573
574static struct attribute *lp5521_engine2_attributes[] = {
575 &dev_attr_engine2_load.attr,
576 NULL
577};
578
579static struct attribute *lp5521_engine3_attributes[] = {
580 &dev_attr_engine3_load.attr,
581 NULL
582};
583
584static const struct attribute_group lp5521_group = {
585 .attrs = lp5521_attributes,
586};
587
588static const struct attribute_group lp5521_engine_group[] = {
589 {.attrs = lp5521_engine1_attributes },
590 {.attrs = lp5521_engine2_attributes },
591 {.attrs = lp5521_engine3_attributes },
592};
593
594static int lp5521_register_sysfs(struct i2c_client *client)
595{
596 struct device *dev = &client->dev;
597 return sysfs_create_group(&dev->kobj, &lp5521_group);
598}
599
600static void lp5521_unregister_sysfs(struct i2c_client *client)
601{
602 struct lp5521_chip *chip = i2c_get_clientdata(client);
603 struct device *dev = &client->dev;
604 int i;
605
606 sysfs_remove_group(&dev->kobj, &lp5521_group);
607
608 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
609 if (chip->engines[i].mode == LP5521_CMD_LOAD)
610 sysfs_remove_group(&dev->kobj,
611 chip->engines[i].attributes);
612 }
613
614 for (i = 0; i < chip->num_leds; i++)
615 sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
616 &lp5521_led_attribute_group);
617}
618
619static int __init lp5521_init_led(struct lp5521_led *led,
620 struct i2c_client *client,
621 int chan, struct lp5521_platform_data *pdata)
622{
623 struct device *dev = &client->dev;
624 char name[32];
625 int res;
626
627 if (chan >= LP5521_MAX_LEDS)
628 return -EINVAL;
629
630 if (pdata->led_config[chan].led_current == 0)
631 return 0;
632
633 led->led_current = pdata->led_config[chan].led_current;
634 led->max_current = pdata->led_config[chan].max_current;
635 led->chan_nr = pdata->led_config[chan].chan_nr;
636
637 if (led->chan_nr >= LP5521_MAX_LEDS) {
638 dev_err(dev, "Use channel numbers between 0 and %d\n",
639 LP5521_MAX_LEDS - 1);
640 return -EINVAL;
641 }
642
643 snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
644 led->cdev.brightness_set = lp5521_set_brightness;
645 led->cdev.name = name;
646 res = led_classdev_register(dev, &led->cdev);
647 if (res < 0) {
648 dev_err(dev, "couldn't register led on channel %d\n", chan);
649 return res;
650 }
651
652 res = sysfs_create_group(&led->cdev.dev->kobj,
653 &lp5521_led_attribute_group);
654 if (res < 0) {
655 dev_err(dev, "couldn't register current attribute\n");
656 led_classdev_unregister(&led->cdev);
657 return res;
658 }
659 return 0;
660}
661
662static int lp5521_probe(struct i2c_client *client,
663 const struct i2c_device_id *id)
664{
665 struct lp5521_chip *chip;
666 struct lp5521_platform_data *pdata;
667 int ret, i, led;
668
669 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
670 if (!chip)
671 return -ENOMEM;
672
673 i2c_set_clientdata(client, chip);
674 chip->client = client;
675
676 pdata = client->dev.platform_data;
677
678 if (!pdata) {
679 dev_err(&client->dev, "no platform data\n");
680 ret = -EINVAL;
681 goto fail1;
682 }
683
684 mutex_init(&chip->lock);
685
686 chip->pdata = pdata;
687
688 if (pdata->setup_resources) {
689 ret = pdata->setup_resources();
690 if (ret < 0)
691 goto fail1;
692 }
693
694 if (pdata->enable) {
695 pdata->enable(0);
696 usleep_range(1000, 10000);
697 pdata->enable(1);
698 usleep_range(1000, 10000); /* Spec says min 500us */
699 }
700
701 ret = lp5521_detect(client);
702
703 if (ret) {
704 dev_err(&client->dev, "Chip not found\n");
705 goto fail2;
706 }
707
708 dev_info(&client->dev, "%s programmable led chip found\n", id->name);
709
710 ret = lp5521_configure(client, lp5521_engine_group);
711 if (ret < 0) {
712 dev_err(&client->dev, "error configuring chip\n");
713 goto fail2;
714 }
715
716 /* Initialize leds */
717 chip->num_channels = pdata->num_channels;
718 chip->num_leds = 0;
719 led = 0;
720 for (i = 0; i < pdata->num_channels; i++) {
721 /* Do not initialize channels that are not connected */
722 if (pdata->led_config[i].led_current == 0)
723 continue;
724
725 ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
726 if (ret) {
727 dev_err(&client->dev, "error initializing leds\n");
728 goto fail3;
729 }
730 chip->num_leds++;
731
732 chip->leds[led].id = led;
733 /* Set initial LED current */
734 lp5521_set_led_current(chip, led,
735 chip->leds[led].led_current);
736
737 INIT_WORK(&(chip->leds[led].brightness_work),
738 lp5521_led_brightness_work);
739
740 led++;
741 }
742
743 ret = lp5521_register_sysfs(client);
744 if (ret) {
745 dev_err(&client->dev, "registering sysfs failed\n");
746 goto fail3;
747 }
748 return ret;
749fail3:
750 for (i = 0; i < chip->num_leds; i++) {
751 led_classdev_unregister(&chip->leds[i].cdev);
752 cancel_work_sync(&chip->leds[i].brightness_work);
753 }
754fail2:
755 if (pdata->enable)
756 pdata->enable(0);
757 if (pdata->release_resources)
758 pdata->release_resources();
759fail1:
760 kfree(chip);
761 return ret;
762}
763
764static int lp5521_remove(struct i2c_client *client)
765{
766 struct lp5521_chip *chip = i2c_get_clientdata(client);
767 int i;
768
769 lp5521_unregister_sysfs(client);
770
771 for (i = 0; i < chip->num_leds; i++) {
772 led_classdev_unregister(&chip->leds[i].cdev);
773 cancel_work_sync(&chip->leds[i].brightness_work);
774 }
775
776 if (chip->pdata->enable)
777 chip->pdata->enable(0);
778 if (chip->pdata->release_resources)
779 chip->pdata->release_resources();
780 kfree(chip);
781 return 0;
782}
783
784static const struct i2c_device_id lp5521_id[] = {
785 { "lp5521", 0 }, /* Three channel chip */
786 { }
787};
788MODULE_DEVICE_TABLE(i2c, lp5521_id);
789
790static struct i2c_driver lp5521_driver = {
791 .driver = {
792 .name = "lp5521",
793 },
794 .probe = lp5521_probe,
795 .remove = lp5521_remove,
796 .id_table = lp5521_id,
797};
798
799static int __init lp5521_init(void)
800{
801 int ret;
802
803 ret = i2c_add_driver(&lp5521_driver);
804
805 if (ret < 0)
806 printk(KERN_ALERT "Adding lp5521 driver failed\n");
807
808 return ret;
809}
810
811static void __exit lp5521_exit(void)
812{
813 i2c_del_driver(&lp5521_driver);
814}
815
816module_init(lp5521_init);
817module_exit(lp5521_exit);
818
819MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
820MODULE_DESCRIPTION("LP5521 LED engine");
821MODULE_LICENSE("GPL v2");
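
Board code wires the new driver up through platform data. The sketch below is inferred from how lp5521_probe() and lp5521_init_led() dereference pdata above (the leds-lp5521.h header itself is not part of this hunk), and the current values are purely illustrative:

#include <linux/kernel.h>
#include <linux/leds-lp5521.h>

static struct lp5521_led_config board_led_config[] = {
	{
		.chan_nr	= 0,	/* R channel on this chip */
		.led_current	= 50,	/* 0 would mark the channel unconnected */
		.max_current	= 100,
	},
};

static struct lp5521_platform_data board_lp5521_pdata = {
	.led_config	= board_led_config,
	.num_channels	= ARRAY_SIZE(board_led_config),
	.clock_mode	= LP5521_CLOCK_EXT,
};

Once probed, engine programs are fed through the new sysfs files: put the engine in "load" mode via engineN_mode, then write whitespace-separated hex byte pairs to engineN_load; lp5521_do_store_load() rejects an odd byte count because each engine instruction is 16 bits.
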
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
new file mode 100644
index 000000000000..1e11fcc08b28
--- /dev/null
+++ b/drivers/leds/leds-lp5523.c
@@ -0,0 +1,1065 @@
1/*
2 * lp5523.c - LP5523 LED Driver
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/i2c.h>
26#include <linux/mutex.h>
27#include <linux/gpio.h>
28#include <linux/interrupt.h>
29#include <linux/delay.h>
30#include <linux/ctype.h>
31#include <linux/spinlock.h>
32#include <linux/wait.h>
33#include <linux/leds.h>
34#include <linux/leds-lp5523.h>
35#include <linux/workqueue.h>
36#include <linux/slab.h>
37
38#define LP5523_REG_ENABLE 0x00
39#define LP5523_REG_OP_MODE 0x01
40#define LP5523_REG_RATIOMETRIC_MSB 0x02
41#define LP5523_REG_RATIOMETRIC_LSB 0x03
42#define LP5523_REG_ENABLE_LEDS_MSB 0x04
43#define LP5523_REG_ENABLE_LEDS_LSB 0x05
44#define LP5523_REG_LED_CNTRL_BASE 0x06
45#define LP5523_REG_LED_PWM_BASE 0x16
46#define LP5523_REG_LED_CURRENT_BASE 0x26
47#define LP5523_REG_CONFIG 0x36
48#define LP5523_REG_CHANNEL1_PC 0x37
49#define LP5523_REG_CHANNEL2_PC 0x38
50#define LP5523_REG_CHANNEL3_PC 0x39
51#define LP5523_REG_STATUS 0x3a
52#define LP5523_REG_GPO 0x3b
53#define LP5523_REG_VARIABLE 0x3c
54#define LP5523_REG_RESET 0x3d
55#define LP5523_REG_TEMP_CTRL 0x3e
56#define LP5523_REG_TEMP_READ 0x3f
57#define LP5523_REG_TEMP_WRITE 0x40
58#define LP5523_REG_LED_TEST_CTRL 0x41
59#define LP5523_REG_LED_TEST_ADC 0x42
60#define LP5523_REG_ENG1_VARIABLE 0x45
61#define LP5523_REG_ENG2_VARIABLE 0x46
62#define LP5523_REG_ENG3_VARIABLE 0x47
63#define LP5523_REG_MASTER_FADER1 0x48
64#define LP5523_REG_MASTER_FADER2 0x49
65#define LP5523_REG_MASTER_FADER3 0x4a
66#define LP5523_REG_CH1_PROG_START 0x4c
67#define LP5523_REG_CH2_PROG_START 0x4d
68#define LP5523_REG_CH3_PROG_START 0x4e
69#define LP5523_REG_PROG_PAGE_SEL 0x4f
70#define LP5523_REG_PROG_MEM 0x50
71
72#define LP5523_CMD_LOAD 0x15 /* 00010101 */
73#define LP5523_CMD_RUN 0x2a /* 00101010 */
74#define LP5523_CMD_DISABLED 0x00 /* 00000000 */
75
76#define LP5523_ENABLE 0x40
77#define LP5523_AUTO_INC 0x40
78#define LP5523_PWR_SAVE 0x20
79#define LP5523_PWM_PWR_SAVE 0x04
80#define LP5523_CP_1 0x08
81#define LP5523_CP_1_5 0x10
82#define LP5523_CP_AUTO 0x18
83#define LP5523_INT_CLK 0x01
84#define LP5523_AUTO_CLK 0x02
85#define LP5523_EN_LEDTEST 0x80
86#define LP5523_LEDTEST_DONE 0x80
87
88#define LP5523_DEFAULT_CURRENT 50 /* microAmps */
89#define LP5523_PROGRAM_LENGTH 32 /* in bytes */
90#define LP5523_PROGRAM_PAGES 6
91#define LP5523_ADC_SHORTCIRC_LIM 80
92
93#define LP5523_LEDS 9
94#define LP5523_ENGINES 3
95
96#define LP5523_ENG_MASK_BASE 0x30 /* 00110000 */
97
98#define LP5523_ENG_STATUS_MASK 0x07 /* 00000111 */
99
100#define LP5523_IRQ_FLAGS IRQF_TRIGGER_FALLING
101
102#define LP5523_EXT_CLK_USED 0x08
103
104#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led)))
105#define SHIFT_MASK(id) (((id) - 1) * 2)
106
107struct lp5523_engine {
108 const struct attribute_group *attributes;
109 int id;
110 u8 mode;
111 u8 prog_page;
112 u8 mux_page;
113 u16 led_mux;
114 u8 engine_mask;
115};
116
117struct lp5523_led {
118 int id;
119 u8 chan_nr;
120 u8 led_current;
121 u8 max_current;
122 struct led_classdev cdev;
123 struct work_struct brightness_work;
124 u8 brightness;
125};
126
127struct lp5523_chip {
128 struct mutex lock; /* Serialize control */
129 struct i2c_client *client;
130 struct lp5523_engine engines[LP5523_ENGINES];
131 struct lp5523_led leds[LP5523_LEDS];
132 struct lp5523_platform_data *pdata;
133 u8 num_channels;
134 u8 num_leds;
135};
136
137#define cdev_to_led(c) container_of(c, struct lp5523_led, cdev)
138
139static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
140{
141 return container_of(engine, struct lp5523_chip,
142 engines[engine->id - 1]);
143}
144
145static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
146{
147 return container_of(led, struct lp5523_chip,
148 leds[led->id]);
149}
150
151static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
152static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
153static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern);
154
155static void lp5523_led_brightness_work(struct work_struct *work);
156
157static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
158{
159 return i2c_smbus_write_byte_data(client, reg, value);
160}
161
162static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
163{
164 s32 ret = i2c_smbus_read_byte_data(client, reg);
165
166 if (ret < 0)
167 return -EIO;
168
169 *buf = ret;
170 return 0;
171}
172
173static int lp5523_detect(struct i2c_client *client)
174{
175 int ret;
176 u8 buf;
177
178 ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40);
179 if (ret)
180 return ret;
181 ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
182 if (ret)
183 return ret;
184 if (buf == 0x40)
185 return 0;
186 else
187 return -ENODEV;
188}
189
190static int lp5523_configure(struct i2c_client *client)
191{
192 struct lp5523_chip *chip = i2c_get_clientdata(client);
193 int ret = 0;
194 u8 status;
195
196	/* one pattern per engine, setting the led mux start and stop addresses */
197 u8 pattern[][LP5523_PROGRAM_LENGTH] = {
198 { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
199 { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
200 { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
201 };
202
203 lp5523_write(client, LP5523_REG_RESET, 0xff);
204
205 usleep_range(10000, 100000);
206
207 ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
208 /* Chip startup time after reset is 500 us */
209 usleep_range(1000, 10000);
210
211 ret |= lp5523_write(client, LP5523_REG_CONFIG,
212 LP5523_AUTO_INC | LP5523_PWR_SAVE |
213 LP5523_CP_AUTO | LP5523_AUTO_CLK |
214 LP5523_PWM_PWR_SAVE);
215
216 /* turn on all leds */
217 ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
218 ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
219
220	/* hardcode 32 bytes of program memory for each engine */
221 ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00);
222 ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10);
223 ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20);
224
225 /* write led mux address space for each channel */
226 ret |= lp5523_load_program(&chip->engines[0], pattern[0]);
227 ret |= lp5523_load_program(&chip->engines[1], pattern[1]);
228 ret |= lp5523_load_program(&chip->engines[2], pattern[2]);
229
230 if (ret) {
231 dev_err(&client->dev, "could not load mux programs\n");
232 return -1;
233 }
234
235	/* set all engines' exec state and mode to run (00101010) */
236 ret |= lp5523_write(client, LP5523_REG_ENABLE,
237 (LP5523_CMD_RUN | LP5523_ENABLE));
238
239 ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN);
240
241 if (ret) {
242 dev_err(&client->dev, "could not start mux programs\n");
243 return -1;
244 }
245
246 /* Wait 3ms and check the engine status */
247 usleep_range(3000, 20000);
248 lp5523_read(client, LP5523_REG_STATUS, &status);
249 status &= LP5523_ENG_STATUS_MASK;
250
251 if (status == LP5523_ENG_STATUS_MASK) {
252 dev_dbg(&client->dev, "all engines configured\n");
253 } else {
254 dev_info(&client->dev, "status == %x\n", status);
255		dev_err(&client->dev, "could not configure LED engine\n");
256 return -1;
257 }
258
259 dev_info(&client->dev, "disabling engines\n");
260
261 ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
262
263 return ret;
264}
265
266static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode)
267{
268 struct lp5523_chip *chip = engine_to_lp5523(engine);
269 struct i2c_client *client = chip->client;
270 int ret;
271 u8 engine_state;
272
273 ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state);
274 if (ret)
275 goto fail;
276
277 engine_state &= ~(engine->engine_mask);
278
279 /* set mode only for this engine */
280 mode &= engine->engine_mask;
281
282 engine_state |= mode;
283
284 ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state);
285fail:
286 return ret;
287}
288
289static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux)
290{
291 struct lp5523_chip *chip = engine_to_lp5523(engine);
292 struct i2c_client *client = chip->client;
293 int ret = 0;
294
295 ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
296
297 ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page);
298 ret |= lp5523_write(client, LP5523_REG_PROG_MEM,
299 (u8)(mux >> 8));
300 ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux));
301 engine->led_mux = mux;
302
303 return ret;
304}
305
306static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern)
307{
308 struct lp5523_chip *chip = engine_to_lp5523(engine);
309 struct i2c_client *client = chip->client;
310
311 int ret = 0;
312
313 ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
314
315 ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL,
316 engine->prog_page);
317 ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM,
318 LP5523_PROGRAM_LENGTH, pattern);
319
320 return ret;
321}
322
323static int lp5523_run_program(struct lp5523_engine *engine)
324{
325 struct lp5523_chip *chip = engine_to_lp5523(engine);
326 struct i2c_client *client = chip->client;
327 int ret;
328
329 ret = lp5523_write(client, LP5523_REG_ENABLE,
330 LP5523_CMD_RUN | LP5523_ENABLE);
331 if (ret)
332 goto fail;
333
334 ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN);
335fail:
336 return ret;
337}
338
339static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
340{
341 int i;
342 u16 tmp_mux = 0;
343 len = len < LP5523_LEDS ? len : LP5523_LEDS;
344 for (i = 0; i < len; i++) {
345 switch (buf[i]) {
346 case '1':
347 tmp_mux |= (1 << i);
348 break;
349 case '0':
350 break;
351 case '\n':
352 i = len;
353 break;
354 default:
355 return -1;
356 }
357 }
358 *mux = tmp_mux;
359
360 return 0;
361}
362
363static void lp5523_mux_to_array(u16 led_mux, char *array)
364{
365 int i, pos = 0;
366 for (i = 0; i < LP5523_LEDS; i++)
367 pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
368
369 array[pos] = '\0';
370}
371
372/*--------------------------------------------------------------*/
373/* Sysfs interface */
374/*--------------------------------------------------------------*/
375
376static ssize_t show_engine_leds(struct device *dev,
377 struct device_attribute *attr,
378 char *buf, int nr)
379{
380 struct i2c_client *client = to_i2c_client(dev);
381 struct lp5523_chip *chip = i2c_get_clientdata(client);
382 char mux[LP5523_LEDS + 1];
383
384 lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
385
386 return sprintf(buf, "%s\n", mux);
387}
388
389#define show_leds(nr) \
390static ssize_t show_engine##nr##_leds(struct device *dev, \
391 struct device_attribute *attr, \
392 char *buf) \
393{ \
394 return show_engine_leds(dev, attr, buf, nr); \
395}
396show_leds(1)
397show_leds(2)
398show_leds(3)
399
400static ssize_t store_engine_leds(struct device *dev,
401 struct device_attribute *attr,
402 const char *buf, size_t len, int nr)
403{
404 struct i2c_client *client = to_i2c_client(dev);
405 struct lp5523_chip *chip = i2c_get_clientdata(client);
406 u16 mux = 0;
407
408 if (lp5523_mux_parse(buf, &mux, len))
409 return -EINVAL;
410
411 if (lp5523_load_mux(&chip->engines[nr - 1], mux))
412 return -EINVAL;
413
414 return len;
415}
416
417#define store_leds(nr) \
418static ssize_t store_engine##nr##_leds(struct device *dev, \
419 struct device_attribute *attr, \
420 const char *buf, size_t len) \
421{ \
422 return store_engine_leds(dev, attr, buf, len, nr); \
423}
424store_leds(1)
425store_leds(2)
426store_leds(3)
427
428static ssize_t lp5523_selftest(struct device *dev,
429 struct device_attribute *attr,
430 char *buf)
431{
432 struct i2c_client *client = to_i2c_client(dev);
433 struct lp5523_chip *chip = i2c_get_clientdata(client);
434 int i, ret, pos = 0;
435 int led = 0;
436 u8 status, adc, vdd;
437
438 mutex_lock(&chip->lock);
439
440 ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
441 if (ret < 0)
442 goto fail;
443
444 /* Check that ext clock is really in use if requested */
445 if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT))
446 if ((status & LP5523_EXT_CLK_USED) == 0)
447 goto fail;
448
449 /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
450 lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
451 LP5523_EN_LEDTEST | 16);
452 usleep_range(3000, 10000);
453 ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
454 if (!(status & LP5523_LEDTEST_DONE))
455 usleep_range(3000, 10000);
456
457 ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
458 vdd--; /* There may be some fluctuation in measurement */
459
460 for (i = 0; i < LP5523_LEDS; i++) {
461 /* Skip non-existing channels */
462 if (chip->pdata->led_config[i].led_current == 0)
463 continue;
464
465 /* Set default current */
466 lp5523_write(chip->client,
467 LP5523_REG_LED_CURRENT_BASE + i,
468 chip->pdata->led_config[i].led_current);
469
470 lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
471 /* let current stabilize 2ms before measurements start */
472 usleep_range(2000, 10000);
473 lp5523_write(chip->client,
474 LP5523_REG_LED_TEST_CTRL,
475 LP5523_EN_LEDTEST | i);
476 /* ledtest takes 2.7ms */
477 usleep_range(3000, 10000);
478 ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
479 if (!(status & LP5523_LEDTEST_DONE))
480 usleep_range(3000, 10000);
481 ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
482
483 if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
484 pos += sprintf(buf + pos, "LED %d FAIL\n", i);
485
486 lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00);
487
488 /* Restore current */
489 lp5523_write(chip->client,
490 LP5523_REG_LED_CURRENT_BASE + i,
491 chip->leds[led].led_current);
492 led++;
493 }
494 if (pos == 0)
495 pos = sprintf(buf, "OK\n");
496 goto release_lock;
497fail:
498 pos = sprintf(buf, "FAIL\n");
499
500release_lock:
501 mutex_unlock(&chip->lock);
502
503 return pos;
504}
505
506static void lp5523_set_brightness(struct led_classdev *cdev,
507 enum led_brightness brightness)
508{
509 struct lp5523_led *led = cdev_to_led(cdev);
510
511 led->brightness = (u8)brightness;
512
513 schedule_work(&led->brightness_work);
514}
515
516static void lp5523_led_brightness_work(struct work_struct *work)
517{
518 struct lp5523_led *led = container_of(work,
519 struct lp5523_led,
520 brightness_work);
521 struct lp5523_chip *chip = led_to_lp5523(led);
522 struct i2c_client *client = chip->client;
523
524 mutex_lock(&chip->lock);
525
526 lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr,
527 led->brightness);
528
529 mutex_unlock(&chip->lock);
530}
531
532static int lp5523_do_store_load(struct lp5523_engine *engine,
533 const char *buf, size_t len)
534{
535 struct lp5523_chip *chip = engine_to_lp5523(engine);
536 struct i2c_client *client = chip->client;
537 int ret, nrchars, offset = 0, i = 0;
538 char c[3];
539 unsigned cmd;
540 u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
541
542 while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
544		/* separate sscanfs because the field width works only for %s */
544 ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
545 ret = sscanf(c, "%2x", &cmd);
546 if (ret != 1)
547 goto fail;
548 pattern[i] = (u8)cmd;
549
550 offset += nrchars;
551 i++;
552 }
553
554	/* Each instruction is 16 bits long. Check that the length is even */
555 if (i % 2)
556 goto fail;
557
558 mutex_lock(&chip->lock);
559
560 ret = lp5523_load_program(engine, pattern);
561 mutex_unlock(&chip->lock);
562
563 if (ret) {
564 dev_err(&client->dev, "failed loading pattern\n");
565 return ret;
566 }
567
568 return len;
569fail:
570 dev_err(&client->dev, "wrong pattern format\n");
571 return -EINVAL;
572}
573
574static ssize_t store_engine_load(struct device *dev,
575 struct device_attribute *attr,
576 const char *buf, size_t len, int nr)
577{
578 struct i2c_client *client = to_i2c_client(dev);
579 struct lp5523_chip *chip = i2c_get_clientdata(client);
580 return lp5523_do_store_load(&chip->engines[nr - 1], buf, len);
581}
582
583#define store_load(nr) \
584static ssize_t store_engine##nr##_load(struct device *dev, \
585 struct device_attribute *attr, \
586 const char *buf, size_t len) \
587{ \
588 return store_engine_load(dev, attr, buf, len, nr); \
589}
590store_load(1)
591store_load(2)
592store_load(3)
593
594static ssize_t show_engine_mode(struct device *dev,
595 struct device_attribute *attr,
596 char *buf, int nr)
597{
598 struct i2c_client *client = to_i2c_client(dev);
599 struct lp5523_chip *chip = i2c_get_clientdata(client);
600 switch (chip->engines[nr - 1].mode) {
601 case LP5523_CMD_RUN:
602 return sprintf(buf, "run\n");
603 case LP5523_CMD_LOAD:
604 return sprintf(buf, "load\n");
605 case LP5523_CMD_DISABLED:
606 return sprintf(buf, "disabled\n");
607 default:
608 return sprintf(buf, "disabled\n");
609 }
610}
611
612#define show_mode(nr) \
613static ssize_t show_engine##nr##_mode(struct device *dev, \
614 struct device_attribute *attr, \
615 char *buf) \
616{ \
617 return show_engine_mode(dev, attr, buf, nr); \
618}
619show_mode(1)
620show_mode(2)
621show_mode(3)
622
623static ssize_t store_engine_mode(struct device *dev,
624 struct device_attribute *attr,
625 const char *buf, size_t len, int nr)
626{
627 struct i2c_client *client = to_i2c_client(dev);
628 struct lp5523_chip *chip = i2c_get_clientdata(client);
629 struct lp5523_engine *engine = &chip->engines[nr - 1];
630 mutex_lock(&chip->lock);
631
632 if (!strncmp(buf, "run", 3))
633 lp5523_set_mode(engine, LP5523_CMD_RUN);
634 else if (!strncmp(buf, "load", 4))
635 lp5523_set_mode(engine, LP5523_CMD_LOAD);
636 else if (!strncmp(buf, "disabled", 8))
637 lp5523_set_mode(engine, LP5523_CMD_DISABLED);
638
639 mutex_unlock(&chip->lock);
640 return len;
641}
642
643#define store_mode(nr) \
644static ssize_t store_engine##nr##_mode(struct device *dev, \
645 struct device_attribute *attr, \
646 const char *buf, size_t len) \
647{ \
648 return store_engine_mode(dev, attr, buf, len, nr); \
649}
650store_mode(1)
651store_mode(2)
652store_mode(3)
653
654static ssize_t show_max_current(struct device *dev,
655 struct device_attribute *attr,
656 char *buf)
657{
658 struct led_classdev *led_cdev = dev_get_drvdata(dev);
659 struct lp5523_led *led = cdev_to_led(led_cdev);
660
661 return sprintf(buf, "%d\n", led->max_current);
662}
663
664static ssize_t show_current(struct device *dev,
665 struct device_attribute *attr,
666 char *buf)
667{
668 struct led_classdev *led_cdev = dev_get_drvdata(dev);
669 struct lp5523_led *led = cdev_to_led(led_cdev);
670
671 return sprintf(buf, "%d\n", led->led_current);
672}
673
674static ssize_t store_current(struct device *dev,
675 struct device_attribute *attr,
676 const char *buf, size_t len)
677{
678 struct led_classdev *led_cdev = dev_get_drvdata(dev);
679 struct lp5523_led *led = cdev_to_led(led_cdev);
680 struct lp5523_chip *chip = led_to_lp5523(led);
681 ssize_t ret;
682 unsigned long curr;
683
684 if (strict_strtoul(buf, 0, &curr))
685 return -EINVAL;
686
687 if (curr > led->max_current)
688 return -EINVAL;
689
690 mutex_lock(&chip->lock);
691 ret = lp5523_write(chip->client,
692 LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
693 (u8)curr);
694 mutex_unlock(&chip->lock);
695
696 if (ret < 0)
697 return ret;
698
699 led->led_current = (u8)curr;
700
701 return len;
702}
703
704/* led class device attributes */
705static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
706static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL);
707
708static struct attribute *lp5523_led_attributes[] = {
709 &dev_attr_led_current.attr,
710 &dev_attr_max_current.attr,
711 NULL,
712};
713
714static struct attribute_group lp5523_led_attribute_group = {
715 .attrs = lp5523_led_attributes
716};
717
718/* device attributes */
719static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
720 show_engine1_mode, store_engine1_mode);
721static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
722 show_engine2_mode, store_engine2_mode);
723static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
724 show_engine3_mode, store_engine3_mode);
725static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO,
726 show_engine1_leds, store_engine1_leds);
727static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO,
728 show_engine2_leds, store_engine2_leds);
729static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO,
730 show_engine3_leds, store_engine3_leds);
731static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
732static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
733static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
734static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
735
736static struct attribute *lp5523_attributes[] = {
737 &dev_attr_engine1_mode.attr,
738 &dev_attr_engine2_mode.attr,
739 &dev_attr_engine3_mode.attr,
740 &dev_attr_selftest.attr,
741 NULL
742};
743
744static struct attribute *lp5523_engine1_attributes[] = {
745 &dev_attr_engine1_load.attr,
746 &dev_attr_engine1_leds.attr,
747 NULL
748};
749
750static struct attribute *lp5523_engine2_attributes[] = {
751 &dev_attr_engine2_load.attr,
752 &dev_attr_engine2_leds.attr,
753 NULL
754};
755
756static struct attribute *lp5523_engine3_attributes[] = {
757 &dev_attr_engine3_load.attr,
758 &dev_attr_engine3_leds.attr,
759 NULL
760};
761
762static const struct attribute_group lp5523_group = {
763 .attrs = lp5523_attributes,
764};
765
766static const struct attribute_group lp5523_engine_group[] = {
767 {.attrs = lp5523_engine1_attributes },
768 {.attrs = lp5523_engine2_attributes },
769 {.attrs = lp5523_engine3_attributes },
770};
771
772static int lp5523_register_sysfs(struct i2c_client *client)
773{
774 struct device *dev = &client->dev;
775 int ret;
776
777 ret = sysfs_create_group(&dev->kobj, &lp5523_group);
778 if (ret < 0)
779 return ret;
780
781 return 0;
782}
783
784static void lp5523_unregister_sysfs(struct i2c_client *client)
785{
786 struct lp5523_chip *chip = i2c_get_clientdata(client);
787 struct device *dev = &client->dev;
788 int i;
789
790 sysfs_remove_group(&dev->kobj, &lp5523_group);
791
792 for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
793 if (chip->engines[i].mode == LP5523_CMD_LOAD)
794 sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
795
796 for (i = 0; i < chip->num_leds; i++)
797 sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
798 &lp5523_led_attribute_group);
799}
800
801/*--------------------------------------------------------------*/
802/* Set chip operating mode */
803/*--------------------------------------------------------------*/
804static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
805{
806 /* engine to chip */
807 struct lp5523_chip *chip = engine_to_lp5523(engine);
808 struct i2c_client *client = chip->client;
809 struct device *dev = &client->dev;
810 int ret = 0;
811
812	/* if already in that mode, do nothing (except for run) */
813 if (mode == engine->mode && mode != LP5523_CMD_RUN)
814 return 0;
815
816 if (mode == LP5523_CMD_RUN) {
817 ret = lp5523_run_program(engine);
818 } else if (mode == LP5523_CMD_LOAD) {
819 lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
820 lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
821
822 ret = sysfs_create_group(&dev->kobj, engine->attributes);
823 if (ret)
824 return ret;
825 } else if (mode == LP5523_CMD_DISABLED) {
826 lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
827 }
828
829 /* remove load attribute from sysfs if not in load mode */
830 if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
831 sysfs_remove_group(&dev->kobj, engine->attributes);
832
833 engine->mode = mode;
834
835 return ret;
836}
837
838/*--------------------------------------------------------------*/
839/* Probe, Attach, Remove */
840/*--------------------------------------------------------------*/
841static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
842{
843 if (id < 1 || id > LP5523_ENGINES)
844 return -1;
845 engine->id = id;
846 engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
847 engine->prog_page = id - 1;
848 engine->mux_page = id + 2;
849 engine->attributes = &lp5523_engine_group[id - 1];
850
851 return 0;
852}
853
854static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
855 int chan, struct lp5523_platform_data *pdata)
856{
857 char name[32];
858 int res;
859
860 if (chan >= LP5523_LEDS)
861 return -EINVAL;
862
863 if (pdata->led_config[chan].led_current) {
864 led->led_current = pdata->led_config[chan].led_current;
865 led->max_current = pdata->led_config[chan].max_current;
866 led->chan_nr = pdata->led_config[chan].chan_nr;
867
868 if (led->chan_nr >= LP5523_LEDS) {
869 dev_err(dev, "Use channel numbers between 0 and %d\n",
870 LP5523_LEDS - 1);
871 return -EINVAL;
872 }
873
874 snprintf(name, 32, "lp5523:channel%d", chan);
875
876 led->cdev.name = name;
877 led->cdev.brightness_set = lp5523_set_brightness;
878 res = led_classdev_register(dev, &led->cdev);
879 if (res < 0) {
880 dev_err(dev, "couldn't register led on channel %d\n",
881 chan);
882 return res;
883 }
884 res = sysfs_create_group(&led->cdev.dev->kobj,
885 &lp5523_led_attribute_group);
886 if (res < 0) {
887 dev_err(dev, "couldn't register current attribute\n");
888 led_classdev_unregister(&led->cdev);
889 return res;
890 }
891 } else {
892 led->led_current = 0;
893 }
894 return 0;
895}
896
897static struct i2c_driver lp5523_driver;
898
899static int lp5523_probe(struct i2c_client *client,
900 const struct i2c_device_id *id)
901{
902 struct lp5523_chip *chip;
903 struct lp5523_platform_data *pdata;
904 int ret, i, led;
905
906 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
907 if (!chip)
908 return -ENOMEM;
909
910 i2c_set_clientdata(client, chip);
911 chip->client = client;
912
913 pdata = client->dev.platform_data;
914
915 if (!pdata) {
916 dev_err(&client->dev, "no platform data\n");
917 ret = -EINVAL;
918 goto fail1;
919 }
920
921 mutex_init(&chip->lock);
922
923 chip->pdata = pdata;
924
925 if (pdata->setup_resources) {
926 ret = pdata->setup_resources();
927 if (ret < 0)
928 goto fail1;
929 }
930
931 if (pdata->enable) {
932 pdata->enable(0);
933 usleep_range(1000, 10000);
934 pdata->enable(1);
935 usleep_range(1000, 10000); /* Spec says min 500us */
936 }
937
938 ret = lp5523_detect(client);
939 if (ret)
940 goto fail2;
941
942 dev_info(&client->dev, "LP5523 Programmable led chip found\n");
943
944 /* Initialize engines */
945 for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
946 ret = lp5523_init_engine(&chip->engines[i], i + 1);
947 if (ret) {
948 dev_err(&client->dev, "error initializing engine\n");
949 goto fail2;
950 }
951 }
952 ret = lp5523_configure(client);
953 if (ret < 0) {
954 dev_err(&client->dev, "error configuring chip\n");
955 goto fail2;
956 }
957
958 /* Initialize leds */
959 chip->num_channels = pdata->num_channels;
960 chip->num_leds = 0;
961 led = 0;
962 for (i = 0; i < pdata->num_channels; i++) {
963 /* Do not initialize channels that are not connected */
964 if (pdata->led_config[i].led_current == 0)
965 continue;
966
967 ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata);
968 if (ret) {
969 dev_err(&client->dev, "error initializing leds\n");
970 goto fail3;
971 }
972 chip->num_leds++;
973
974 chip->leds[led].id = led;
975 /* Set LED current */
976 lp5523_write(client,
977 LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
978 chip->leds[led].led_current);
979
980 INIT_WORK(&(chip->leds[led].brightness_work),
981 lp5523_led_brightness_work);
982
983 led++;
984 }
985
986 ret = lp5523_register_sysfs(client);
987 if (ret) {
988 dev_err(&client->dev, "registering sysfs failed\n");
989 goto fail3;
990 }
991 return ret;
992fail3:
993 for (i = 0; i < chip->num_leds; i++) {
994 led_classdev_unregister(&chip->leds[i].cdev);
995 cancel_work_sync(&chip->leds[i].brightness_work);
996 }
997fail2:
998 if (pdata->enable)
999 pdata->enable(0);
1000 if (pdata->release_resources)
1001 pdata->release_resources();
1002fail1:
1003 kfree(chip);
1004 return ret;
1005}
1006
1007static int lp5523_remove(struct i2c_client *client)
1008{
1009 struct lp5523_chip *chip = i2c_get_clientdata(client);
1010 int i;
1011
1012 lp5523_unregister_sysfs(client);
1013
1014 for (i = 0; i < chip->num_leds; i++) {
1015 led_classdev_unregister(&chip->leds[i].cdev);
1016 cancel_work_sync(&chip->leds[i].brightness_work);
1017 }
1018
1019 if (chip->pdata->enable)
1020 chip->pdata->enable(0);
1021 if (chip->pdata->release_resources)
1022 chip->pdata->release_resources();
1023 kfree(chip);
1024 return 0;
1025}
1026
1027static const struct i2c_device_id lp5523_id[] = {
1028 { "lp5523", 0 },
1029 { }
1030};
1031
1032MODULE_DEVICE_TABLE(i2c, lp5523_id);
1033
1034static struct i2c_driver lp5523_driver = {
1035 .driver = {
1036 .name = "lp5523",
1037 },
1038 .probe = lp5523_probe,
1039 .remove = lp5523_remove,
1040 .id_table = lp5523_id,
1041};
1042
1043static int __init lp5523_init(void)
1044{
1045 int ret;
1046
1047 ret = i2c_add_driver(&lp5523_driver);
1048
1049 if (ret < 0)
1050 printk(KERN_ALERT "Adding lp5523 driver failed\n");
1051
1052 return ret;
1053}
1054
1055static void __exit lp5523_exit(void)
1056{
1057 i2c_del_driver(&lp5523_driver);
1058}
1059
1060module_init(lp5523_init);
1061module_exit(lp5523_exit);
1062
1063MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
1064MODULE_DESCRIPTION("LP5523 LED engine");
1065MODULE_LICENSE("GPL");
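The probe path above bails out with -EINVAL when no platform data is supplied, so every user of this driver needs a struct lp5523_platform_data from board code. As a rough, hypothetical sketch -- the field names and types are inferred from the pdata accesses in lp5523_probe() and lp5523_init_led() above, the header path and BOARD_LP5523_EN_GPIO are placeholders, and this is not taken from a real board file -- registration could look like:

#include <linux/gpio.h>
#include <linux/leds-lp5523.h>	/* assumed header providing the pdata types */

/* Hypothetical board-file sketch; layout inferred from the driver above. */
static struct lp5523_led_config board_lp5523_leds[] = {
	{
		.chan_nr	= 0,	/* LED wired to channel 0 */
		.led_current	= 50,	/* written as-is to the per-channel
					 * current register by the probe loop */
		.max_current	= 130,
	},
	/* channels with .led_current == 0 are treated as unconnected
	 * and skipped by lp5523_probe() */
};

static void board_lp5523_enable(bool state)
{
	gpio_set_value(BOARD_LP5523_EN_GPIO, state);	/* placeholder GPIO */
}

static struct lp5523_platform_data board_lp5523_pdata = {
	.led_config	= board_lp5523_leds,
	.num_channels	= ARRAY_SIZE(board_lp5523_leds),
	.enable		= board_lp5523_enable,
};

The chip would then be declared as an ordinary I2C device with id string "lp5523" and .platform_data pointing at board_lp5523_pdata.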
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 82b77bd482ff..b09bcbeade9c 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -12,73 +12,25 @@
  */

 #include <linux/module.h>
-#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
-#include <linux/timer.h>
 #include <linux/ctype.h>
 #include <linux/leds.h>
-#include <linux/slab.h>
 #include "leds.h"

-struct timer_trig_data {
-	int brightness_on;		/* LED brightness during "on" period.
-					 * (LED_OFF < brightness_on <= LED_FULL)
-					 */
-	unsigned long delay_on;		/* milliseconds on */
-	unsigned long delay_off;	/* milliseconds off */
-	struct timer_list timer;
-};
-
-static void led_timer_function(unsigned long data)
-{
-	struct led_classdev *led_cdev = (struct led_classdev *) data;
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
-	unsigned long brightness;
-	unsigned long delay;
-
-	if (!timer_data->delay_on || !timer_data->delay_off) {
-		led_set_brightness(led_cdev, LED_OFF);
-		return;
-	}
-
-	brightness = led_get_brightness(led_cdev);
-	if (!brightness) {
-		/* Time to switch the LED on. */
-		brightness = timer_data->brightness_on;
-		delay = timer_data->delay_on;
-	} else {
-		/* Store the current brightness value to be able
-		 * to restore it when the delay_off period is over.
-		 */
-		timer_data->brightness_on = brightness;
-		brightness = LED_OFF;
-		delay = timer_data->delay_off;
-	}
-
-	led_set_brightness(led_cdev, brightness);
-
-	mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
-}
-
 static ssize_t led_delay_on_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;

-	return sprintf(buf, "%lu\n", timer_data->delay_on);
+	return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
 }

 static ssize_t led_delay_on_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t size)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
 	int ret = -EINVAL;
 	char *after;
 	unsigned long state = simple_strtoul(buf, &after, 10);
@@ -88,21 +40,7 @@ static ssize_t led_delay_on_store(struct device *dev,
 		count++;

 	if (count == size) {
-		if (timer_data->delay_on != state) {
-			/* the new value differs from the previous */
-			timer_data->delay_on = state;
-
-			/* deactivate previous settings */
-			del_timer_sync(&timer_data->timer);
-
-			/* try to activate hardware acceleration, if any */
-			if (!led_cdev->blink_set ||
-			    led_cdev->blink_set(led_cdev,
-			      &timer_data->delay_on, &timer_data->delay_off)) {
-				/* no hardware acceleration, blink via timer */
-				mod_timer(&timer_data->timer, jiffies + 1);
-			}
-		}
+		led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
 		ret = count;
 	}

@@ -113,16 +51,14 @@ static ssize_t led_delay_off_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;

-	return sprintf(buf, "%lu\n", timer_data->delay_off);
+	return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
 }

 static ssize_t led_delay_off_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t size)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
 	int ret = -EINVAL;
 	char *after;
 	unsigned long state = simple_strtoul(buf, &after, 10);
@@ -132,21 +68,7 @@ static ssize_t led_delay_off_store(struct device *dev,
 		count++;

 	if (count == size) {
-		if (timer_data->delay_off != state) {
-			/* the new value differs from the previous */
-			timer_data->delay_off = state;
-
-			/* deactivate previous settings */
-			del_timer_sync(&timer_data->timer);
-
-			/* try to activate hardware acceleration, if any */
-			if (!led_cdev->blink_set ||
-			    led_cdev->blink_set(led_cdev,
-			      &timer_data->delay_on, &timer_data->delay_off)) {
-				/* no hardware acceleration, blink via timer */
-				mod_timer(&timer_data->timer, jiffies + 1);
-			}
-		}
+		led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
 		ret = count;
 	}

@@ -158,60 +80,34 @@ static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);

 static void timer_trig_activate(struct led_classdev *led_cdev)
 {
-	struct timer_trig_data *timer_data;
 	int rc;

-	timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL);
-	if (!timer_data)
-		return;
-
-	timer_data->brightness_on = led_get_brightness(led_cdev);
-	if (timer_data->brightness_on == LED_OFF)
-		timer_data->brightness_on = led_cdev->max_brightness;
-	led_cdev->trigger_data = timer_data;
-
-	init_timer(&timer_data->timer);
-	timer_data->timer.function = led_timer_function;
-	timer_data->timer.data = (unsigned long) led_cdev;
+	led_cdev->trigger_data = NULL;

 	rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
 	if (rc)
-		goto err_out;
+		return;
 	rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
 	if (rc)
 		goto err_out_delayon;

-	/* If there is hardware support for blinking, start one
-	 * user friendly blink rate chosen by the driver.
-	 */
-	if (led_cdev->blink_set)
-		led_cdev->blink_set(led_cdev,
-			&timer_data->delay_on, &timer_data->delay_off);
+	led_cdev->trigger_data = (void *)1;

 	return;

 err_out_delayon:
 	device_remove_file(led_cdev->dev, &dev_attr_delay_on);
-err_out:
-	led_cdev->trigger_data = NULL;
-	kfree(timer_data);
 }

 static void timer_trig_deactivate(struct led_classdev *led_cdev)
 {
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
-	unsigned long on = 0, off = 0;
-
-	if (timer_data) {
+	if (led_cdev->trigger_data) {
 		device_remove_file(led_cdev->dev, &dev_attr_delay_on);
 		device_remove_file(led_cdev->dev, &dev_attr_delay_off);
-		del_timer_sync(&timer_data->timer);
-		kfree(timer_data);
 	}

-	/* If there is hardware support for blinking, stop it */
-	if (led_cdev->blink_set)
-		led_cdev->blink_set(led_cdev, &on, &off);
+	/* Stop blinking */
+	led_brightness_set(led_cdev, LED_OFF);
 }

 static struct led_trigger timer_led_trigger = {
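The net effect of this diff is that the timer trigger no longer owns a timer at all: both sysfs stores now call led_blink_set(), which first offers the delays to the driver's blink_set() hook and falls back to the LED core's software timer only if that fails. For reference, a driver-side hook honoring the contract visible in the removed code (return 0 when hardware took over, nonzero to request software blinking) might look like this sketch; the mychip_* names are illustrative, not from this patch:

/* provided elsewhere by the hypothetical driver */
static void mychip_hw_set_period(struct led_classdev *led_cdev,
				 unsigned long ms);

static int mychip_blink_set(struct led_classdev *led_cdev,
			    unsigned long *delay_on,
			    unsigned long *delay_off)
{
	/* 0/0 asks the driver to pick a sane default rate */
	if (!*delay_on && !*delay_off)
		*delay_on = *delay_off = 500;	/* ms */

	/* hypothetical hardware restriction: symmetric periods only */
	if (*delay_on != *delay_off)
		return -EINVAL;	/* core falls back to timer blinking */

	mychip_hw_set_period(led_cdev, *delay_on);
	return 0;
}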
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index 444696625171..f5f4da3d0b67 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -80,7 +80,7 @@ static void adb_iop_end_req(struct adb_request *req, int state)
 static void adb_iop_complete(struct iop_msg *msg)
 {
 	struct adb_request *req;
-	uint flags;
+	unsigned long flags;

 	local_irq_save(flags);

@@ -103,7 +103,7 @@ static void adb_iop_listen(struct iop_msg *msg)
 {
 	struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message;
 	struct adb_request *req;
-	uint flags;
+	unsigned long flags;
 #ifdef DEBUG_ADB_IOP
 	int i;
 #endif
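Both hunks are the same fix: local_irq_save() is defined to store the processor's full flags word through an unsigned long, so declaring the variable as uint is wrong even on machines where the two happen to be the same width. The required pattern, as a minimal sketch:

#include <linux/irqflags.h>

static void demo_touch_shared_state(void)
{
	unsigned long flags;	/* must be unsigned long for local_irq_save() */

	local_irq_save(flags);
	/* ... manipulate state also touched from interrupt context ... */
	local_irq_restore(flags);
}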
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e957f3140a8..324a3663fcda 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -706,7 +706,7 @@ static struct mdk_personality *find_pers(int level, char *clevel)
 /* return the offset of the super block in 512byte sectors */
 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
 {
-	sector_t num_sectors = bdev->bd_inode->i_size / 512;
+	sector_t num_sectors = i_size_read(bdev->bd_inode) / 512;
 	return MD_NEW_SIZE_SECTORS(num_sectors);
 }

@@ -1386,7 +1386,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
 	 */
 	switch(minor_version) {
 	case 0:
-		sb_start = rdev->bdev->bd_inode->i_size >> 9;
+		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
 		sb_start -= 8*2;
 		sb_start &= ~(sector_t)(4*2-1);
 		break;
@@ -1472,7 +1472,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
 		ret = 0;
 	}
 	if (minor_version)
-		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
+		rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
 			le64_to_cpu(sb->data_offset);
 	else
 		rdev->sectors = rdev->sb_start;
@@ -1680,7 +1680,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 		return 0; /* component must fit device */
 	if (rdev->sb_start < rdev->data_offset) {
 		/* minor versions 1 and 2; superblock before data */
-		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
+		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
 		max_sectors -= rdev->data_offset;
 		if (!num_sectors || num_sectors > max_sectors)
 			num_sectors = max_sectors;
@@ -1690,7 +1690,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 	} else {
 		/* minor version 0; superblock after data */
 		sector_t sb_start;
-		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
+		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
 		sb_start &= ~(sector_t)(4*2 - 1);
 		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
 		if (!num_sectors || num_sectors > max_sectors)
@@ -2584,7 +2584,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 			if (!sectors)
 				return -EBUSY;
 		} else if (!sectors)
-			sectors = (rdev->bdev->bd_inode->i_size >> 9) -
+			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
 				rdev->data_offset;
 	}
 	if (sectors < my_mddev->dev_sectors)
@@ -2797,7 +2797,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi

 	kobject_init(&rdev->kobj, &rdev_ktype);

-	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
 	if (!size) {
 		printk(KERN_WARNING
 			"md: %s has zero or unknown size, marking faulty!\n",
@@ -5235,8 +5235,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)

 		if (!mddev->persistent) {
 			printk(KERN_INFO "md: nonpersistent superblock ...\n");
-			rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
+			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
 		} else
 			rdev->sb_start = calc_dev_sboffset(rdev->bdev);
 		rdev->sectors = rdev->sb_start;

@@ -5306,7 +5306,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
 	if (mddev->persistent)
 		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
 	else
-		rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
+		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;

 	rdev->sectors = rdev->sb_start;

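Every md.c hunk performs one substitution: open-coded reads of bd_inode->i_size become i_size_read(). i_size is a 64-bit loff_t, and on 32-bit SMP/PREEMPT kernels a plain load can tear against a concurrent size update; i_size_read() uses the inode's sequence counter so the caller always sees a consistent value. A sketch of the resulting idiom:

#include <linux/fs.h>

/* Consistent device size in 512-byte sectors, per the hunks above. */
static inline sector_t demo_bdev_sectors(struct block_device *bdev)
{
	return i_size_read(bdev->bd_inode) >> 9;
}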
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index f9b91ba8900c..0ed09358027e 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -123,7 +123,7 @@ static ssize_t als_sensing_range_store(struct device *dev,
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct als_data *data = i2c_get_clientdata(client);
-	unsigned int ret_val;
+	int ret_val;
 	unsigned long val;

 	if (strict_strtoul(buf, 10, &val))
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index cee632e645e1..d79a972f2c79 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -649,7 +649,7 @@ static ssize_t bh1770_power_state_store(struct device *dev,
 {
 	struct bh1770_chip *chip = dev_get_drvdata(dev);
 	unsigned long value;
-	size_t ret;
+	ssize_t ret;

 	if (strict_strtoul(buf, 0, &value))
 		return -EINVAL;
@@ -659,8 +659,12 @@ static ssize_t bh1770_power_state_store(struct device *dev,
 		pm_runtime_get_sync(dev);

 		ret = bh1770_lux_rate(chip, chip->lux_rate_index);
-		ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+		if (ret < 0) {
+			pm_runtime_put(dev);
+			goto leave;
+		}

+		ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
 		if (ret < 0) {
 			pm_runtime_put(dev);
 			goto leave;
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 34fe835921c4..ca47e6285075 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -87,7 +87,7 @@ static ssize_t als_sensing_range_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	unsigned int ret_val;
+	int ret_val;
 	unsigned long val;

 	if (strict_strtoul(buf, 10, &val))
@@ -106,6 +106,8 @@ static ssize_t als_sensing_range_store(struct device *dev,
 		val = 4;

 	ret_val = i2c_smbus_read_byte_data(client, 0x00);
+	if (ret_val < 0)
+		return ret_val;

 	ret_val &= 0xFC; /*reset the bit before setting them */
 	ret_val |= val - 1;
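The two isl29020 changes belong together: i2c_smbus_read_byte_data() returns the register byte in a signed value, with negative numbers reserved for errors, so the result must be stored signed and checked before it is masked into the new setting. A minimal sketch of the read-modify-write pattern; DEMO_REG is a placeholder register address, not from this driver:

#include <linux/i2c.h>

#define DEMO_REG	0x00	/* placeholder */

static int demo_set_low_bits(struct i2c_client *client, u8 bits)
{
	int val = i2c_smbus_read_byte_data(client, DEMO_REG);

	if (val < 0)		/* negative errno, not register data */
		return val;

	val = (val & 0xFC) | (bits & 0x03);
	return i2c_smbus_write_byte_data(client, DEMO_REG, val);
}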
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 459614d2d7bc..94d5f59d5a6f 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1680,7 +1680,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 		rc = XMIT_PLAIN;

 	else {
-		if (skb->protocol == htons(ETH_P_IPV6)) {
+		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
 			rc = XMIT_CSUM_V6;
 			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 				rc |= XMIT_CSUM_TCP;
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 6de5e2e448a5..c3449bbc585a 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -753,7 +753,9 @@ static int cxgb4vf_open(struct net_device *dev)
 	if (err)
 		return err;
 	set_bit(pi->port_id, &adapter->open_device_map);
-	link_start(dev);
+	err = link_start(dev);
+	if (err)
+		return err;
 	netif_tx_start_all_queues(dev);
 	return 0;
 }
@@ -1103,18 +1105,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
 	return 0;
 }

-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-	/*
-	 * XXX For now just use the default hash but we probably want to
-	 * XXX look at other possibilities ...
-	 */
-	return skb_tx_hash(dev, skb);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Poll all of our receive queues.  This is called outside of normal interrupt
@@ -2075,6 +2065,22 @@ static int adap_init0(struct adapter *adapter)
 	}

 	/*
+	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+	 * 2.6.31 and later we can't call pci_reset_function() in order to
+	 * issue an FLR because of a self-deadlock on the device semaphore.
+	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+	 * cases where they're needed -- for instance, some versions of KVM
+	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
+	 * use the firmware based reset in order to reset any per function
+	 * state.
+	 */
+	err = t4vf_fw_reset(adapter);
+	if (err < 0) {
+		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+		return err;
+	}
+
+	/*
 	 * Grab basic operational parameters.  These will predominantly have
 	 * been set up by the Physical Function Driver or will be hard coded
 	 * into the adapter.  We just have to live with them ... Note that
@@ -2417,7 +2423,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
 	.ndo_get_stats		= cxgb4vf_get_stats,
 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
-	.ndo_select_queue	= cxgb4vf_select_queue,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
 	.ndo_change_mtu		= cxgb4vf_change_mtu,
@@ -2624,7 +2629,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
 		netdev->do_ioctl = cxgb4vf_do_ioctl;
 		netdev->change_mtu = cxgb4vf_change_mtu;
 		netdev->set_mac_address = cxgb4vf_set_mac_addr;
-		netdev->select_queue = cxgb4vf_select_queue;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 		netdev->poll_controller = cxgb4vf_poll_controller;
 #endif
@@ -2843,6 +2847,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
 	CH_DEVICE(0x4800, 0),	/* T440-dbg */
 	CH_DEVICE(0x4801, 0),	/* T420-cr */
 	CH_DEVICE(0x4802, 0),	/* T422-cr */
+	CH_DEVICE(0x4803, 0),	/* T440-cr */
+	CH_DEVICE(0x4804, 0),	/* T420-bch */
+	CH_DEVICE(0x4805, 0),	/* T440-bch */
+	CH_DEVICE(0x4806, 0),	/* T460-ch */
+	CH_DEVICE(0x4807, 0),	/* T420-so */
+	CH_DEVICE(0x4808, 0),	/* T420-cx */
+	CH_DEVICE(0x4809, 0),	/* T420-bt */
+	CH_DEVICE(0x480a, 0),	/* T404-bt */
 	{ 0, }
 };

diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafbe..ecf0770bf0ff 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
 	 */
 	RX_COPY_THRES = 256,
 	RX_PULL_LEN = 128,
-};

-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+	/*
+	 * Main body length for sk_buffs used for RX Ethernet packets with
+	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
+	 * pskb_may_pull() some room.
+	 */
+	RX_SKB_LEN = 512,
+};

 /*
  * Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
 }

 /**
+ * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list.  Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+				  unsigned int skb_len, unsigned int pull_len)
+{
+	struct sk_buff *skb;
+	struct skb_shared_info *ssi;
+
+	/*
+	 * If the ingress packet is small enough, allocate an skb large enough
+	 * for all of the data and copy it inline.  Otherwise, allocate an skb
+	 * with enough room to pull in the header and reference the rest of
+	 * the data via the skb fragment list.
+	 *
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
+	 * buffer size, which is expected since buffers are at least
+	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
+	 * fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		/* small packets have only one fragment */
+		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = alloc_skb(skb_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		ssi = skb_shinfo(skb);
+		ssi->frags[0].page = gl->frags[0].page;
+		ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+		ssi->frags[0].size = gl->frags[0].size - pull_len;
+		if (gl->nfrags > 1)
+			memcpy(&ssi->frags[1], &gl->frags[1],
+			       (gl->nfrags-1) * sizeof(skb_frag_t));
+		ssi->nr_frags = gl->nfrags;
+
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+
+		/* Get a reference for the last page, we don't own it */
+		get_page(gl->frags[gl->nfrags - 1].page);
+	}
+
+out:
+	return skb;
+}
+
+/**
  * t4vf_pktgl_free - free a packet gather list
  * @gl: the gather list
 *
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 {
 	struct sk_buff *skb;
 	struct port_info *pi;
-	struct skb_shared_info *ssi;
 	const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
 	bool csum_ok = pkt->csum_calc && !pkt->err_vec;
-	unsigned int len = be16_to_cpu(pkt->len);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);

 	/*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	}

 	/*
-	 * If the ingress packet is small enough, allocate an skb large enough
-	 * for all of the data and copy it inline.  Otherwise, allocate an skb
-	 * with enough room to pull in the header and reference the rest of
-	 * the data via the skb fragment list.
+	 * Convert the Packet Gather List into an skb.
 	 */
-	if (len <= RX_COPY_THRES) {
-		/* small packets have only one fragment */
-		skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
-		if (!skb)
-			goto nomem;
-		__skb_put(skb, gl->frags[0].size);
-		skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
-	} else {
-		skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
-		if (!skb)
-			goto nomem;
-		__skb_put(skb, RX_PKT_PULL_LEN);
-		skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
-		ssi = skb_shinfo(skb);
-		ssi->frags[0].page = gl->frags[0].page;
-		ssi->frags[0].page_offset = (gl->frags[0].page_offset +
-					     RX_PKT_PULL_LEN);
-		ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
-		if (gl->nfrags > 1)
-			memcpy(&ssi->frags[1], &gl->frags[1],
-			       (gl->nfrags-1) * sizeof(skb_frag_t));
-		ssi->nr_frags = gl->nfrags;
-		skb->len = len + PKTSHIFT;
-		skb->data_len = skb->len - RX_PKT_PULL_LEN;
-		skb->truesize += skb->data_len;
-
-		/* Get a reference for the last page, we don't own it */
-		get_page(gl->frags[gl->nfrags - 1].page);
+	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+	if (unlikely(!skb)) {
+		t4vf_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return 0;
 	}
-
 	__skb_pull(skb, PKTSHIFT);
 	skb->protocol = eth_type_trans(skb, rspq->netdev);
 	skb_record_rx_queue(skb, rspq->idx);
@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	netif_receive_skb(skb);

 	return 0;
-
-nomem:
-	t4vf_pktgl_free(gl);
-	rxq->stats.rx_drops++;
-	return 0;
 }

 /**
@@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
 			}
 			len = RSPD_LEN(len);
 		}
+		gl.tot_len = len;

 		/*
 		 * Gather packet fragments.
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d86c57..a65c80aed1f2 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
 int __devinit t4vf_wait_dev_ready(struct adapter *);
 int __devinit t4vf_port_init(struct adapter *, int);

+int t4vf_fw_reset(struct adapter *);
 int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
 int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);

diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123f0cb4..e306c20dfaee 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
 }

 /**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW.  For a Physical Function this would
+ * result in the Firmware reseting all of its state.  For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+	struct fw_reset_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+				      FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
  * t4vf_query_params - query FW or device parameters
  * @adapter: the adapter
  * @nparams: the number of parameters
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5c566ebc54b8..3bc8e276ba4d 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	if (wol->wolopts & ~WAKE_MAGIC)
 		return -EINVAL;

+	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
 	spin_lock_irqsave(&priv->bflock, flags);
-	priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0;
-	device_set_wakeup_enable(&dev->dev, priv->wol_en);
+	priv->wol_en = !!device_may_wakeup(&dev->dev);
 	spin_unlock_irqrestore(&priv->bflock, flags);

 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2bd3eb4ee5a1..fbad4d819608 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 #ifdef IXGBE_FCOE
 	/* adjust for FCoE Sequence Offload */
 	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-	    && (skb->protocol == htons(ETH_P_FCOE)) &&
-	    skb_is_gso(skb)) {
+	    && skb_is_gso(skb)
+	    && vlan_get_protocol(skb) ==
+	       htons(ETH_P_FCOE)) {
 		hlen = skb_transport_offset(skb) +
 			sizeof(struct fc_frame_header) +
 			sizeof(struct fcoe_crc_eof);
@@ -5823,7 +5824,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)

 static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-		     u32 tx_flags, u8 *hdr_len)
+		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;
@@ -5841,7 +5842,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		l4len = tcp_hdrlen(skb);
 		*hdr_len += l4len;

-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (protocol == htons(ETH_P_IP)) {
 			struct iphdr *iph = ip_hdr(skb);
 			iph->tot_len = 0;
 			iph->check = 0;
@@ -5880,7 +5881,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
 				   IXGBE_ADVTXD_DTYP_CTXT);

-		if (skb->protocol == htons(ETH_P_IP))
+		if (protocol == htons(ETH_P_IP))
 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5906,16 +5907,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 	return false;
 }

-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+		      __be16 protocol)
 {
 	u32 rtn = 0;
-	__be16 protocol;
-
-	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-		protocol = ((const struct vlan_ethhdr *)skb->data)->
-			   h_vlan_encapsulated_proto;
-	else
-		protocol = skb->protocol;

 	switch (protocol) {
 	case cpu_to_be16(ETH_P_IP):
@@ -5943,7 +5938,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 	default:
 		if (unlikely(net_ratelimit()))
 			e_warn(probe, "partial checksum but proto=%x!\n",
-			       skb->protocol);
+			       protocol);
 		break;
 	}

@@ -5952,7 +5947,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)

 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring,
-			  struct sk_buff *skb, u32 tx_flags)
+			  struct sk_buff *skb, u32 tx_flags,
+			  __be16 protocol)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;
@@ -5981,7 +5977,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 				    IXGBE_ADVTXD_DTYP_CTXT);

 		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);

 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
 		/* use index zero for tx checksum offload */
@@ -6179,7 +6175,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 }

 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      int queue, u32 tx_flags)
+		      int queue, u32 tx_flags, __be16 protocol)
 {
 	struct ixgbe_atr_input atr_input;
 	struct tcphdr *th;
@@ -6190,7 +6186,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 	u8 l4type = 0;

 	/* Right now, we support IPv4 only */
-	if (skb->protocol != htons(ETH_P_IP))
+	if (protocol != htons(ETH_P_IP))
 		return;
 	/* check if we're UDP or TCP */
 	if (iph->protocol == IPPROTO_TCP) {
@@ -6257,10 +6253,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	int txq = smp_processor_id();
-
 #ifdef IXGBE_FCOE
-	if ((skb->protocol == htons(ETH_P_FCOE)) ||
-	    (skb->protocol == htons(ETH_P_FIP))) {
+	__be16 protocol;
+
+	protocol = vlan_get_protocol(skb);
+
+	if ((protocol == htons(ETH_P_FCOE)) ||
+	    (protocol == htons(ETH_P_FIP))) {
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 			txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
 			txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6303,6 +6302,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 	int tso;
 	int count = 0;
 	unsigned int f;
+	__be16 protocol;
+
+	protocol = vlan_get_protocol(skb);

 	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= vlan_tx_tag_get(skb);
@@ -6323,8 +6325,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		/* for FCoE with DCB, we force the priority to what
 		 * was specified by the switch */
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-		    (skb->protocol == htons(ETH_P_FCOE) ||
-		     skb->protocol == htons(ETH_P_FIP))) {
+		    (protocol == htons(ETH_P_FCOE) ||
+		     protocol == htons(ETH_P_FIP))) {
 #ifdef CONFIG_IXGBE_DCB
 			if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 				tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6334,7 +6336,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 			}
 #endif
 			/* flag for FCoE offloads */
-			if (skb->protocol == htons(ETH_P_FCOE))
+			if (protocol == htons(ETH_P_FCOE))
 				tx_flags |= IXGBE_TX_FLAGS_FCOE;
 		}
 #endif
@@ -6368,9 +6370,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 			tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
 	} else {
-		if (skb->protocol == htons(ETH_P_IP))
+		if (protocol == htons(ETH_P_IP))
 			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+				protocol);
 		if (tso < 0) {
 			dev_kfree_skb_any(skb);
 			return NETDEV_TX_OK;
@@ -6378,7 +6381,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev

 		if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_TSO;
-		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+				       protocol) &&
 			 (skb->ip_summed == CHECKSUM_PARTIAL))
 			tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}
@@ -6392,7 +6396,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		    test_bit(__IXGBE_FDIR_INIT_DONE,
 			     &tx_ring->reinit_state)) {
 			ixgbe_atr(adapter, skb, tx_ring->queue_index,
-				  tx_flags);
+				  tx_flags, protocol);
 			tx_ring->atr_count = 0;
 		}
 	}
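The bnx2x and ixgbe hunks in this merge share one idea: on a VLAN-tagged frame skb->protocol holds ETH_P_8021Q, so checksum/TSO/FCoE classification must look at the EtherType behind the tag -- which is what the open-coded vlan_ethhdr peek removed from ixgbe_psum() did by hand, and what vlan_get_protocol() now does in one place. A sketch of the test, assuming only that helper's behavior:

#include <linux/if_vlan.h>

/* Does this TX frame carry IPv4, looking through an 802.1Q tag? */
static bool demo_frame_is_ipv4(const struct sk_buff *skb)
{
	/* vlan_get_protocol() returns the encapsulated EtherType for
	 * VLAN frames and plain skb->protocol otherwise. */
	return vlan_get_protocol(skb) == htons(ETH_P_IP);
}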
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d2e166e29dda..8a4d19e5de06 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);

 typedef struct axnet_dev_t {
 	struct pcmcia_device *p_dev;
 	caddr_t base;
 	struct timer_list watchdog;
 	int stale, fast_poll;
 	u_short link_status;
 	u_char duplex_flag;
 	int phy_id;
 	int flags;
+	int active_low;
 } axnet_dev_t;

 static inline axnet_dev_t *PRIV(struct net_device *dev)
@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link)
 	if (info->flags & IS_AX88790)
 		outb(0x10, dev->base_addr + AXNET_GPIO);  /* select Internal PHY */

+	info->active_low = 0;
+
 	for (i = 0; i < 32; i++) {
 		j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
 		j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link)
 		if ((j != 0) && (j != 0xffff)) break;
 	}

-	/* Maybe PHY is in power down mode. (PPD_SET = 1)
-	   Bit 2 of CCSR is active low. */
 	if (i == 32) {
+		/* Maybe PHY is in power down mode. (PPD_SET = 1)
+		   Bit 2 of CCSR is active low. */
 		pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
 		for (i = 0; i < 32; i++) {
 			j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
 			j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
 			if (j == j2) continue;
-			if ((j != 0) && (j != 0xffff)) break;
+			if ((j != 0) && (j != 0xffff)) {
+				info->active_low = 1;
+				break;
+			}
 		}
 	}

@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link)
 static int axnet_resume(struct pcmcia_device *link)
 {
 	struct net_device *dev = link->priv;
+	axnet_dev_t *info = PRIV(dev);

 	if (link->open) {
+		if (info->active_low == 1)
+			pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+
 		axnet_reset_8390(dev);
 		AX88190_init(dev, 1);
 		netif_device_attach(dev);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9fb1cbd..4c4d16905efb 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -846,10 +846,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	else
 		tp->features &= ~RTL_FEATURE_WOL;
 	__rtl8169_set_wol(tp, wol->wolopts);
-	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
 	spin_unlock_irq(&tp->lock);

+	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
 	return 0;
 }

@@ -2931,7 +2931,7 @@ static const struct rtl_cfg_info {
 		.hw_start	= rtl_hw_start_8168,
 		.region		= 2,
 		.align		= 8,
-		.intr_event	= SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+		.intr_event	= SYSErr | LinkChg | RxOverflow |
 				  TxErr | TxOK | RxOK | RxErr,
 		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
 		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4588,7 +4588,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 		}

 		/* Work around for rx fifo overflow */
-		if (unlikely(status & RxFIFOOver)) {
+		if (unlikely(status & RxFIFOOver) &&
+		    (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
 			netif_stop_queue(dev);
 			rtl8169_tx_timeout(dev);
 			break;
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a4c3f5708246..acbdab3d66ca 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)

 	ugeth_vdbg("%s: IN", __func__);

+	/*
+	 * Tell the kernel the link is down.
+	 * Must be done before disabling the controller
+	 * or deadlock may happen.
+	 */
+	phy_stop(phydev);
+
 	/* Disable the controller */
 	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);

-	/* Tell the kernel the link is down */
-	phy_stop(phydev);
-
 	/* Mask all interrupts */
 	out_be32(ugeth->uccf->p_uccm, 0x00000000);

@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
 	/* Disable Rx and Tx */
 	clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);

-	phy_disconnect(ugeth->phydev);
-	ugeth->phydev = NULL;
-
 	ucc_geth_memclean(ugeth);
 }

@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev)

 	napi_disable(&ugeth->napi);

+	cancel_work_sync(&ugeth->timeout_work);
 	ucc_geth_stop(ugeth);
+	phy_disconnect(ugeth->phydev);
+	ugeth->phydev = NULL;

 	free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);

@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work)
 		 * Must reset MAC *and* PHY. This is done by reopening
 		 * the device.
 		 */
-		ucc_geth_close(dev);
-		ucc_geth_open(dev);
+		netif_tx_stop_all_queues(dev);
+		ucc_geth_stop(ugeth);
+		ucc_geth_init_mac(ugeth);
+		/* Must start PHY here */
+		phy_start(ugeth->phydev);
+		netif_tx_start_all_queues(dev);
 	}

 	netif_tx_schedule_all(dev);
@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev)
 {
 	struct ucc_geth_private *ugeth = netdev_priv(dev);

-	netif_carrier_off(dev);
 	schedule_work(&ugeth->timeout_work);
 }

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb6b67f6b0cc..b6d402806ae6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev)
 		goto unregister;
 	}

-	vi->status = VIRTIO_NET_S_LINK_UP;
-	virtnet_update_status(vi);
-	netif_carrier_on(dev);
+	/* Assume link up if device can't report link status,
+	   otherwise get link status from config. */
+	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+		netif_carrier_off(dev);
+		virtnet_update_status(vi);
+	} else {
+		vi->status = VIRTIO_NET_S_LINK_UP;
+		netif_carrier_on(dev);
+	}

 	pr_debug("virtnet: registered device %s\n", dev->name);
 	return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index a0471f2e1c7a..48261b7252d0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -410,6 +410,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
 			val &= ~(AR_WA_BIT6 | AR_WA_BIT7);
 		}

+		if (AR_SREV_9280(ah))
+			val |= AR_WA_BIT22;
+
 		if (AR_SREV_9285E_20(ah))
 			val |= AR_WA_BIT23;

diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 9b8e7e3fcebd..170d44a35ccb 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -675,6 +675,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
 }

 extern struct ieee80211_ops ath9k_ops;
+extern struct pm_qos_request_list ath9k_pm_qos_req;
 extern int modparam_nohwcrypt;
 extern int led_blink;

diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 6576f683dba0..f7ec31b4ddd3 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -35,6 +35,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
35 { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ 35 { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
36 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ 36 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
37 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ 37 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
38 { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
38 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ 39 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
39 { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ 40 { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
40 { }, 41 { },
@@ -540,11 +541,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
540 return; 541 return;
541 } 542 }
542 543
543 usb_fill_int_urb(urb, hif_dev->udev, 544 usb_fill_bulk_urb(urb, hif_dev->udev,
544 usb_rcvbulkpipe(hif_dev->udev, 545 usb_rcvbulkpipe(hif_dev->udev,
545 USB_REG_IN_PIPE), 546 USB_REG_IN_PIPE),
546 nskb->data, MAX_REG_IN_BUF_SIZE, 547 nskb->data, MAX_REG_IN_BUF_SIZE,
547 ath9k_hif_usb_reg_in_cb, nskb, 1); 548 ath9k_hif_usb_reg_in_cb, nskb);
548 549
549 ret = usb_submit_urb(urb, GFP_ATOMIC); 550 ret = usb_submit_urb(urb, GFP_ATOMIC);
550 if (ret) { 551 if (ret) {
@@ -720,11 +721,11 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
720 if (!skb) 721 if (!skb)
721 goto err; 722 goto err;
722 723
723 usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev, 724 usb_fill_bulk_urb(hif_dev->reg_in_urb, hif_dev->udev,
724 usb_rcvbulkpipe(hif_dev->udev, 725 usb_rcvbulkpipe(hif_dev->udev,
725 USB_REG_IN_PIPE), 726 USB_REG_IN_PIPE),
726 skb->data, MAX_REG_IN_BUF_SIZE, 727 skb->data, MAX_REG_IN_BUF_SIZE,
727 ath9k_hif_usb_reg_in_cb, skb, 1); 728 ath9k_hif_usb_reg_in_cb, skb);
728 729
729 if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0) 730 if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
730 goto err; 731 goto err;
@@ -843,14 +844,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
843 goto err_fw_req; 844 goto err_fw_req;
844 } 845 }
845 846
846 /* Alloc URBs */
847 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
848 if (ret) {
849 dev_err(&hif_dev->udev->dev,
850 "ath9k_htc: Unable to allocate URBs\n");
851 goto err_urb;
852 }
853
854 /* Download firmware */ 847 /* Download firmware */
855 ret = ath9k_hif_usb_download_fw(hif_dev); 848 ret = ath9k_hif_usb_download_fw(hif_dev);
856 if (ret) { 849 if (ret) {
@@ -866,16 +859,22 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
866 */ 859 */
867 for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) { 860 for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
868 endp = &alt->endpoint[idx].desc; 861 endp = &alt->endpoint[idx].desc;
869 if (((endp->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) 862 if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
870 == 0x04) && 863 == USB_ENDPOINT_XFER_INT) {
871 ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
872 == USB_ENDPOINT_XFER_INT)) {
873 endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK; 864 endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
874 endp->bmAttributes |= USB_ENDPOINT_XFER_BULK; 865 endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
875 endp->bInterval = 0; 866 endp->bInterval = 0;
876 } 867 }
877 } 868 }
878 869
870 /* Alloc URBs */
871 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
872 if (ret) {
873 dev_err(&hif_dev->udev->dev,
874 "ath9k_htc: Unable to allocate URBs\n");
875 goto err_urb;
876 }
877
879 return 0; 878 return 0;
880 879
881err_fw_download: 880err_fw_download:
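Two things change in hif_usb: the REG_IN pipe really is bulk after the descriptor fixup, so the URBs are filled with the bulk helper (which simply lacks the trailing interval argument of the interrupt variant), and URB allocation is moved after that fixup so the URBs are prepared against the rewritten endpoints. The two helpers, as declared in <linux/usb.h>:

    void usb_fill_int_urb(struct urb *urb, struct usb_device *dev,
                          unsigned int pipe, void *transfer_buffer,
                          int buffer_length, usb_complete_t complete_fn,
                          void *context, int interval);
    void usb_fill_bulk_urb(struct urb *urb, struct usb_device *dev,
                           unsigned int pipe, void *transfer_buffer,
                           int buffer_length, usb_complete_t complete_fn,
                           void *context);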
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index cc13ee117823..6ebc68bca91f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -484,6 +484,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
484 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 484 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
485 "Failed allocating banks for " 485 "Failed allocating banks for "
486 "external radio\n"); 486 "external radio\n");
487 ath9k_hw_rf_free_ext_banks(ah);
487 return ecode; 488 return ecode;
488 } 489 }
489 490
@@ -952,9 +953,12 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
952 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); 953 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
953 break; 954 break;
954 case NL80211_IFTYPE_STATION: 955 case NL80211_IFTYPE_STATION:
955 case NL80211_IFTYPE_MONITOR:
956 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); 956 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
957 break; 957 break;
958 default:
959 if (ah->is_monitoring)
960 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
961 break;
958 } 962 }
959} 963}
960 964
@@ -1634,7 +1638,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
1634 1638
1635 switch (ah->opmode) { 1639 switch (ah->opmode) {
1636 case NL80211_IFTYPE_STATION: 1640 case NL80211_IFTYPE_STATION:
1637 case NL80211_IFTYPE_MONITOR:
1638 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); 1641 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
1639 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); 1642 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
1640 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); 1643 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
@@ -1663,6 +1666,14 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
1663 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; 1666 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
1664 break; 1667 break;
1665 default: 1668 default:
1669 if (ah->is_monitoring) {
1670 REG_WRITE(ah, AR_NEXT_TBTT_TIMER,
1671 TU_TO_USEC(next_beacon));
1672 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
1673 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
1674 flags |= AR_TBTT_TIMER_EN;
1675 break;
1676 }
1666 ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON, 1677 ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
1667 "%s: unsupported opmode: %d\n", 1678 "%s: unsupported opmode: %d\n",
1668 __func__, ah->opmode); 1679 __func__, ah->opmode);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index d032939768b0..d47d1b4b6002 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -622,6 +622,7 @@ struct ath_hw {
622 622
623 bool sw_mgmt_crypto; 623 bool sw_mgmt_crypto;
624 bool is_pciexpress; 624 bool is_pciexpress;
625 bool is_monitoring;
625 bool need_an_top2_fixup; 626 bool need_an_top2_fixup;
626 u16 tx_trig_level; 627 u16 tx_trig_level;
627 628
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 95b41db0d86b..6a0d99eff404 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/pm_qos_params.h>
18 19
19#include "ath9k.h" 20#include "ath9k.h"
20 21
@@ -179,6 +180,8 @@ static const struct ath_ops ath9k_common_ops = {
179 .write = ath9k_iowrite32, 180 .write = ath9k_iowrite32,
180}; 181};
181 182
183struct pm_qos_request_list ath9k_pm_qos_req;
184
182/**************************/ 185/**************************/
183/* Initialization */ 186/* Initialization */
184/**************************/ 187/**************************/
@@ -756,6 +759,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
756 ath_init_leds(sc); 759 ath_init_leds(sc);
757 ath_start_rfkill_poll(sc); 760 ath_start_rfkill_poll(sc);
758 761
762 pm_qos_add_request(&ath9k_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
763 PM_QOS_DEFAULT_VALUE);
764
759 return 0; 765 return 0;
760 766
761error_world: 767error_world:
@@ -811,6 +817,8 @@ void ath9k_deinit_device(struct ath_softc *sc)
811 817
812 ath9k_ps_wakeup(sc); 818 ath9k_ps_wakeup(sc);
813 819
820 pm_qos_remove_request(&ath9k_pm_qos_req);
821
814 wiphy_rfkill_stop_polling(sc->hw->wiphy); 822 wiphy_rfkill_stop_polling(sc->hw->wiphy);
815 ath_deinit_leds(sc); 823 ath_deinit_leds(sc);
816 824
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index b52f1cf8a603..25d3ef4c338e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include <linux/pm_qos_params.h>
18#include "ath9k.h" 19#include "ath9k.h"
19#include "btcoex.h" 20#include "btcoex.h"
20 21
@@ -93,11 +94,13 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
93{ 94{
94 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 95 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
95 unsigned long flags; 96 unsigned long flags;
97 enum ath9k_power_mode power_mode;
96 98
97 spin_lock_irqsave(&sc->sc_pm_lock, flags); 99 spin_lock_irqsave(&sc->sc_pm_lock, flags);
98 if (++sc->ps_usecount != 1) 100 if (++sc->ps_usecount != 1)
99 goto unlock; 101 goto unlock;
100 102
103 power_mode = sc->sc_ah->power_mode;
101 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 104 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
102 105
103 /* 106 /*
@@ -105,10 +108,12 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
105 * useful data. Better clear them now so that they don't mess up 108 * useful data. Better clear them now so that they don't mess up
106 * survey data results. 109 * survey data results.
107 */ 110 */
108 spin_lock(&common->cc_lock); 111 if (power_mode != ATH9K_PM_AWAKE) {
109 ath_hw_cycle_counters_update(common); 112 spin_lock(&common->cc_lock);
110 memset(&common->cc_survey, 0, sizeof(common->cc_survey)); 113 ath_hw_cycle_counters_update(common);
111 spin_unlock(&common->cc_lock); 114 memset(&common->cc_survey, 0, sizeof(common->cc_survey));
115 spin_unlock(&common->cc_lock);
116 }
112 117
113 unlock: 118 unlock:
114 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 119 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
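The condition added above boils down to: sample the power mode before forcing the chip awake, and discard the cycle counters only on a genuine sleep-to-awake transition, since counters sampled while already awake still carry valid survey data. In outline:

    power_mode = sc->sc_ah->power_mode;             /* sample first     */
    ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);   /* then force awake */
    if (power_mode != ATH9K_PM_AWAKE) {
            /* counters accumulated during sleep are garbage */
            ath_hw_cycle_counters_update(common);
            memset(&common->cc_survey, 0, sizeof(common->cc_survey));
    }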
@@ -1217,6 +1222,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
1217 ah->imask |= ATH9K_INT_CST; 1222 ah->imask |= ATH9K_INT_CST;
1218 1223
1219 sc->sc_flags &= ~SC_OP_INVALID; 1224 sc->sc_flags &= ~SC_OP_INVALID;
1225 sc->sc_ah->is_monitoring = false;
1220 1226
1221 /* Disable BMISS interrupt when we're not associated */ 1227 /* Disable BMISS interrupt when we're not associated */
1222 ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 1228 ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
@@ -1238,6 +1244,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
1238 ath9k_btcoex_timer_resume(sc); 1244 ath9k_btcoex_timer_resume(sc);
1239 } 1245 }
1240 1246
1247 pm_qos_update_request(&ath9k_pm_qos_req, 55);
1248
1241mutex_unlock: 1249mutex_unlock:
1242 mutex_unlock(&sc->mutex); 1250 mutex_unlock(&sc->mutex);
1243 1251
@@ -1415,6 +1423,8 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1415 1423
1416 sc->sc_flags |= SC_OP_INVALID; 1424 sc->sc_flags |= SC_OP_INVALID;
1417 1425
1426 pm_qos_update_request(&ath9k_pm_qos_req, PM_QOS_DEFAULT_VALUE);
1427
1418 mutex_unlock(&sc->mutex); 1428 mutex_unlock(&sc->mutex);
1419 1429
1420 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); 1430 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
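Taken together, the pm_qos hunks trace the full lifecycle of a latency request under the old pm_qos_params API (the 55 us figure is the driver's choice, presumably sized to keep CPUs out of deep idle states while the NIC is active):

    #include <linux/pm_qos_params.h>

    static struct pm_qos_request_list req;

    pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY,
                       PM_QOS_DEFAULT_VALUE);          /* at device init   */
    pm_qos_update_request(&req, 55);                   /* on ath9k_start() */
    pm_qos_update_request(&req, PM_QOS_DEFAULT_VALUE); /* on ath9k_stop()  */
    pm_qos_remove_request(&req);                       /* at deinit        */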
@@ -1493,8 +1503,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1493 ath9k_hw_set_interrupts(ah, ah->imask); 1503 ath9k_hw_set_interrupts(ah, ah->imask);
1494 1504
1495 if (vif->type == NL80211_IFTYPE_AP || 1505 if (vif->type == NL80211_IFTYPE_AP ||
1496 vif->type == NL80211_IFTYPE_ADHOC || 1506 vif->type == NL80211_IFTYPE_ADHOC) {
1497 vif->type == NL80211_IFTYPE_MONITOR) {
1498 sc->sc_flags |= SC_OP_ANI_RUN; 1507 sc->sc_flags |= SC_OP_ANI_RUN;
1499 ath_start_ani(common); 1508 ath_start_ani(common);
1500 } 1509 }
@@ -1644,8 +1653,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1644 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1653 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1645 if (conf->flags & IEEE80211_CONF_MONITOR) { 1654 if (conf->flags & IEEE80211_CONF_MONITOR) {
1646 ath_print(common, ATH_DBG_CONFIG, 1655 ath_print(common, ATH_DBG_CONFIG,
1647 "HW opmode set to Monitor mode\n"); 1656 "Monitor mode is enabled\n");
1648 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR; 1657 sc->sc_ah->is_monitoring = true;
1658 } else {
1659 ath_print(common, ATH_DBG_CONFIG,
1660 "Monitor mode is disabled\n");
1661 sc->sc_ah->is_monitoring = false;
1649 } 1662 }
1650 } 1663 }
1651 1664
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index fddb0129bb57..c76ea53c20ce 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -441,7 +441,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
441 */ 441 */
442 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && 442 if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
443 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || 443 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
444 (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) 444 (sc->sc_ah->is_monitoring))
445 rfilt |= ATH9K_RX_FILTER_PROM; 445 rfilt |= ATH9K_RX_FILTER_PROM;
446 446
447 if (sc->rx.rxfilter & FIF_CONTROL) 447 if (sc->rx.rxfilter & FIF_CONTROL)
@@ -897,7 +897,7 @@ static bool ath9k_rx_accept(struct ath_common *common,
897 * decryption and MIC failures. For monitor mode, 897 * decryption and MIC failures. For monitor mode,
898 * we also ignore the CRC error. 898 * we also ignore the CRC error.
899 */ 899 */
900 if (ah->opmode == NL80211_IFTYPE_MONITOR) { 900 if (ah->is_monitoring) {
901 if (rx_stats->rs_status & 901 if (rx_stats->rs_status &
902 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 902 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
903 ATH9K_RXERR_CRC)) 903 ATH9K_RXERR_CRC))
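All the ath9k monitor hunks hang off one bit of state: mac80211 signals monitor mode through a config flag rather than an interface type, the driver caches it in ah->is_monitoring, and the switch defaults and RX paths test the flag instead of comparing opmode against NL80211_IFTYPE_MONITOR. Condensed from the hunks above:

    if (changed & IEEE80211_CONF_CHANGE_MONITOR)
            sc->sc_ah->is_monitoring =
                    !!(conf->flags & IEEE80211_CONF_MONITOR);

    if (sc->sc_ah->is_monitoring)   /* e.g. in ath_calcrxfilter() */
            rfilt |= ATH9K_RX_FILTER_PROM;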
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 42976b0a01c1..fa05b711e5cd 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -703,6 +703,7 @@
703#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */ 703#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
704#define AR_WA_ANALOG_SHIFT (1 << 20) 704#define AR_WA_ANALOG_SHIFT (1 << 20)
705#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */ 705#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */
706#define AR_WA_BIT22 (1 << 22)
706#define AR9285_WA_DEFAULT 0x004a050b 707#define AR9285_WA_DEFAULT 0x004a050b
707#define AR9280_WA_DEFAULT 0x0040073b 708#define AR9280_WA_DEFAULT 0x0040073b
708#define AR_WA_DEFAULT 0x0000073f 709#define AR_WA_DEFAULT 0x0000073f
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index d8607f4c144d..3317039cd28f 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -82,9 +82,11 @@ static struct usb_device_id carl9170_usb_ids[] = {
82 { USB_DEVICE(0x07d1, 0x3c10) }, 82 { USB_DEVICE(0x07d1, 0x3c10) },
83 /* D-Link DWA 160 A2 */ 83 /* D-Link DWA 160 A2 */
84 { USB_DEVICE(0x07d1, 0x3a09) }, 84 { USB_DEVICE(0x07d1, 0x3a09) },
85 /* D-Link DWA 130 D */
86 { USB_DEVICE(0x07d1, 0x3a0f) },
85 /* Netgear WNA1000 */ 87 /* Netgear WNA1000 */
86 { USB_DEVICE(0x0846, 0x9040) }, 88 { USB_DEVICE(0x0846, 0x9040) },
87 /* Netgear WNDA3100 */ 89 /* Netgear WNDA3100 (v1) */
88 { USB_DEVICE(0x0846, 0x9010) }, 90 { USB_DEVICE(0x0846, 0x9010) },
89 /* Netgear WN111 v2 */ 91 /* Netgear WN111 v2 */
90 { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED }, 92 { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED },
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 8f8c4b73f8b9..7edf8c2fb8c7 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -4000,7 +4000,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4000 * "the hard way", rather than using device's scan. 4000 * "the hard way", rather than using device's scan.
4001 */ 4001 */
4002 if (iwl3945_mod_params.disable_hw_scan) { 4002 if (iwl3945_mod_params.disable_hw_scan) {
4003 IWL_ERR(priv, "sw scan support is deprecated\n"); 4003 dev_printk(KERN_DEBUG, &(pdev->dev),
4004 "sw scan support is deprecated\n");
4004 iwl3945_hw_ops.hw_scan = NULL; 4005 iwl3945_hw_ops.hw_scan = NULL;
4005 } 4006 }
4006 4007
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 5046a0005034..373930afc26b 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -700,8 +700,9 @@ static void lbs_scan_worker(struct work_struct *work)
700 700
701 if (priv->scan_channel < priv->scan_req->n_channels) { 701 if (priv->scan_channel < priv->scan_req->n_channels) {
702 cancel_delayed_work(&priv->scan_work); 702 cancel_delayed_work(&priv->scan_work);
703 queue_delayed_work(priv->work_thread, &priv->scan_work, 703 if (!priv->stopping)
704 msecs_to_jiffies(300)); 704 queue_delayed_work(priv->work_thread, &priv->scan_work,
705 msecs_to_jiffies(300));
705 } 706 }
706 707
707 /* This is the final data we are about to send */ 708 /* This is the final data we are about to send */
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f062ed583901..cb14c38caf3a 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -36,6 +36,7 @@ struct lbs_private {
36 /* CFG80211 */ 36 /* CFG80211 */
37 struct wireless_dev *wdev; 37 struct wireless_dev *wdev;
38 bool wiphy_registered; 38 bool wiphy_registered;
39 bool stopping;
39 struct cfg80211_scan_request *scan_req; 40 struct cfg80211_scan_request *scan_req;
40 u8 assoc_bss[ETH_ALEN]; 41 u8 assoc_bss[ETH_ALEN];
41 u8 disassoc_reason; 42 u8 disassoc_reason;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 47ce5a6ba120..46b88b118c99 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -104,6 +104,7 @@ static int lbs_dev_open(struct net_device *dev)
104 lbs_deb_enter(LBS_DEB_NET); 104 lbs_deb_enter(LBS_DEB_NET);
105 105
106 spin_lock_irq(&priv->driver_lock); 106 spin_lock_irq(&priv->driver_lock);
107 priv->stopping = false;
107 108
108 if (priv->connect_status == LBS_CONNECTED) 109 if (priv->connect_status == LBS_CONNECTED)
109 netif_carrier_on(dev); 110 netif_carrier_on(dev);
@@ -131,10 +132,16 @@ static int lbs_eth_stop(struct net_device *dev)
131 lbs_deb_enter(LBS_DEB_NET); 132 lbs_deb_enter(LBS_DEB_NET);
132 133
133 spin_lock_irq(&priv->driver_lock); 134 spin_lock_irq(&priv->driver_lock);
135 priv->stopping = true;
134 netif_stop_queue(dev); 136 netif_stop_queue(dev);
135 spin_unlock_irq(&priv->driver_lock); 137 spin_unlock_irq(&priv->driver_lock);
136 138
137 schedule_work(&priv->mcast_work); 139 schedule_work(&priv->mcast_work);
140 cancel_delayed_work_sync(&priv->scan_work);
141 if (priv->scan_req) {
142 cfg80211_scan_done(priv->scan_req, false);
143 priv->scan_req = NULL;
144 }
138 145
139 lbs_deb_leave(LBS_DEB_NET); 146 lbs_deb_leave(LBS_DEB_NET);
140 return 0; 147 return 0;
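The ordering in lbs_eth_stop() matters: raise the flag under the lock so lbs_scan_worker() stops rearming itself, flush the pending work, then complete any scan request left in flight so cfg80211 is not stuck waiting. In outline:

    spin_lock_irq(&priv->driver_lock);
    priv->stopping = true;                  /* checked by the worker */
    netif_stop_queue(dev);
    spin_unlock_irq(&priv->driver_lock);

    cancel_delayed_work_sync(&priv->scan_work);
    if (priv->scan_req) {
            cfg80211_scan_done(priv->scan_req, false);
            priv->scan_req = NULL;          /* report it finished    */
    }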
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index eea1ef2f502b..4396d4b9bfb9 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -221,9 +221,6 @@ config RT2X00_LIB_LEDS
221 boolean 221 boolean
222 default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n) 222 default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
223 223
224comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
225 depends on RT2X00_LIB=y && LEDS_CLASS=m
226
227config RT2X00_LIB_DEBUGFS 224config RT2X00_LIB_DEBUGFS
228 bool "Ralink debugfs support" 225 bool "Ralink debugfs support"
229 depends on RT2X00_LIB && MAC80211_DEBUGFS 226 depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index a87c4985326e..3a5a6fcc0ead 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -13,7 +13,6 @@
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/msi.h> 15#include <linux/msi.h>
16#include <xen/xenbus.h>
17#include <xen/interface/io/pciif.h> 16#include <xen/interface/io/pciif.h>
18#include <asm/xen/pci.h> 17#include <asm/xen/pci.h>
19#include <linux/interrupt.h> 18#include <linux/interrupt.h>
@@ -576,8 +575,9 @@ static pci_ers_result_t pcifront_common_process(int cmd,
576 575
577 pcidev = pci_get_bus_and_slot(bus, devfn); 576 pcidev = pci_get_bus_and_slot(bus, devfn);
578 if (!pcidev || !pcidev->driver) { 577 if (!pcidev || !pcidev->driver) {
579 dev_err(&pcidev->dev, 578 dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
580 "device or driver is NULL\n"); 579 if (pcidev)
580 pci_dev_put(pcidev);
581 return result; 581 return result;
582 } 582 }
583 pdrv = pcidev->driver; 583 pdrv = pcidev->driver;
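The fix above is the textbook NULL-deref-in-the-error-path bug: the very branch that establishes pcidev may be NULL went on to dereference it for logging. In miniature:

    if (!pcidev || !pcidev->driver) {
            dev_err(&pcidev->dev, "...");   /* WRONG: pcidev may be NULL */
            return result;
    }

    if (!pcidev || !pcidev->driver) {
            dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
            if (pcidev)
                    pci_dev_put(pcidev);    /* drop pci_get_bus_and_slot() ref */
            return result;
    }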
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 68cf0c99138a..7b5080c45569 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1159,11 +1159,11 @@ int __devinit rio_init_mports(void)
1159 1159
1160 list_for_each_entry(port, &rio_mports, node) { 1160 list_for_each_entry(port, &rio_mports, node) {
1161 if (!request_mem_region(port->iores.start, 1161 if (!request_mem_region(port->iores.start,
1162 port->iores.end - port->iores.start, 1162 resource_size(&port->iores),
1163 port->name)) { 1163 port->name)) {
1164 printk(KERN_ERR 1164 printk(KERN_ERR
1165 "RIO: Error requesting master port region 0x%016llx-0x%016llx\n", 1165 "RIO: Error requesting master port region 0x%016llx-0x%016llx\n",
1166 (u64)port->iores.start, (u64)port->iores.end - 1); 1166 (u64)port->iores.start, (u64)port->iores.end);
1167 rc = -ENOMEM; 1167 rc = -ENOMEM;
1168 goto out; 1168 goto out;
1169 } 1169 }
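resource_size() exists precisely because open-coding end - start drops the inclusive endpoint; its definition in <linux/ioport.h> is:

    static inline resource_size_t resource_size(const struct resource *res)
    {
            return res->end - res->start + 1;   /* both ends inclusive */
    }

which is also why the error message can now print port->iores.end directly instead of end - 1.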
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1de30eb83bb0..f3cf924a2cd9 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -320,19 +320,11 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
320 "changed. The Linux SCSI layer does not " 320 "changed. The Linux SCSI layer does not "
321 "automatically adjust these parameters.\n"); 321 "automatically adjust these parameters.\n");
322 322
323 if (scmd->request->cmd_flags & REQ_HARDBARRIER) 323 /*
324 /* 324 * Pass the UA upwards for a determination in the completion
325 * barrier requests should always retry on UA 325 * functions.
326 * otherwise block will get a spurious error 326 */
327 */ 327 return SUCCESS;
328 return NEEDS_RETRY;
329 else
330 /*
331 * for normal (non barrier) commands, pass the
332 * UA upwards for a determination in the
333 * completion functions
334 */
335 return SUCCESS;
336 328
337 /* these three are not supported */ 329 /* these three are not supported */
338 case COPY_ABORTED: 330 case COPY_ABORTED:
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 4d8e14b7aa93..dd5e1ac22251 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2343,8 +2343,11 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
2343 2343
2344 /* 2344 /*
2345 * CTS flow control flag and modem status interrupts 2345 * CTS flow control flag and modem status interrupts
2346 * Only disable MSI if no threads are waiting in
2347 * serial_core::uart_wait_modem_status
2346 */ 2348 */
2347 up->ier &= ~UART_IER_MSI; 2349 if (!waitqueue_active(&up->port.state->port.delta_msr_wait))
2350 up->ier &= ~UART_IER_MSI;
2348 if (!(up->bugs & UART_BUG_NOMSR) && 2351 if (!(up->bugs & UART_BUG_NOMSR) &&
2349 UART_ENABLE_MS(&up->port, termios->c_cflag)) 2352 UART_ENABLE_MS(&up->port, termios->c_cflag))
2350 up->ier |= UART_IER_MSI; 2353 up->ier |= UART_IER_MSI;
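The guard above keeps TIOCMIWAIT working across termios changes: a thread sleeping in uart_wait_modem_status() needs modem-status interrupts to stay enabled or it will never be woken. waitqueue_active() is the lockless "does anyone sleep here?" test:

    if (!waitqueue_active(&up->port.state->port.delta_msr_wait))
            up->ier &= ~UART_IER_MSI;   /* safe only with no waiters */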
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 53be4d35a0aa..842e3b2a02b1 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2285,6 +2285,8 @@ static struct pciserial_board pci_boards[] __devinitdata = {
2285 2285
2286static const struct pci_device_id softmodem_blacklist[] = { 2286static const struct pci_device_id softmodem_blacklist[] = {
2287 { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */ 2287 { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
2288 { PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */
2289 { PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
2288}; 2290};
2289 2291
2290/* 2292/*
@@ -2863,6 +2865,9 @@ static struct pci_device_id serial_pci_tbl[] = {
2863 PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, 2865 PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL,
2864 0, 0, 2866 0, 0,
2865 pbn_b0_4_1152000 }, 2867 pbn_b0_4_1152000 },
2868 { PCI_VENDOR_ID_OXSEMI, 0x9505,
2869 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2870 pbn_b0_bt_2_921600 },
2866 2871
2867 /* 2872 /*
2868 * The below card is a little controversial since it is the 2873 * The below card is a little controversial since it is the
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index a9eff2b18eab..19cac9f610fd 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -23,6 +23,7 @@
23#include <linux/tty.h> 23#include <linux/tty.h>
24#include <linux/tty_flip.h> 24#include <linux/tty_flip.h>
25#include <linux/serial_core.h> 25#include <linux/serial_core.h>
26#include <linux/dma-mapping.h>
26 27
27#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ 28#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
28 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) 29 defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -33,12 +34,10 @@
33#include <asm/gpio.h> 34#include <asm/gpio.h>
34#include <mach/bfin_serial_5xx.h> 35#include <mach/bfin_serial_5xx.h>
35 36
36#ifdef CONFIG_SERIAL_BFIN_DMA 37#include <asm/dma.h>
37#include <linux/dma-mapping.h>
38#include <asm/io.h> 38#include <asm/io.h>
39#include <asm/irq.h> 39#include <asm/irq.h>
40#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
41#endif
42 41
43#ifdef CONFIG_SERIAL_BFIN_MODULE 42#ifdef CONFIG_SERIAL_BFIN_MODULE
44# undef CONFIG_EARLY_PRINTK 43# undef CONFIG_EARLY_PRINTK
@@ -360,7 +359,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart)
360 UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); 359 UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
361 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 360 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
362 uart->port.icount.tx++; 361 uart->port.icount.tx++;
363 SSYNC();
364 } 362 }
365 363
366 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 364 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -688,6 +686,13 @@ static int bfin_serial_startup(struct uart_port *port)
688 686
689# ifdef CONFIG_BF54x 687# ifdef CONFIG_BF54x
690 { 688 {
689 /*
690 * UART2 and UART3 on BF548 share interrupt PINs and DMA
691 * controllers with SPORT2 and SPORT3. UART rx and tx
692 * interrupts are generated in PIO mode only when their peripheral
693 * mapping registers are configured properly, which means the
694 * corresponding DMA channels must be requested in PIO mode as well.

695 */
691 unsigned uart_dma_ch_rx, uart_dma_ch_tx; 696 unsigned uart_dma_ch_rx, uart_dma_ch_tx;
692 697
693 switch (uart->port.irq) { 698 switch (uart->port.irq) {
@@ -734,8 +739,7 @@ static int bfin_serial_startup(struct uart_port *port)
734 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | 739 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
735 IRQF_DISABLED, "BFIN_UART_CTS", uart)) { 740 IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
736 uart->cts_pin = -1; 741 uart->cts_pin = -1;
737 pr_info("Unable to attach BlackFin UART CTS interrupt.\ 742 pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
738 So, disable it.\n");
739 } 743 }
740 } 744 }
741 if (uart->rts_pin >= 0) { 745 if (uart->rts_pin >= 0) {
@@ -747,8 +751,7 @@ static int bfin_serial_startup(struct uart_port *port)
747 if (request_irq(uart->status_irq, 751 if (request_irq(uart->status_irq,
748 bfin_serial_mctrl_cts_int, 752 bfin_serial_mctrl_cts_int,
749 IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) { 753 IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
750 pr_info("Unable to attach BlackFin UART Modem \ 754 pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
751 Status interrupt.\n");
752 } 755 }
753 756
754 /* CTS RTS PINs are negative assertive. */ 757 /* CTS RTS PINs are negative assertive. */
@@ -846,6 +849,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
846 if (termios->c_cflag & CMSPAR) 849 if (termios->c_cflag & CMSPAR)
847 lcr |= STP; 850 lcr |= STP;
848 851
852 spin_lock_irqsave(&uart->port.lock, flags);
853
849 port->read_status_mask = OE; 854 port->read_status_mask = OE;
850 if (termios->c_iflag & INPCK) 855 if (termios->c_iflag & INPCK)
851 port->read_status_mask |= (FE | PE); 856 port->read_status_mask |= (FE | PE);
@@ -875,8 +880,6 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
875 if (termios->c_line != N_IRDA) 880 if (termios->c_line != N_IRDA)
876 quot -= ANOMALY_05000230; 881 quot -= ANOMALY_05000230;
877 882
878 spin_lock_irqsave(&uart->port.lock, flags);
879
880 UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); 883 UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
881 884
882 /* Disable UART */ 885 /* Disable UART */
@@ -1321,6 +1324,14 @@ struct console __init *bfin_earlyserial_init(unsigned int port,
1321 struct bfin_serial_port *uart; 1324 struct bfin_serial_port *uart;
1322 struct ktermios t; 1325 struct ktermios t;
1323 1326
1327#ifdef CONFIG_SERIAL_BFIN_CONSOLE
1328 /*
1329 * If we are using early serial, don't let the normal console rewind
1330 * log buffer, since that causes things to be printed multiple times
1331 */
1332 bfin_serial_console.flags &= ~CON_PRINTBUFFER;
1333#endif
1334
1324 if (port == -1 || port >= nr_active_ports) 1335 if (port == -1 || port >= nr_active_ports)
1325 port = 0; 1336 port = 0;
1326 bfin_serial_init_ports(); 1337 bfin_serial_init_ports();
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index d4b711c9a416..3374618300af 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -18,6 +18,7 @@
18#include <linux/tty.h> 18#include <linux/tty.h>
19#include <linux/console.h> 19#include <linux/console.h>
20#include <linux/vt_kern.h> 20#include <linux/vt_kern.h>
21#include <linux/input.h>
21 22
22#define MAX_CONFIG_LEN 40 23#define MAX_CONFIG_LEN 40
23 24
@@ -37,6 +38,61 @@ static struct tty_driver *kgdb_tty_driver;
37static int kgdb_tty_line; 38static int kgdb_tty_line;
38 39
39#ifdef CONFIG_KDB_KEYBOARD 40#ifdef CONFIG_KDB_KEYBOARD
41static int kgdboc_reset_connect(struct input_handler *handler,
42 struct input_dev *dev,
43 const struct input_device_id *id)
44{
45 input_reset_device(dev);
46
47 /* Return an error - we do not want to bind, just to reset */
48 return -ENODEV;
49}
50
51static void kgdboc_reset_disconnect(struct input_handle *handle)
52{
53 /* We do not expect anyone to actually bind to us */
54 BUG();
55}
56
57static const struct input_device_id kgdboc_reset_ids[] = {
58 {
59 .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
60 .evbit = { BIT_MASK(EV_KEY) },
61 },
62 { }
63};
64
65static struct input_handler kgdboc_reset_handler = {
66 .connect = kgdboc_reset_connect,
67 .disconnect = kgdboc_reset_disconnect,
68 .name = "kgdboc_reset",
69 .id_table = kgdboc_reset_ids,
70};
71
72static DEFINE_MUTEX(kgdboc_reset_mutex);
73
74static void kgdboc_restore_input_helper(struct work_struct *dummy)
75{
76 /*
77 * We need to take a mutex to prevent several instances of
78 * this work from running on different CPUs so they don't try
79 * to register an already-registered handler again.
80 */
81 mutex_lock(&kgdboc_reset_mutex);
82
83 if (input_register_handler(&kgdboc_reset_handler) == 0)
84 input_unregister_handler(&kgdboc_reset_handler);
85
86 mutex_unlock(&kgdboc_reset_mutex);
87}
88
89static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
90
91static void kgdboc_restore_input(void)
92{
93 schedule_work(&kgdboc_restore_input_work);
94}
95
40static int kgdboc_register_kbd(char **cptr) 96static int kgdboc_register_kbd(char **cptr)
41{ 97{
42 if (strncmp(*cptr, "kbd", 3) == 0) { 98 if (strncmp(*cptr, "kbd", 3) == 0) {
@@ -64,10 +120,12 @@ static void kgdboc_unregister_kbd(void)
64 i--; 120 i--;
65 } 121 }
66 } 122 }
123 flush_work_sync(&kgdboc_restore_input_work);
67} 124}
68#else /* ! CONFIG_KDB_KEYBOARD */ 125#else /* ! CONFIG_KDB_KEYBOARD */
69#define kgdboc_register_kbd(x) 0 126#define kgdboc_register_kbd(x) 0
70#define kgdboc_unregister_kbd() 127#define kgdboc_unregister_kbd()
128#define kgdboc_restore_input()
71#endif /* ! CONFIG_KDB_KEYBOARD */ 129#endif /* ! CONFIG_KDB_KEYBOARD */
72 130
73static int kgdboc_option_setup(char *opt) 131static int kgdboc_option_setup(char *opt)
@@ -231,6 +289,7 @@ static void kgdboc_post_exp_handler(void)
231 dbg_restore_graphics = 0; 289 dbg_restore_graphics = 0;
232 con_debug_leave(); 290 con_debug_leave();
233 } 291 }
292 kgdboc_restore_input();
234} 293}
235 294
236static struct kgdb_io kgdboc_io_ops = { 295static struct kgdb_io kgdboc_io_ops = {
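The kgdboc addition leans on a property of the input core: input_register_handler() immediately offers every existing device to the new handler's ->connect(). Here connect() resets each device and returns -ENODEV, so nothing stays bound, and the immediate unregister leaves the system untouched except for the cleared key state:

    if (input_register_handler(&kgdboc_reset_handler) == 0)
            input_unregister_handler(&kgdboc_reset_handler);

This runs from a workqueue rather than the debugger's own context because the input core calls can sleep.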
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
index ae2cdf48b74c..8a5caa30b85f 100644
--- a/drivers/staging/ath6kl/Kconfig
+++ b/drivers/staging/ath6kl/Kconfig
@@ -102,7 +102,7 @@ config AR600x_BT_RESET_PIN
102 102
103config ATH6KL_CFG80211 103config ATH6KL_CFG80211
104 bool "CFG80211 support" 104 bool "CFG80211 support"
105 depends on ATH6K_LEGACY 105 depends on ATH6K_LEGACY && CFG80211
106 help 106 help
107 Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space. 107 Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space.
108 108
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index c5a6d6c16735..a659f7047373 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -1126,7 +1126,7 @@ ar6000_transfer_bin_file(AR_SOFTC_T *ar, AR6K_BIN_FILE file, A_UINT32 address, A
1126 if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) { 1126 if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) {
1127 A_UINT32 param; 1127 A_UINT32 param;
1128 1128
1129 status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(((A_UINT32)fw_entry->data) + board_data_size), board_ext_data_size); 1129 status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(fw_entry->data + board_data_size), board_ext_data_size);
1130 1130
1131 if (status != A_OK) { 1131 if (status != A_OK) {
1132 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); 1132 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
@@ -3030,7 +3030,8 @@ ar6000_data_tx(struct sk_buff *skb, struct net_device *dev)
3030 A_UINT8 csumDest=0; 3030 A_UINT8 csumDest=0;
3031 A_UINT8 csum=skb->ip_summed; 3031 A_UINT8 csum=skb->ip_summed;
3032 if(csumOffload && (csum==CHECKSUM_PARTIAL)){ 3032 if(csumOffload && (csum==CHECKSUM_PARTIAL)){
3033 csumStart=skb->csum_start-(skb->network_header-skb->head)+sizeof(ATH_LLC_SNAP_HDR); 3033 csumStart = (skb->head + skb->csum_start - skb_network_header(skb) +
3034 sizeof(ATH_LLC_SNAP_HDR));
3034 csumDest=skb->csum_offset+csumStart; 3035 csumDest=skb->csum_offset+csumStart;
3035 } 3036 }
3036#endif 3037#endif
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index c94ad29eeb4d..7269d0a1d618 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -808,7 +808,7 @@ ar6k_cfg80211_scanComplete_event(AR_SOFTC_T *ar, A_STATUS status)
808 808
809static int 809static int
810ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, 810ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
811 A_UINT8 key_index, const A_UINT8 *mac_addr, 811 A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
812 struct key_params *params) 812 struct key_params *params)
813{ 813{
814 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); 814 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
@@ -901,7 +901,7 @@ ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
901 901
902static int 902static int
903ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, 903ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
904 A_UINT8 key_index, const A_UINT8 *mac_addr) 904 A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr)
905{ 905{
906 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); 906 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
907 907
@@ -936,7 +936,8 @@ ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
936 936
937static int 937static int
938ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, 938ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
939 A_UINT8 key_index, const A_UINT8 *mac_addr, void *cookie, 939 A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
940 void *cookie,
940 void (*callback)(void *cookie, struct key_params*)) 941 void (*callback)(void *cookie, struct key_params*))
941{ 942{
942 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); 943 AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 80cfa8669585..b68a7e5173be 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -165,7 +165,7 @@ static void update_mac_addresses(struct batman_if *batman_if)
165 batman_if->net_dev->dev_addr, ETH_ALEN); 165 batman_if->net_dev->dev_addr, ETH_ALEN);
166} 166}
167 167
168static void check_known_mac_addr(uint8_t *addr) 168static void check_known_mac_addr(struct net_device *net_dev)
169{ 169{
170 struct batman_if *batman_if; 170 struct batman_if *batman_if;
171 171
@@ -175,11 +175,16 @@ static void check_known_mac_addr(uint8_t *addr)
175 (batman_if->if_status != IF_TO_BE_ACTIVATED)) 175 (batman_if->if_status != IF_TO_BE_ACTIVATED))
176 continue; 176 continue;
177 177
178 if (!compare_orig(batman_if->net_dev->dev_addr, addr)) 178 if (batman_if->net_dev == net_dev)
179 continue;
180
181 if (!compare_orig(batman_if->net_dev->dev_addr,
182 net_dev->dev_addr))
179 continue; 183 continue;
180 184
181 pr_warning("The newly added mac address (%pM) already exists " 185 pr_warning("The newly added mac address (%pM) already exists "
182 "on: %s\n", addr, batman_if->net_dev->name); 186 "on: %s\n", net_dev->dev_addr,
187 batman_if->net_dev->name);
183 pr_warning("It is strongly recommended to keep mac addresses " 188 pr_warning("It is strongly recommended to keep mac addresses "
184 "unique to avoid problems!\n"); 189 "unique to avoid problems!\n");
185 } 190 }
@@ -430,7 +435,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
430 atomic_set(&batman_if->refcnt, 0); 435 atomic_set(&batman_if->refcnt, 0);
431 hardif_hold(batman_if); 436 hardif_hold(batman_if);
432 437
433 check_known_mac_addr(batman_if->net_dev->dev_addr); 438 check_known_mac_addr(batman_if->net_dev);
434 439
435 spin_lock(&if_list_lock); 440 spin_lock(&if_list_lock);
436 list_add_tail_rcu(&batman_if->list, &if_list); 441 list_add_tail_rcu(&batman_if->list, &if_list);
@@ -515,7 +520,7 @@ static int hard_if_event(struct notifier_block *this,
515 goto out; 520 goto out;
516 } 521 }
517 522
518 check_known_mac_addr(batman_if->net_dev->dev_addr); 523 check_known_mac_addr(batman_if->net_dev);
519 update_mac_addresses(batman_if); 524 update_mac_addresses(batman_if);
520 525
521 bat_priv = netdev_priv(batman_if->soft_iface); 526 bat_priv = netdev_priv(batman_if->soft_iface);
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 90102631330b..657b69e6b957 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -1000,10 +1000,10 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
1000 1000
1001/* find a suitable router for this originator, and use 1001/* find a suitable router for this originator, and use
1002 * bonding if possible. */ 1002 * bonding if possible. */
1003struct neigh_node *find_router(struct orig_node *orig_node, 1003struct neigh_node *find_router(struct bat_priv *bat_priv,
1004 struct orig_node *orig_node,
1004 struct batman_if *recv_if) 1005 struct batman_if *recv_if)
1005{ 1006{
1006 struct bat_priv *bat_priv;
1007 struct orig_node *primary_orig_node; 1007 struct orig_node *primary_orig_node;
1008 struct orig_node *router_orig; 1008 struct orig_node *router_orig;
1009 struct neigh_node *router, *first_candidate, *best_router; 1009 struct neigh_node *router, *first_candidate, *best_router;
@@ -1019,13 +1019,9 @@ struct neigh_node *find_router(struct orig_node *orig_node,
1019 /* without bonding, the first node should 1019 /* without bonding, the first node should
1020 * always choose the default router. */ 1020 * always choose the default router. */
1021 1021
1022 if (!recv_if)
1023 return orig_node->router;
1024
1025 bat_priv = netdev_priv(recv_if->soft_iface);
1026 bonding_enabled = atomic_read(&bat_priv->bonding_enabled); 1022 bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
1027 1023
1028 if (!bonding_enabled) 1024 if ((!recv_if) && (!bonding_enabled))
1029 return orig_node->router; 1025 return orig_node->router;
1030 1026
1031 router_orig = orig_node->router->orig_node; 1027 router_orig = orig_node->router->orig_node;
@@ -1154,7 +1150,7 @@ static int route_unicast_packet(struct sk_buff *skb,
1154 orig_node = ((struct orig_node *) 1150 orig_node = ((struct orig_node *)
1155 hash_find(bat_priv->orig_hash, unicast_packet->dest)); 1151 hash_find(bat_priv->orig_hash, unicast_packet->dest));
1156 1152
1157 router = find_router(orig_node, recv_if); 1153 router = find_router(bat_priv, orig_node, recv_if);
1158 1154
1159 if (!router) { 1155 if (!router) {
1160 spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); 1156 spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 06ea99df3706..92674c8d9c03 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -38,8 +38,8 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
38int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); 38int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
39int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); 39int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
40int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if); 40int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
41struct neigh_node *find_router(struct orig_node *orig_node, 41struct neigh_node *find_router(struct bat_priv *bat_priv,
42 struct batman_if *recv_if); 42 struct orig_node *orig_node, struct batman_if *recv_if);
43void update_bonding_candidates(struct bat_priv *bat_priv, 43void update_bonding_candidates(struct bat_priv *bat_priv,
44 struct orig_node *orig_node); 44 struct orig_node *orig_node);
45 45
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c
index 0dac50d69c03..0459413ff67f 100644
--- a/drivers/staging/batman-adv/unicast.c
+++ b/drivers/staging/batman-adv/unicast.c
@@ -224,7 +224,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
224 if (!orig_node) 224 if (!orig_node)
225 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 225 orig_node = transtable_search(bat_priv, ethhdr->h_dest);
226 226
227 router = find_router(orig_node, NULL); 227 router = find_router(bat_priv, orig_node, NULL);
228 228
229 if (!router) 229 if (!router)
230 goto unlock; 230 goto unlock;
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 77fdfe24d999..fead9c56162e 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1001,13 +1001,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1001 } 1001 }
1002#endif 1002#endif
1003 case IOCTL_BE_BUCKET_SIZE: 1003 case IOCTL_BE_BUCKET_SIZE:
1004 Adapter->BEBucketSize = *(PULONG)arg; 1004 Status = 0;
1005 Status = STATUS_SUCCESS; 1005 if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
1006 Status = -EFAULT;
1006 break; 1007 break;
1007 1008
1008 case IOCTL_RTPS_BUCKET_SIZE: 1009 case IOCTL_RTPS_BUCKET_SIZE:
1009 Adapter->rtPSBucketSize = *(PULONG)arg; 1010 Status = 0;
1010 Status = STATUS_SUCCESS; 1011 if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
1012 Status = -EFAULT;
1011 break; 1013 break;
1012 case IOCTL_CHIP_RESET: 1014 case IOCTL_CHIP_RESET:
1013 { 1015 {
@@ -1028,11 +1030,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1028 case IOCTL_QOS_THRESHOLD: 1030 case IOCTL_QOS_THRESHOLD:
1029 { 1031 {
1030 USHORT uiLoopIndex; 1032 USHORT uiLoopIndex;
1031 for(uiLoopIndex = 0 ; uiLoopIndex < NO_OF_QUEUES ; uiLoopIndex++) 1033
1032 { 1034 Status = 0;
1033 Adapter->PackInfo[uiLoopIndex].uiThreshold = *(PULONG)arg; 1035 for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
1036 if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
1037 (unsigned long __user *)arg)) {
1038 Status = -EFAULT;
1039 break;
1040 }
1034 } 1041 }
1035 Status = STATUS_SUCCESS;
1036 break; 1042 break;
1037 } 1043 }
1038 1044
@@ -1093,7 +1099,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1093 } 1099 }
1094 case IOCTL_BCM_GET_CURRENT_STATUS: 1100 case IOCTL_BCM_GET_CURRENT_STATUS:
1095 { 1101 {
1096 LINK_STATE *plink_state = NULL; 1102 LINK_STATE plink_state;
1103
1097 /* Copy Ioctl Buffer structure */ 1104 /* Copy Ioctl Buffer structure */
1098 if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) 1105 if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
1099 { 1106 {
@@ -1101,13 +1108,19 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1101 Status = -EFAULT; 1108 Status = -EFAULT;
1102 break; 1109 break;
1103 } 1110 }
1104 plink_state = (LINK_STATE*)arg; 1111 if (IoBuffer.OutputLength != sizeof(plink_state)) {
1105 plink_state->bIdleMode = (UCHAR)Adapter->IdleMode; 1112 Status = -EINVAL;
1106 plink_state->bShutdownMode = Adapter->bShutStatus; 1113 break;
1107 plink_state->ucLinkStatus = (UCHAR)Adapter->LinkStatus; 1114 }
1108 if(copy_to_user(IoBuffer.OutputBuffer, 1115
1109 (PUCHAR)plink_state, (UINT)IoBuffer.OutputLength)) 1116 if (copy_from_user(&plink_state, (void __user *)arg, sizeof(plink_state))) {
1110 { 1117 Status = -EFAULT;
1118 break;
1119 }
1120 plink_state.bIdleMode = (UCHAR)Adapter->IdleMode;
1121 plink_state.bShutdownMode = Adapter->bShutStatus;
1122 plink_state.ucLinkStatus = (UCHAR)Adapter->LinkStatus;
1123 if (copy_to_user(IoBuffer.OutputBuffer, &plink_state, IoBuffer.OutputLength)) {
1111 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n"); 1124 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
1112 Status = -EFAULT; 1125 Status = -EFAULT;
1113 break; 1126 break;
@@ -1331,7 +1344,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
1331 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status); 1344 BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status);
1332 return -EFAULT; 1345 return -EFAULT;
1333 } 1346 }
1334 uiSectorSize = *((PUINT)(IoBuffer.InputBuffer)); /* FIXME: unchecked __user access */ 1347 if (get_user(uiSectorSize, (unsigned int __user *)IoBuffer.InputBuffer))
1348 return -EFAULT;
1349
1335 if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) 1350 if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE))
1336 { 1351 {
1337 1352
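Every hunk in this file applies the same rule: an ioctl's arg is a user-space address, and dereferencing it directly both skips access checks and oopses on an unmapped pointer. The pattern, with an illustrative variable:

    unsigned long val;

    val = *(unsigned long *)arg;                    /* WRONG: __user ptr */

    if (get_user(val, (unsigned long __user *)arg))
            return -EFAULT;                         /* checked fetch     */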
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
index c3ba9bb9b116..c8f1cf1b4409 100644
--- a/drivers/staging/brcm80211/README
+++ b/drivers/staging/brcm80211/README
@@ -90,5 +90,5 @@ Contact Info:
90============= 90=============
91Brett Rudley brudley@broadcom.com 91Brett Rudley brudley@broadcom.com
92Henry Ptasinski henryp@broadcom.com 92Henry Ptasinski henryp@broadcom.com
93Nohee Ko noheek@broadcom.com 93Dowan Kim dowan@broadcom.com
94 94
diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO
index 8803d300b531..dbf904184899 100644
--- a/drivers/staging/brcm80211/TODO
+++ b/drivers/staging/brcm80211/TODO
@@ -45,5 +45,5 @@ Contact
45===== 45=====
46Brett Rudley <brudley@broadcom.com> 46Brett Rudley <brudley@broadcom.com>
47Henry Ptasinski <henryp@broadcom.com> 47Henry Ptasinski <henryp@broadcom.com>
48Nohee Ko <noheek@broadcom.com> 48Dowan Kim <dowan@broadcom.com>
49 49
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index bbbe7c5f7492..9335f02029aa 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -2222,8 +2222,6 @@ int dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
2222 ASSERT(net); 2222 ASSERT(net);
2223 2223
2224 ASSERT(!net->netdev_ops); 2224 ASSERT(!net->netdev_ops);
2225 net->netdev_ops = &dhd_ops_virt;
2226
2227 net->netdev_ops = &dhd_ops_pri; 2225 net->netdev_ops = &dhd_ops_pri;
2228 2226
2229 /* 2227 /*
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index 3f29488d9c72..ea0825238d53 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -95,12 +95,12 @@ static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy,
95 struct net_device *dev, 95 struct net_device *dev,
96 u8 key_idx); 96 u8 key_idx);
97static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, 97static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
98 u8 key_idx, const u8 *mac_addr, 98 u8 key_idx, bool pairwise, const u8 *mac_addr,
99 struct key_params *params); 99 struct key_params *params);
100static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, 100static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
101 u8 key_idx, const u8 *mac_addr); 101 u8 key_idx, bool pairwise, const u8 *mac_addr);
102static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, 102static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
103 u8 key_idx, const u8 *mac_addr, 103 u8 key_idx, bool pairwise, const u8 *mac_addr,
104 void *cookie, void (*callback) (void *cookie, 104 void *cookie, void (*callback) (void *cookie,
105 struct 105 struct
106 key_params * 106 key_params *
@@ -1615,7 +1615,7 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
1615 1615
1616static s32 1616static s32
1617wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, 1617wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
1618 u8 key_idx, const u8 *mac_addr, 1618 u8 key_idx, bool pairwise, const u8 *mac_addr,
1619 struct key_params *params) 1619 struct key_params *params)
1620{ 1620{
1621 struct wl_wsec_key key; 1621 struct wl_wsec_key key;
@@ -1700,7 +1700,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
1700 1700
1701static s32 1701static s32
1702wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, 1702wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
1703 u8 key_idx, const u8 *mac_addr) 1703 u8 key_idx, bool pairwise, const u8 *mac_addr)
1704{ 1704{
1705 struct wl_wsec_key key; 1705 struct wl_wsec_key key;
1706 s32 err = 0; 1706 s32 err = 0;
@@ -1756,7 +1756,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
1756 1756
1757static s32 1757static s32
1758wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, 1758wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
1759 u8 key_idx, const u8 *mac_addr, void *cookie, 1759 u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
1760 void (*callback) (void *cookie, struct key_params * params)) 1760 void (*callback) (void *cookie, struct key_params * params))
1761{ 1761{
1762 struct key_params params; 1762 struct key_params params;
diff --git a/drivers/staging/cpia/cpia.c b/drivers/staging/cpia/cpia.c
index 933ae4c8cb9a..0e740b8dafc3 100644
--- a/drivers/staging/cpia/cpia.c
+++ b/drivers/staging/cpia/cpia.c
@@ -3184,13 +3184,9 @@ static int cpia_open(struct file *file)
3184 goto oops; 3184 goto oops;
3185 } 3185 }
3186 3186
3187 err = -EINTR;
3188 if(signal_pending(current))
3189 goto oops;
3190
3191 /* Set ownership of /proc/cpia/videoX to current user */ 3187 /* Set ownership of /proc/cpia/videoX to current user */
3192 if(cam->proc_entry) 3188 if(cam->proc_entry)
3193 cam->proc_entry->uid = current_uid(); 3189 cam->proc_entry->uid = current_euid();
3194 3190
3195 /* set mark for loading first frame uncompressed */ 3191 /* set mark for loading first frame uncompressed */
3196 cam->first_frame = 1; 3192 cam->first_frame = 1;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
index 87a6487531c2..20d509836d9e 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
@@ -286,7 +286,6 @@ int ft1000_CreateDevice(struct ft1000_device *dev)
286 pid = kernel_thread (exec_mknod, (void *)info, 0); 286 pid = kernel_thread (exec_mknod, (void *)info, 0);
287 287
288 // initialize application information 288 // initialize application information
289 info->appcnt = 0;
290 289
291// if (ft1000_flarion_cnt == 0) { 290// if (ft1000_flarion_cnt == 0) {
292// 291//
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c
index 702a478d5542..a99e900ec4c9 100644
--- a/drivers/staging/hv/hv_utils.c
+++ b/drivers/staging/hv/hv_utils.c
@@ -212,9 +212,6 @@ static void heartbeat_onchannelcallback(void *context)
212 recvlen, requestid); 212 recvlen, requestid);
213 213
214 icmsghdrp = (struct icmsg_hdr *)&buf[ 214 icmsghdrp = (struct icmsg_hdr *)&buf[
215 sizeof(struct vmbuspipe_hdr)];
216
217 icmsghdrp = (struct icmsg_hdr *)&buf[
218 sizeof(struct vmbuspipe_hdr)]; 215 sizeof(struct vmbuspipe_hdr)];
219 216
220 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { 217 if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
index 463e5cba8307..9618c7997461 100644
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_app_interface.c
@@ -244,12 +244,12 @@ static int intel_sst_mmap_play_capture(u32 str_id,
244 int retval, i; 244 int retval, i;
245 struct stream_info *stream; 245 struct stream_info *stream;
246 struct snd_sst_mmap_buff_entry *buf_entry; 246 struct snd_sst_mmap_buff_entry *buf_entry;
247 struct snd_sst_mmap_buff_entry *tmp_buf;
247 248
248 pr_debug("sst:called for str_id %d\n", str_id); 249 pr_debug("sst:called for str_id %d\n", str_id);
249 retval = sst_validate_strid(str_id); 250 retval = sst_validate_strid(str_id);
250 if (retval) 251 if (retval)
251 return -EINVAL; 252 return -EINVAL;
252 BUG_ON(!mmap_buf);
253 253
254 stream = &sst_drv_ctx->streams[str_id]; 254 stream = &sst_drv_ctx->streams[str_id];
255 if (stream->mmapped != true) 255 if (stream->mmapped != true)
@@ -262,14 +262,24 @@ static int intel_sst_mmap_play_capture(u32 str_id,
262 stream->curr_bytes = 0; 262 stream->curr_bytes = 0;
263 stream->cumm_bytes = 0; 263 stream->cumm_bytes = 0;
264 264
265 tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL);
266 if (!tmp_buf)
267 return -ENOMEM;
268 if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff,
269 mmap_buf->entries * sizeof(*tmp_buf))) {
270 retval = -EFAULT;
271 goto out_free;
272 }
273
265 pr_debug("sst:new buffers count %d status %d\n", 274 pr_debug("sst:new buffers count %d status %d\n",
266 mmap_buf->entries, stream->status); 275 mmap_buf->entries, stream->status);
267 buf_entry = mmap_buf->buff; 276 buf_entry = tmp_buf;
268 for (i = 0; i < mmap_buf->entries; i++) { 277 for (i = 0; i < mmap_buf->entries; i++) {
269 BUG_ON(!buf_entry);
270 bufs = kzalloc(sizeof(*bufs), GFP_KERNEL); 278 bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
271 if (!bufs) 279 if (!bufs) {
272 return -ENOMEM; 280 retval = -ENOMEM;
281 goto out_free;
282 }
273 bufs->size = buf_entry->size; 283 bufs->size = buf_entry->size;
274 bufs->offset = buf_entry->offset; 284 bufs->offset = buf_entry->offset;
275 bufs->addr = sst_drv_ctx->mmap_mem; 285 bufs->addr = sst_drv_ctx->mmap_mem;
@@ -293,13 +303,15 @@ static int intel_sst_mmap_play_capture(u32 str_id,
293 if (sst_play_frame(str_id) < 0) { 303 if (sst_play_frame(str_id) < 0) {
294 pr_warn("sst: play frames fail\n"); 304 pr_warn("sst: play frames fail\n");
295 mutex_unlock(&stream->lock); 305 mutex_unlock(&stream->lock);
296 return -EIO; 306 retval = -EIO;
307 goto out_free;
297 } 308 }
298 } else if (stream->ops == STREAM_OPS_CAPTURE) { 309 } else if (stream->ops == STREAM_OPS_CAPTURE) {
299 if (sst_capture_frame(str_id) < 0) { 310 if (sst_capture_frame(str_id) < 0) {
300 pr_warn("sst: capture frame fail\n"); 311 pr_warn("sst: capture frame fail\n");
301 mutex_unlock(&stream->lock); 312 mutex_unlock(&stream->lock);
302 return -EIO; 313 retval = -EIO;
314 goto out_free;
303 } 315 }
304 } 316 }
305 } 317 }
@@ -314,6 +326,9 @@ static int intel_sst_mmap_play_capture(u32 str_id,
314 if (retval >= 0) 326 if (retval >= 0)
315 retval = stream->cumm_bytes; 327 retval = stream->cumm_bytes;
316 pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval); 328 pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval);
329
330out_free:
331 kfree(tmp_buf);
317 return retval; 332 return retval;
318} 333}
319 334
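
The hunks above stop walking mmap_buf->buff directly (in the old code that was
a user-space pointer dereferenced from kernel context) and instead snapshot the
entry array into kernel memory first, with a single out_free label releasing it
on every exit path. A minimal sketch of the pattern, using illustrative names
rather than the driver's actual types:

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct entry { __u32 size; __u32 offset; };

    static int snapshot_user_entries(const struct entry __user *ubuf,
                                     unsigned int n, struct entry **out)
    {
            struct entry *tmp;

            /* kcalloc() rejects n * size overflow and returns zeroed memory */
            tmp = kcalloc(n, sizeof(*tmp), GFP_KERNEL);
            if (!tmp)
                    return -ENOMEM;
            if (copy_from_user(tmp, ubuf, n * sizeof(*tmp))) {
                    kfree(tmp);
                    return -EFAULT;
            }
            *out = tmp;     /* caller iterates the stable kernel copy, then kfree()s it */
            return 0;
    }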
@@ -377,7 +392,7 @@ static int snd_sst_fill_kernel_list(struct stream_info *stream,
377{ 392{
378 struct sst_stream_bufs *stream_bufs; 393 struct sst_stream_bufs *stream_bufs;
379 unsigned long index, mmap_len; 394 unsigned long index, mmap_len;
380 unsigned char *bufp; 395 unsigned char __user *bufp;
381 unsigned long size, copied_size; 396 unsigned long size, copied_size;
382 int retval = 0, add_to_list = 0; 397 int retval = 0, add_to_list = 0;
383 static int sent_offset; 398 static int sent_offset;
@@ -512,9 +527,7 @@ static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
512 /* copy to user */ 527 /* copy to user */
513 list_for_each_entry_safe(entry, _entry, 528 list_for_each_entry_safe(entry, _entry,
514 copy_to_list, node) { 529 copy_to_list, node) {
515 if (copy_to_user((void *) 530 if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset,
516 iovec[entry->iov_index].iov_base +
517 entry->iov_offset,
518 kbufs->addr + entry->offset, 531 kbufs->addr + entry->offset,
519 entry->size)) { 532 entry->size)) {
520 /* Clean up the list and return error */ 533 /* Clean up the list and return error */
@@ -590,7 +603,7 @@ static int intel_sst_read_write(unsigned int str_id, char __user *buf,
590 buf, (int) count, (int) stream->status); 603 buf, (int) count, (int) stream->status);
591 604
592 stream->buf_type = SST_BUF_USER_STATIC; 605 stream->buf_type = SST_BUF_USER_STATIC;
593 iovec.iov_base = (void *)buf; 606 iovec.iov_base = buf;
594 iovec.iov_len = count; 607 iovec.iov_len = count;
595 nr_segs = 1; 608 nr_segs = 1;
596 609
@@ -838,7 +851,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
838 break; 851 break;
839 852
840 case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): { 853 case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
841 struct snd_sst_params *str_param = (struct snd_sst_params *)arg; 854 struct snd_sst_params str_param;
842 855
843 pr_debug("sst: IOCTL_SET_PARAMS recieved!\n"); 856 pr_debug("sst: IOCTL_SET_PARAMS recieved!\n");
844 if (minor != STREAM_MODULE) { 857 if (minor != STREAM_MODULE) {
@@ -846,17 +859,25 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
846 break; 859 break;
847 } 860 }
848 861
862 if (copy_from_user(&str_param, (void __user *)arg,
863 sizeof(str_param))) {
864 retval = -EFAULT;
865 break;
866 }
867
849 if (!str_id) { 868 if (!str_id) {
850 869
851 retval = sst_get_stream(str_param); 870 retval = sst_get_stream(&str_param);
852 if (retval > 0) { 871 if (retval > 0) {
853 struct stream_info *str_info; 872 struct stream_info *str_info;
873 char __user *dest;
874
854 sst_drv_ctx->stream_cnt++; 875 sst_drv_ctx->stream_cnt++;
855 data->str_id = retval; 876 data->str_id = retval;
856 str_info = &sst_drv_ctx->streams[retval]; 877 str_info = &sst_drv_ctx->streams[retval];
857 str_info->src = SST_DRV; 878 str_info->src = SST_DRV;
858 retval = copy_to_user(&str_param->stream_id, 879 dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id);
859 &retval, sizeof(__u32)); 880 retval = copy_to_user(dest, &retval, sizeof(__u32));
860 if (retval) 881 if (retval)
861 retval = -EFAULT; 882 retval = -EFAULT;
862 } else { 883 } else {
@@ -866,16 +887,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
866 } else { 887 } else {
867 pr_debug("sst: SET_STREAM_PARAMS recieved!\n"); 888 pr_debug("sst: SET_STREAM_PARAMS recieved!\n");
868 /* allocated set params only */ 889 /* allocated set params only */
869 retval = sst_set_stream_param(str_id, str_param); 890 retval = sst_set_stream_param(str_id, &str_param);
870 /* Block the call for reply */ 891 /* Block the call for reply */
871 if (!retval) { 892 if (!retval) {
872 int sfreq = 0, word_size = 0, num_channel = 0; 893 int sfreq = 0, word_size = 0, num_channel = 0;
873 sfreq = str_param->sparams.uc.pcm_params.sfreq; 894 sfreq = str_param.sparams.uc.pcm_params.sfreq;
874 word_size = str_param->sparams. 895 word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz;
875 uc.pcm_params.pcm_wd_sz; 896 num_channel = str_param.sparams.uc.pcm_params.num_chan;
876 num_channel = str_param-> 897 if (str_param.ops == STREAM_OPS_CAPTURE) {
877 sparams.uc.pcm_params.num_chan;
878 if (str_param->ops == STREAM_OPS_CAPTURE) {
879 sst_drv_ctx->scard_ops->\ 898 sst_drv_ctx->scard_ops->\
880 set_pcm_audio_params(sfreq, 899 set_pcm_audio_params(sfreq,
881 word_size, num_channel); 900 word_size, num_channel);
@@ -885,41 +904,39 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
885 break; 904 break;
886 } 905 }
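
With str_param now a kernel stack copy, the stream id can no longer be written
back through str_param->stream_id; the hunk computes the user-space destination
from arg with offsetof() instead. A minimal sketch of that write-back, with
illustrative names (put_user() on the member pointer would be an equivalent
idiom):

    #include <linux/stddef.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct params { __u32 stream_id; /* ... more members ... */ };

    static int put_stream_id(unsigned long arg, __u32 id)
    {
            char __user *dest = (char __user *)arg +
                                offsetof(struct params, stream_id);

            /* copies only the one member; the rest of the struct is untouched */
            return copy_to_user(dest, &id, sizeof(id)) ? -EFAULT : 0;
    }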
887 case _IOC_NR(SNDRV_SST_SET_VOL): { 906 case _IOC_NR(SNDRV_SST_SET_VOL): {
888 struct snd_sst_vol *set_vol; 907 struct snd_sst_vol set_vol;
889 struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg; 908
909 if (copy_from_user(&set_vol, (void __user *)arg,
910 sizeof(set_vol))) {
911 pr_debug("sst: copy failed\n");
912 retval = -EFAULT;
913 break;
914 }
890 pr_debug("sst: SET_VOLUME recieved for %d!\n", 915 pr_debug("sst: SET_VOLUME recieved for %d!\n",
891 rec_vol->stream_id); 916 set_vol.stream_id);
892 if (minor == STREAM_MODULE && rec_vol->stream_id == 0) { 917 if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
893 pr_debug("sst: invalid operation!\n"); 918 pr_debug("sst: invalid operation!\n");
894 retval = -EPERM; 919 retval = -EPERM;
895 break; 920 break;
896 } 921 }
897 set_vol = kzalloc(sizeof(*set_vol), GFP_ATOMIC); 922 retval = sst_set_vol(&set_vol);
898 if (!set_vol) {
899 pr_debug("sst: mem allocation failed\n");
900 retval = -ENOMEM;
901 break;
902 }
903 if (copy_from_user(set_vol, rec_vol, sizeof(*set_vol))) {
904 pr_debug("sst: copy failed\n");
905 retval = -EFAULT;
906 break;
907 }
908 retval = sst_set_vol(set_vol);
909 kfree(set_vol);
910 break; 923 break;
911 } 924 }
912 case _IOC_NR(SNDRV_SST_GET_VOL): { 925 case _IOC_NR(SNDRV_SST_GET_VOL): {
913 struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
914 struct snd_sst_vol get_vol; 926 struct snd_sst_vol get_vol;
927
928 if (copy_from_user(&get_vol, (void __user *)arg,
929 sizeof(get_vol))) {
930 retval = -EFAULT;
931 break;
932 }
915 pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n", 933 pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n",
916 rec_vol->stream_id); 934 get_vol.stream_id);
917 if (minor == STREAM_MODULE && rec_vol->stream_id == 0) { 935 if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
918 pr_debug("sst: invalid operation!\n"); 936 pr_debug("sst: invalid operation!\n");
919 retval = -EPERM; 937 retval = -EPERM;
920 break; 938 break;
921 } 939 }
922 get_vol.stream_id = rec_vol->stream_id;
923 retval = sst_get_vol(&get_vol); 940 retval = sst_get_vol(&get_vol);
924 if (retval) { 941 if (retval) {
925 retval = -EIO; 942 retval = -EIO;
@@ -928,7 +945,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
928 pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n", 945 pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
929 get_vol.stream_id, get_vol.volume, 946 get_vol.stream_id, get_vol.volume,
930 get_vol.ramp_duration, get_vol.ramp_type); 947 get_vol.ramp_duration, get_vol.ramp_type);
931 if (copy_to_user((struct snd_sst_vol *)arg, 948 if (copy_to_user((struct snd_sst_vol __user *)arg,
932 &get_vol, sizeof(get_vol))) { 949 &get_vol, sizeof(get_vol))) {
933 retval = -EFAULT; 950 retval = -EFAULT;
934 break; 951 break;
@@ -938,25 +955,20 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
938 } 955 }
939 956
940 case _IOC_NR(SNDRV_SST_MUTE): { 957 case _IOC_NR(SNDRV_SST_MUTE): {
941 struct snd_sst_mute *set_mute; 958 struct snd_sst_mute set_mute;
942 struct snd_sst_vol *rec_mute = (struct snd_sst_vol *)arg; 959
943 pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n", 960 if (copy_from_user(&set_mute, (void __user *)arg,
944 rec_mute->stream_id); 961 sizeof(set_mute))) {
945 if (minor == STREAM_MODULE && rec_mute->stream_id == 0) { 962 retval = -EFAULT;
946 retval = -EPERM;
947 break;
948 }
949 set_mute = kzalloc(sizeof(*set_mute), GFP_ATOMIC);
950 if (!set_mute) {
951 retval = -ENOMEM;
952 break; 963 break;
953 } 964 }
954 if (copy_from_user(set_mute, rec_mute, sizeof(*set_mute))) { 965 pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
955 retval = -EFAULT; 966 set_mute.stream_id);
967 if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
968 retval = -EPERM;
956 break; 969 break;
957 } 970 }
958 retval = sst_set_mute(set_mute); 971 retval = sst_set_mute(&set_mute);
959 kfree(set_mute);
960 break; 972 break;
961 } 973 }
962 case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): { 974 case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
@@ -973,7 +985,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
973 retval = -EIO; 985 retval = -EIO;
974 break; 986 break;
975 } 987 }
976 if (copy_to_user((struct snd_sst_get_stream_params *)arg, 988 if (copy_to_user((struct snd_sst_get_stream_params __user *)arg,
977 &get_params, sizeof(get_params))) { 989 &get_params, sizeof(get_params))) {
978 retval = -EFAULT; 990 retval = -EFAULT;
979 break; 991 break;
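
The SET_VOL and GET_VOL rewrites share one shape: the user argument is copied
into a small on-stack struct up front, which removes both the GFP_ATOMIC
allocation and the old error path that returned without kfree()ing it (the
previous code leaked set_vol whenever copy_from_user failed). A minimal
sketch, with illustrative names:

    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct vol { __u32 stream_id; __s32 volume; };

    static int ioctl_set_vol(unsigned long arg)
    {
            struct vol v;

            if (copy_from_user(&v, (void __user *)arg, sizeof(v)))
                    return -EFAULT; /* nothing to free on failure */
            /* ... validate v.stream_id, then operate on the kernel copy ... */
            return 0;
    }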
@@ -983,16 +995,22 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
983 } 995 }
984 996
985 case _IOC_NR(SNDRV_SST_MMAP_PLAY): 997 case _IOC_NR(SNDRV_SST_MMAP_PLAY):
986 case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): 998 case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
999 struct snd_sst_mmap_buffs mmap_buf;
1000
987 pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n"); 1001 pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
988 if (minor != STREAM_MODULE) { 1002 if (minor != STREAM_MODULE) {
989 retval = -EBADRQC; 1003 retval = -EBADRQC;
990 break; 1004 break;
991 } 1005 }
992 retval = intel_sst_mmap_play_capture(str_id, 1006 if (copy_from_user(&mmap_buf, (void __user *)arg,
993 (struct snd_sst_mmap_buffs *)arg); 1007 sizeof(mmap_buf))) {
1008 retval = -EFAULT;
1009 break;
1010 }
1011 retval = intel_sst_mmap_play_capture(str_id, &mmap_buf);
994 break; 1012 break;
995 1013 }
996 case _IOC_NR(SNDRV_SST_STREAM_DROP): 1014 case _IOC_NR(SNDRV_SST_STREAM_DROP):
997 pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n"); 1015 pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n");
998 if (minor != STREAM_MODULE) { 1016 if (minor != STREAM_MODULE) {
@@ -1003,7 +1021,6 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1003 break; 1021 break;
1004 1022
1005 case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): { 1023 case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
1006 unsigned long long *ms = (unsigned long long *)arg;
1007 struct snd_sst_tstamp tstamp = {0}; 1024 struct snd_sst_tstamp tstamp = {0};
1008 unsigned long long time, freq, mod; 1025 unsigned long long time, freq, mod;
1009 1026
@@ -1013,14 +1030,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1013 break; 1030 break;
1014 } 1031 }
1015 memcpy_fromio(&tstamp, 1032 memcpy_fromio(&tstamp,
1016 ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) 1033 sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
1017 +(str_id * sizeof(tstamp))),
1018 sizeof(tstamp)); 1034 sizeof(tstamp));
1019 time = tstamp.samples_rendered; 1035 time = tstamp.samples_rendered;
1020 freq = (unsigned long long) tstamp.sampling_frequency; 1036 freq = (unsigned long long) tstamp.sampling_frequency;
1021 time = time * 1000; /* converting it to ms */ 1037 time = time * 1000; /* converting it to ms */
1022 mod = do_div(time, freq); 1038 mod = do_div(time, freq);
1023 if (copy_to_user(ms, &time, sizeof(*ms))) 1039 if (copy_to_user((void __user *)arg, &time,
1040 sizeof(unsigned long long)))
1024 retval = -EFAULT; 1041 retval = -EFAULT;
1025 break; 1042 break;
1026 } 1043 }
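
The timestamp hunk also tidies the mailbox arithmetic: the per-stream struct is
read with memcpy_fromio() at offset str_id * sizeof(tstamp), and the sample
count is converted to milliseconds with do_div(), which divides the 64-bit
value in place and returns the remainder. A sketch of that conversion, with
illustrative names:

    #include <linux/types.h>
    #include <asm/div64.h>

    static unsigned long long samples_to_ms(unsigned long long samples,
                                            u32 rate_hz)
    {
            unsigned long long ms = samples * 1000; /* can wrap for huge counts */

            do_div(ms, rate_hz);    /* ms now holds samples * 1000 / rate_hz */
            return ms;
    }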
@@ -1065,92 +1082,118 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1065 } 1082 }
1066 1083
1067 case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): { 1084 case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
1068 struct snd_sst_target_device *target_device; 1085 struct snd_sst_target_device target_device;
1069 1086
1070 pr_debug("sst: SET_TARGET_DEVICE recieved!\n"); 1087 pr_debug("sst: SET_TARGET_DEVICE recieved!\n");
1071 target_device = (struct snd_sst_target_device *)arg; 1088 if (copy_from_user(&target_device, (void __user *)arg,
1072 BUG_ON(!target_device); 1089 sizeof(target_device))) {
1090 retval = -EFAULT;
1091 break;
1092 }
1073 if (minor != AM_MODULE) { 1093 if (minor != AM_MODULE) {
1074 retval = -EBADRQC; 1094 retval = -EBADRQC;
1075 break; 1095 break;
1076 } 1096 }
1077 retval = sst_target_device_select(target_device); 1097 retval = sst_target_device_select(&target_device);
1078 break; 1098 break;
1079 } 1099 }
1080 1100
1081 case _IOC_NR(SNDRV_SST_DRIVER_INFO): { 1101 case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
1082 struct snd_sst_driver_info *info = 1102 struct snd_sst_driver_info info;
1083 (struct snd_sst_driver_info *)arg;
1084 1103
1085 pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n"); 1104 pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n");
1086 info->version = SST_VERSION_NUM; 1105 info.version = SST_VERSION_NUM;
1087 /* hard coding, shud get sumhow later */ 1106 /* hard coding, shud get sumhow later */
1088 info->active_pcm_streams = sst_drv_ctx->stream_cnt - 1107 info.active_pcm_streams = sst_drv_ctx->stream_cnt -
1089 sst_drv_ctx->encoded_cnt; 1108 sst_drv_ctx->encoded_cnt;
1090 info->active_enc_streams = sst_drv_ctx->encoded_cnt; 1109 info.active_enc_streams = sst_drv_ctx->encoded_cnt;
1091 info->max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM; 1110 info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
1092 info->max_enc_streams = MAX_ENC_STREAM; 1111 info.max_enc_streams = MAX_ENC_STREAM;
1093 info->buf_per_stream = sst_drv_ctx->mmap_len; 1112 info.buf_per_stream = sst_drv_ctx->mmap_len;
1113 if (copy_to_user((void __user *)arg, &info,
1114 sizeof(info)))
1115 retval = -EFAULT;
1094 break; 1116 break;
1095 } 1117 }
1096 1118
1097 case _IOC_NR(SNDRV_SST_STREAM_DECODE): { 1119 case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
1098 struct snd_sst_dbufs *param = 1120 struct snd_sst_dbufs param;
1099 (struct snd_sst_dbufs *)arg, dbufs_local; 1121 struct snd_sst_dbufs dbufs_local;
1100 int i;
1101 struct snd_sst_buffs ibufs, obufs; 1122 struct snd_sst_buffs ibufs, obufs;
1102 struct snd_sst_buff_entry ibuf_temp[param->ibufs->entries], 1123 struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
1103 obuf_temp[param->obufs->entries]; 1124 char __user *dest;
1104 1125
1105 pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n"); 1126 pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n");
1106 if (minor != STREAM_MODULE) { 1127 if (minor != STREAM_MODULE) {
1107 retval = -EBADRQC; 1128 retval = -EBADRQC;
1108 break; 1129 break;
1109 } 1130 }
1110 if (!param) { 1131 if (copy_from_user(&param, (void __user *)arg,
1111 retval = -EINVAL; 1132 sizeof(param))) {
1133 retval = -EFAULT;
1112 break; 1134 break;
1113 } 1135 }
1114 1136
1115 dbufs_local.input_bytes_consumed = param->input_bytes_consumed; 1137 dbufs_local.input_bytes_consumed = param.input_bytes_consumed;
1116 dbufs_local.output_bytes_produced = 1138 dbufs_local.output_bytes_produced =
1117 param->output_bytes_produced; 1139 param.output_bytes_produced;
1118 dbufs_local.ibufs = &ibufs; 1140
1119 dbufs_local.obufs = &obufs; 1141 if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) {
1120 dbufs_local.ibufs->entries = param->ibufs->entries; 1142 retval = -EFAULT;
1121 dbufs_local.ibufs->type = param->ibufs->type; 1143 break;
1122 dbufs_local.obufs->entries = param->obufs->entries; 1144 }
1123 dbufs_local.obufs->type = param->obufs->type; 1145 if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) {
1124 1146 retval = -EFAULT;
1125 dbufs_local.ibufs->buff_entry = ibuf_temp; 1147 break;
1126 for (i = 0; i < dbufs_local.ibufs->entries; i++) {
1127 ibuf_temp[i].buffer =
1128 param->ibufs->buff_entry[i].buffer;
1129 ibuf_temp[i].size =
1130 param->ibufs->buff_entry[i].size;
1131 } 1148 }
1132 dbufs_local.obufs->buff_entry = obuf_temp; 1149
1133 for (i = 0; i < dbufs_local.obufs->entries; i++) { 1150 ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL);
1134 obuf_temp[i].buffer = 1151 obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL);
1135 param->obufs->buff_entry[i].buffer; 1152 if (!ibuf_tmp || !obuf_tmp) {
1136 obuf_temp[i].size = 1153 retval = -ENOMEM;
1137 param->obufs->buff_entry[i].size; 1154 goto free_iobufs;
1155 }
1156
1157 if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry,
1158 ibufs.entries * sizeof(*ibuf_tmp))) {
1159 retval = -EFAULT;
1160 goto free_iobufs;
1138 } 1161 }
1162 ibufs.buff_entry = ibuf_tmp;
1163 dbufs_local.ibufs = &ibufs;
1164
1165 if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry,
1166 obufs.entries * sizeof(*obuf_tmp))) {
1167 retval = -EFAULT;
1168 goto free_iobufs;
1169 }
1170 obufs.buff_entry = obuf_tmp;
1171 dbufs_local.obufs = &obufs;
1172
1139 retval = sst_decode(str_id, &dbufs_local); 1173 retval = sst_decode(str_id, &dbufs_local);
1140 if (retval) 1174 if (retval) {
1141 retval = -EAGAIN; 1175 retval = -EAGAIN;
1142 if (copy_to_user(&param->input_bytes_consumed, 1176 goto free_iobufs;
1177 }
1178
1179 dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed);
1180 if (copy_to_user(dest,
1143 &dbufs_local.input_bytes_consumed, 1181 &dbufs_local.input_bytes_consumed,
1144 sizeof(unsigned long long))) { 1182 sizeof(unsigned long long))) {
1145 retval = -EFAULT; 1183 retval = -EFAULT;
1146 break; 1184 goto free_iobufs;
1147 } 1185 }
1148 if (copy_to_user(&param->output_bytes_produced, 1186
1187 dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, output_bytes_produced);
1188 if (copy_to_user(dest,
1149 &dbufs_local.output_bytes_produced, 1189 &dbufs_local.output_bytes_produced,
1150 sizeof(unsigned long long))) { 1190 sizeof(unsigned long long))) {
1151 retval = -EFAULT; 1191 retval = -EFAULT;
1152 break; 1192 goto free_iobufs;
1153 } 1193 }
1194free_iobufs:
1195 kfree(ibuf_tmp);
1196 kfree(obuf_tmp);
1154 break; 1197 break;
1155 } 1198 }
1156 1199
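
The decode path previously sized two variable-length stack arrays from
param->ibufs->entries and param->obufs->entries, values fetched straight from
user memory, and then walked the user-supplied entry arrays in place. The
rewrite copies each level (the dbufs struct, then both buffs headers, then both
entry arrays) into kernel memory before use. A minimal sketch of one level of
that deep copy, with illustrative names:

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    struct buffs { unsigned int entries; void __user *buff_entry; };

    static void *pull_entry_array(const struct buffs __user *ubufs,
                                  struct buffs *kbufs, size_t entry_size)
    {
            void *tmp;

            if (copy_from_user(kbufs, ubufs, sizeof(*kbufs)))
                    return ERR_PTR(-EFAULT);
            /* a production version should also bound kbufs->entries */
            tmp = kcalloc(kbufs->entries, entry_size, GFP_KERNEL);
            if (!tmp)
                    return ERR_PTR(-ENOMEM);
            if (copy_from_user(tmp, kbufs->buff_entry,
                               kbufs->entries * entry_size)) {
                    kfree(tmp);
                    return ERR_PTR(-EFAULT);
            }
            kbufs->buff_entry = tmp;        /* now points at the kernel copy */
            return tmp;
    }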
@@ -1164,7 +1207,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1164 break; 1207 break;
1165 1208
1166 case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): { 1209 case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
1167 unsigned long long *bytes = (unsigned long long *)arg; 1210 unsigned long long __user *bytes = (unsigned long long __user *)arg;
1168 struct snd_sst_tstamp tstamp = {0}; 1211 struct snd_sst_tstamp tstamp = {0};
1169 1212
1170 pr_debug("sst: STREAM_BYTES_DECODED recieved!\n"); 1213 pr_debug("sst: STREAM_BYTES_DECODED recieved!\n");
@@ -1173,8 +1216,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1173 break; 1216 break;
1174 } 1217 }
1175 memcpy_fromio(&tstamp, 1218 memcpy_fromio(&tstamp,
1176 ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) 1219 sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
1177 +(str_id * sizeof(tstamp))),
1178 sizeof(tstamp)); 1220 sizeof(tstamp));
1179 if (copy_to_user(bytes, &tstamp.bytes_processed, 1221 if (copy_to_user(bytes, &tstamp.bytes_processed,
1180 sizeof(*bytes))) 1222 sizeof(*bytes)))
@@ -1197,7 +1239,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
1197 kfree(fw_info); 1239 kfree(fw_info);
1198 break; 1240 break;
1199 } 1241 }
1200 if (copy_to_user((struct snd_sst_dbufs *)arg, 1242 if (copy_to_user((struct snd_sst_dbufs __user *)arg,
1201 fw_info, sizeof(*fw_info))) { 1243 fw_info, sizeof(*fw_info))) {
1202 kfree(fw_info); 1244 kfree(fw_info);
1203 retval = -EFAULT; 1245 retval = -EFAULT;
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
index 73a98c851e4a..bf0ead78bfae 100644
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ b/drivers/staging/intel_sst/intel_sst_common.h
@@ -231,8 +231,8 @@ struct stream_info {
231 spinlock_t pcm_lock; 231 spinlock_t pcm_lock;
232 bool mmapped; 232 bool mmapped;
233 unsigned int sg_index; /* current buf Index */ 233 unsigned int sg_index; /* current buf Index */
234 unsigned char *cur_ptr; /* Current static bufs */ 234 unsigned char __user *cur_ptr; /* Current static bufs */
235 struct snd_sst_buf_entry *buf_entry; 235 struct snd_sst_buf_entry __user *buf_entry;
236 struct sst_block data_blk; /* stream ops block */ 236 struct sst_block data_blk; /* stream ops block */
237 struct sst_block ctrl_blk; /* stream control cmd block */ 237 struct sst_block ctrl_blk; /* stream control cmd block */
238 enum snd_sst_buf_type buf_type; 238 enum snd_sst_buf_type buf_type;
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index 1934805844f2..978bf87ff13d 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -22,7 +22,7 @@ int ENE_InitMedia(struct us_data *us)
22 int result; 22 int result;
23 BYTE MiscReg03 = 0; 23 BYTE MiscReg03 = 0;
24 24
25 printk("--- Initial Nedia ---\n"); 25 printk("--- Init Media ---\n");
26 result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03); 26 result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
27 if (result != USB_STOR_XFER_GOOD) 27 if (result != USB_STOR_XFER_GOOD)
28 { 28 {
@@ -64,7 +64,7 @@ int ENE_Read_BYTE(struct us_data *us, WORD index, void *buf)
64 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; 64 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
65 int result; 65 int result;
66 66
67 memset(bcb, 0, sizeof(bcb)); 67 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
68 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 68 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
69 bcb->DataTransferLength = 0x01; 69 bcb->DataTransferLength = 0x01;
70 bcb->Flags = 0x80; 70 bcb->Flags = 0x80;
@@ -92,7 +92,7 @@ int ENE_SDInit(struct us_data *us)
92 return USB_STOR_TRANSPORT_ERROR; 92 return USB_STOR_TRANSPORT_ERROR;
93 } 93 }
94 94
95 memset(bcb, 0, sizeof(bcb)); 95 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
96 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 96 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
97 bcb->Flags = 0x80; 97 bcb->Flags = 0x80;
98 bcb->CDB[0] = 0xF2; 98 bcb->CDB[0] = 0xF2;
@@ -112,7 +112,7 @@ int ENE_SDInit(struct us_data *us)
112 return USB_STOR_TRANSPORT_ERROR; 112 return USB_STOR_TRANSPORT_ERROR;
113 } 113 }
114 114
115 memset(bcb, 0, sizeof(bcb)); 115 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
116 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 116 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
117 bcb->DataTransferLength = 0x200; 117 bcb->DataTransferLength = 0x200;
118 bcb->Flags = 0x80; 118 bcb->Flags = 0x80;
@@ -161,7 +161,7 @@ int ENE_MSInit(struct us_data *us)
161 return USB_STOR_TRANSPORT_ERROR; 161 return USB_STOR_TRANSPORT_ERROR;
162 } 162 }
163 163
164 memset(bcb, 0, sizeof(bcb)); 164 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
165 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 165 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
166 bcb->DataTransferLength = 0x200; 166 bcb->DataTransferLength = 0x200;
167 bcb->Flags = 0x80; 167 bcb->Flags = 0x80;
@@ -219,7 +219,7 @@ int ENE_SMInit(struct us_data *us)
219 return USB_STOR_TRANSPORT_ERROR; 219 return USB_STOR_TRANSPORT_ERROR;
220 } 220 }
221 221
222 memset(bcb, 0, sizeof(bcb)); 222 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
223 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 223 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
224 bcb->DataTransferLength = 0x200; 224 bcb->DataTransferLength = 0x200;
225 bcb->Flags = 0x80; 225 bcb->Flags = 0x80;
@@ -341,7 +341,7 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag)
341 break; 341 break;
342 } 342 }
343 343
344 memset(bcb, 0, sizeof(bcb)); 344 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
345 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 345 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
346 bcb->DataTransferLength = 0x800; 346 bcb->DataTransferLength = 0x800;
347 bcb->Flags =0x00; 347 bcb->Flags =0x00;
@@ -433,7 +433,7 @@ int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length)
433 433
434 //printk("transport --- ENE_Read_Data\n"); 434 //printk("transport --- ENE_Read_Data\n");
435 // set up the command wrapper 435 // set up the command wrapper
436 memset(bcb, 0, sizeof(bcb)); 436 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
437 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 437 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
438 bcb->DataTransferLength = length; 438 bcb->DataTransferLength = length;
439 bcb->Flags =0x80; 439 bcb->Flags =0x80;
@@ -470,7 +470,7 @@ int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length)
470 470
471 //printk("transport --- ENE_Write_Data\n"); 471 //printk("transport --- ENE_Write_Data\n");
472 // set up the command wrapper 472 // set up the command wrapper
473 memset(bcb, 0, sizeof(bcb)); 473 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
474 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 474 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
475 bcb->DataTransferLength = length; 475 bcb->DataTransferLength = length;
476 bcb->Flags =0x00; 476 bcb->Flags =0x00;
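
Every keucr change in this file, and in the files that follow, is the same
class of bug: memset(bcb, 0, sizeof(bcb)) zeroes only sizeof(a pointer), 4 or
8 bytes, leaving the rest of the command wrapper holding stale data. The fix
spells out the struct size; sizeof(*bcb) would be the more change-proof idiom.
A small stand-alone demonstration (struct trimmed for illustration):

    #include <stdio.h>
    #include <string.h>

    struct bulk_cb_wrap { unsigned int Signature; unsigned char CDB[16]; };

    int main(void)
    {
            struct bulk_cb_wrap cb, *bcb = &cb;

            /* sizeof(bcb) is the pointer size, not the struct size */
            printf("sizeof(bcb)=%zu sizeof(*bcb)=%zu\n",
                   sizeof(bcb), sizeof(*bcb));

            memset(bcb, 0, sizeof(*bcb));   /* derive the size from the object */
            return 0;
    }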
diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c
index d4340a9da87d..9a3fdb4e4fe4 100644
--- a/drivers/staging/keucr/ms.c
+++ b/drivers/staging/keucr/ms.c
@@ -15,7 +15,7 @@ int MS_ReaderCopyBlock(struct us_data *us, WORD oldphy, WORD newphy, WORD PhyBlo
15 if (result != USB_STOR_XFER_GOOD) 15 if (result != USB_STOR_XFER_GOOD)
16 return USB_STOR_TRANSPORT_ERROR; 16 return USB_STOR_TRANSPORT_ERROR;
17 17
18 memset(bcb, 0, sizeof(bcb)); 18 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
19 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 19 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
20 bcb->DataTransferLength = 0x200*len; 20 bcb->DataTransferLength = 0x200*len;
21 bcb->Flags = 0x00; 21 bcb->Flags = 0x00;
@@ -53,7 +53,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO
53 return USB_STOR_TRANSPORT_ERROR; 53 return USB_STOR_TRANSPORT_ERROR;
54 54
55 // Read Page Data 55 // Read Page Data
56 memset(bcb, 0, sizeof(bcb)); 56 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
57 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 57 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
58 bcb->DataTransferLength = 0x200; 58 bcb->DataTransferLength = 0x200;
59 bcb->Flags = 0x80; 59 bcb->Flags = 0x80;
@@ -69,7 +69,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO
69 return USB_STOR_TRANSPORT_ERROR; 69 return USB_STOR_TRANSPORT_ERROR;
70 70
71 // Read Extra Data 71 // Read Extra Data
72 memset(bcb, 0, sizeof(bcb)); 72 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
73 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 73 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
74 bcb->DataTransferLength = 0x4; 74 bcb->DataTransferLength = 0x4;
75 bcb->Flags = 0x80; 75 bcb->Flags = 0x80;
@@ -108,7 +108,7 @@ int MS_ReaderEraseBlock(struct us_data *us, DWORD PhyBlockAddr)
108 if (result != USB_STOR_XFER_GOOD) 108 if (result != USB_STOR_XFER_GOOD)
109 return USB_STOR_TRANSPORT_ERROR; 109 return USB_STOR_TRANSPORT_ERROR;
110 110
111 memset(bcb, 0, sizeof(bcb)); 111 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
112 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 112 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
113 bcb->DataTransferLength = 0x200; 113 bcb->DataTransferLength = 0x200;
114 bcb->Flags = 0x80; 114 bcb->Flags = 0x80;
@@ -673,7 +673,7 @@ int MS_LibReadExtraBlock(struct us_data *us, DWORD PhyBlock, BYTE PageNum, BYTE
673 //printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen); 673 //printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen);
674 674
675 // Read Extra Data 675 // Read Extra Data
676 memset(bcb, 0, sizeof(bcb)); 676 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
677 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 677 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
678 bcb->DataTransferLength = 0x4 * blen; 678 bcb->DataTransferLength = 0x4 * blen;
679 bcb->Flags = 0x80; 679 bcb->Flags = 0x80;
@@ -700,7 +700,7 @@ int MS_LibReadExtra(struct us_data *us, DWORD PhyBlock, BYTE PageNum, MS_LibType
700 BYTE ExtBuf[4]; 700 BYTE ExtBuf[4];
701 701
702 //printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); 702 //printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum);
703 memset(bcb, 0, sizeof(bcb)); 703 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
704 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 704 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
705 bcb->DataTransferLength = 0x4; 705 bcb->DataTransferLength = 0x4;
706 bcb->Flags = 0x80; 706 bcb->Flags = 0x80;
@@ -807,7 +807,7 @@ int MS_LibOverwriteExtra(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, B
807 if (result != USB_STOR_XFER_GOOD) 807 if (result != USB_STOR_XFER_GOOD)
808 return USB_STOR_TRANSPORT_ERROR; 808 return USB_STOR_TRANSPORT_ERROR;
809 809
810 memset(bcb, 0, sizeof(bcb)); 810 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
811 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 811 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
812 bcb->DataTransferLength = 0x4; 812 bcb->DataTransferLength = 0x4;
813 bcb->Flags = 0x80; 813 bcb->Flags = 0x80;
diff --git a/drivers/staging/keucr/msscsi.c b/drivers/staging/keucr/msscsi.c
index ad0c5c629935..cb92d25acee0 100644
--- a/drivers/staging/keucr/msscsi.c
+++ b/drivers/staging/keucr/msscsi.c
@@ -145,7 +145,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
145 } 145 }
146 146
147 // set up the command wrapper 147 // set up the command wrapper
148 memset(bcb, 0, sizeof(bcb)); 148 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
149 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 149 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
150 bcb->DataTransferLength = blenByte; 150 bcb->DataTransferLength = blenByte;
151 bcb->Flags = 0x80; 151 bcb->Flags = 0x80;
@@ -193,7 +193,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
193 blkno = phyblk * 0x20 + PageNum; 193 blkno = phyblk * 0x20 + PageNum;
194 194
195 // set up the command wrapper 195 // set up the command wrapper
196 memset(bcb, 0, sizeof(bcb)); 196 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
197 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 197 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
198 bcb->DataTransferLength = 0x200 * len; 198 bcb->DataTransferLength = 0x200 * len;
199 bcb->Flags = 0x80; 199 bcb->Flags = 0x80;
@@ -250,7 +250,7 @@ int MS_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
250 } 250 }
251 251
252 // set up the command wrapper 252 // set up the command wrapper
253 memset(bcb, 0, sizeof(bcb)); 253 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
254 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 254 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
255 bcb->DataTransferLength = blenByte; 255 bcb->DataTransferLength = blenByte;
256 bcb->Flags = 0x00; 256 bcb->Flags = 0x00;
diff --git a/drivers/staging/keucr/sdscsi.c b/drivers/staging/keucr/sdscsi.c
index 6c332f850ebe..d646507a3611 100644
--- a/drivers/staging/keucr/sdscsi.c
+++ b/drivers/staging/keucr/sdscsi.c
@@ -152,7 +152,7 @@ int SD_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb)
152 bnByte = bn; 152 bnByte = bn;
153 153
154 // set up the command wrapper 154 // set up the command wrapper
155 memset(bcb, 0, sizeof(bcb)); 155 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
156 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 156 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
157 bcb->DataTransferLength = blenByte; 157 bcb->DataTransferLength = blenByte;
158 bcb->Flags = 0x80; 158 bcb->Flags = 0x80;
@@ -192,7 +192,7 @@ int SD_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb)
192 bnByte = bn; 192 bnByte = bn;
193 193
194 // set up the command wrapper 194 // set up the command wrapper
195 memset(bcb, 0, sizeof(bcb)); 195 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
196 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 196 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
197 bcb->DataTransferLength = blenByte; 197 bcb->DataTransferLength = blenByte;
198 bcb->Flags = 0x00; 198 bcb->Flags = 0x00;
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 844b65988636..1b52535a388f 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -266,7 +266,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
266 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 266 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
267 267
268 // Read sect data 268 // Read sect data
269 memset(bcb, 0, sizeof(bcb)); 269 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
270 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 270 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
271 bcb->DataTransferLength = 0x200; 271 bcb->DataTransferLength = 0x200;
272 bcb->Flags = 0x80; 272 bcb->Flags = 0x80;
@@ -281,7 +281,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant)
281 return USB_STOR_TRANSPORT_ERROR; 281 return USB_STOR_TRANSPORT_ERROR;
282 282
283 // Read redundant 283 // Read redundant
284 memset(bcb, 0, sizeof(bcb)); 284 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
285 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 285 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
286 bcb->DataTransferLength = 0x10; 286 bcb->DataTransferLength = 0x10;
287 bcb->Flags = 0x80; 287 bcb->Flags = 0x80;
@@ -319,7 +319,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
319 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 319 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
320 320
321 // Read sect data 321 // Read sect data
322 memset(bcb, 0, sizeof(bcb)); 322 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
323 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 323 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
324 bcb->DataTransferLength = 0x200*count; 324 bcb->DataTransferLength = 0x200*count;
325 bcb->Flags = 0x80; 325 bcb->Flags = 0x80;
@@ -334,7 +334,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
334 return USB_STOR_TRANSPORT_ERROR; 334 return USB_STOR_TRANSPORT_ERROR;
335 335
336 // Read redundant 336 // Read redundant
337 memset(bcb, 0, sizeof(bcb)); 337 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
338 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 338 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
339 bcb->DataTransferLength = 0x10; 339 bcb->DataTransferLength = 0x10;
340 bcb->Flags = 0x80; 340 bcb->Flags = 0x80;
@@ -536,7 +536,7 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant)
536 WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors; 536 WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors;
537 537
538 // Write sect data 538 // Write sect data
539 memset(bcb, 0, sizeof(bcb)); 539 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
540 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 540 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
541 bcb->DataTransferLength = 0x200*count; 541 bcb->DataTransferLength = 0x200*count;
542 bcb->Flags = 0x00; 542 bcb->Flags = 0x00;
@@ -754,7 +754,7 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant)
754 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 754 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
755 755
756 // Write sect data 756 // Write sect data
757 memset(bcb, 0, sizeof(bcb)); 757 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
758 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 758 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
759 bcb->DataTransferLength = 0x200; 759 bcb->DataTransferLength = 0x200;
760 bcb->Flags = 0x00; 760 bcb->Flags = 0x00;
@@ -791,7 +791,7 @@ int Ssfdc_D_EraseBlock(struct us_data *us)
791 addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; 791 addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
792 addr=addr*(WORD)Ssfdc.MaxSectors; 792 addr=addr*(WORD)Ssfdc.MaxSectors;
793 793
794 memset(bcb, 0, sizeof(bcb)); 794 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
795 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 795 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
796 bcb->DataTransferLength = 0x200; 796 bcb->DataTransferLength = 0x200;
797 bcb->Flags = 0x80; 797 bcb->Flags = 0x80;
@@ -827,7 +827,7 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant)
827 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; 827 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
828 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 828 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
829 829
830 memset(bcb, 0, sizeof(bcb)); 830 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
831 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 831 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
832 bcb->DataTransferLength = 0x10; 832 bcb->DataTransferLength = 0x10;
833 bcb->Flags = 0x80; 833 bcb->Flags = 0x80;
@@ -870,7 +870,7 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, BYTE *redundant)
870 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; 870 addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
871 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; 871 addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
872 872
873 memset(bcb, 0, sizeof(bcb)); 873 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
874 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); 874 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
875 bcb->DataTransferLength = 0x10; 875 bcb->DataTransferLength = 0x10;
876 bcb->Flags = 0x80; 876 bcb->Flags = 0x80;
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c
index fd98df643ab0..111160cce441 100644
--- a/drivers/staging/keucr/transport.c
+++ b/drivers/staging/keucr/transport.c
@@ -40,7 +40,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
40 us->current_urb->error_count = 0; 40 us->current_urb->error_count = 0;
41 us->current_urb->status = 0; 41 us->current_urb->status = 0;
42 42
43// us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP; 43 us->current_urb->transfer_flags = 0;
44 if (us->current_urb->transfer_buffer == us->iobuf) 44 if (us->current_urb->transfer_buffer == us->iobuf)
45 us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 45 us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
46 us->current_urb->transfer_dma = us->iobuf_dma; 46 us->current_urb->transfer_dma = us->iobuf_dma;
diff --git a/drivers/staging/rt2860/common/cmm_aes.c b/drivers/staging/rt2860/common/cmm_aes.c
index 1d159ff82fd2..a99879bada42 100644
--- a/drivers/staging/rt2860/common/cmm_aes.c
+++ b/drivers/staging/rt2860/common/cmm_aes.c
@@ -330,8 +330,6 @@ void construct_mic_iv(unsigned char *mic_iv,
330 for (i = 8; i < 14; i++) 330 for (i = 8; i < 14; i++)
331 mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */ 331 mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
332#endif 332#endif
333 i = (payload_length / 256);
334 i = (payload_length % 256);
335 mic_iv[14] = (unsigned char)(payload_length / 256); 333 mic_iv[14] = (unsigned char)(payload_length / 256);
336 mic_iv[15] = (unsigned char)(payload_length % 256); 334 mic_iv[15] = (unsigned char)(payload_length % 256);
337 335
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index ebf9074a9083..ddacfc6c4861 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -65,6 +65,7 @@ struct usb_device_id rtusb_usb_id[] = {
65 {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */ 65 {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */
66 {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */ 66 {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */
67 {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */ 67 {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */
68 {USB_DEVICE(0x050D, 0x935A)}, /* Belkin F6D4050 v1 */
68 {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */ 69 {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */
69 {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */ 70 {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */
70 {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */ 71 {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index a202194b5cbb..b1786dcac245 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -5829,6 +5829,9 @@ static void rtl8192_rx(struct net_device *dev)
5829 } 5829 }
5830 } 5830 }
5831 5831
5832 pci_unmap_single(priv->pdev, *((dma_addr_t *) skb->cb),
5833 priv->rxbuffersize, PCI_DMA_FROMDEVICE);
5834
5832 skb = new_skb; 5835 skb = new_skb;
5833 priv->rx_buf[priv->rx_idx] = skb; 5836 priv->rx_buf[priv->rx_idx] = skb;
5834 *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE); 5837 *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE);
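
Without the added pci_unmap_single() the receive path leaked one streaming DMA
mapping per recycled skb, and on non-coherent platforms the CPU could read
stale data from the buffer it was about to pass up the stack. A sketch of the
recycle sequence using the 2.6-era PCI DMA API, with illustrative names and a
mapping-error check the driver itself omits:

    #include <linux/pci.h>
    #include <linux/skbuff.h>

    static int recycle_rx_buf(struct pci_dev *pdev, struct sk_buff **slot,
                              struct sk_buff *new_skb, unsigned int bufsize)
    {
            dma_addr_t dma;

            /* release the mapping of the skb we are about to hand up */
            pci_unmap_single(pdev, *((dma_addr_t *)(*slot)->cb),
                             bufsize, PCI_DMA_FROMDEVICE);

            dma = pci_map_single(pdev, skb_tail_pointer(new_skb),
                                 bufsize, PCI_DMA_FROMDEVICE);
            if (pci_dma_mapping_error(pdev, dma))
                    return -ENOMEM;
            *((dma_addr_t *)new_skb->cb) = dma; /* stash handle as the driver does */
            *slot = new_skb;
            return 0;
    }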
diff --git a/drivers/staging/stradis/stradis.c b/drivers/staging/stradis/stradis.c
index a057824e7ebc..807dd7eb748f 100644
--- a/drivers/staging/stradis/stradis.c
+++ b/drivers/staging/stradis/stradis.c
@@ -1286,6 +1286,7 @@ static long saa_ioctl(struct file *file,
1286 case VIDIOCGCAP: 1286 case VIDIOCGCAP:
1287 { 1287 {
1288 struct video_capability b; 1288 struct video_capability b;
1289 memset(&b, 0, sizeof(b));
1289 strcpy(b.name, saa->video_dev.name); 1290 strcpy(b.name, saa->video_dev.name);
1290 b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY | 1291 b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY |
1291 VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM | 1292 VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM |
@@ -1416,6 +1417,7 @@ static long saa_ioctl(struct file *file,
1416 case VIDIOCGWIN: 1417 case VIDIOCGWIN:
1417 { 1418 {
1418 struct video_window vw; 1419 struct video_window vw;
1420 memset(&vw, 0, sizeof(vw));
1419 vw.x = saa->win.x; 1421 vw.x = saa->win.x;
1420 vw.y = saa->win.y; 1422 vw.y = saa->win.y;
1421 vw.width = saa->win.width; 1423 vw.width = saa->win.width;
@@ -1448,6 +1450,7 @@ static long saa_ioctl(struct file *file,
1448 case VIDIOCGFBUF: 1450 case VIDIOCGFBUF:
1449 { 1451 {
1450 struct video_buffer v; 1452 struct video_buffer v;
1453 memset(&v, 0, sizeof(v));
1451 v.base = (void *)saa->win.vidadr; 1454 v.base = (void *)saa->win.vidadr;
1452 v.height = saa->win.sheight; 1455 v.height = saa->win.sheight;
1453 v.width = saa->win.swidth; 1456 v.width = saa->win.swidth;
@@ -1492,6 +1495,7 @@ static long saa_ioctl(struct file *file,
1492 case VIDIOCGAUDIO: 1495 case VIDIOCGAUDIO:
1493 { 1496 {
1494 struct video_audio v; 1497 struct video_audio v;
1498 memset(&v, 0, sizeof(v));
1495 v = saa->audio_dev; 1499 v = saa->audio_dev;
1496 v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE); 1500 v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE);
1497 v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME; 1501 v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME;
@@ -1534,6 +1538,7 @@ static long saa_ioctl(struct file *file,
1534 case VIDIOCGUNIT: 1538 case VIDIOCGUNIT:
1535 { 1539 {
1536 struct video_unit vu; 1540 struct video_unit vu;
1541 memset(&vu, 0, sizeof(vu));
1537 vu.video = saa->video_dev.minor; 1542 vu.video = saa->video_dev.minor;
1538 vu.vbi = VIDEO_NO_UNIT; 1543 vu.vbi = VIDEO_NO_UNIT;
1539 vu.radio = VIDEO_NO_UNIT; 1544 vu.radio = VIDEO_NO_UNIT;
@@ -1888,6 +1893,7 @@ static int saa_open(struct file *file)
1888 1893
1889 saa->user++; 1894 saa->user++;
1890 if (saa->user > 1) { 1895 if (saa->user > 1) {
1896 saa->user--;
1891 unlock_kernel(); 1897 unlock_kernel();
1892 return 0; /* device open already, don't reset */ 1898 return 0; /* device open already, don't reset */
1893 } 1899 }
@@ -2000,10 +2006,13 @@ static int __devinit configure_saa7146(struct pci_dev *pdev, int num)
2000 if (retval < 0) { 2006 if (retval < 0) {
2001 dev_err(&pdev->dev, "%d: error in registering video device!\n", 2007 dev_err(&pdev->dev, "%d: error in registering video device!\n",
2002 num); 2008 num);
2003 goto errio; 2009 goto errirq;
2004 } 2010 }
2005 2011
2006 return 0; 2012 return 0;
2013
2014errirq:
2015 free_irq(saa->irq, saa);
2007errio: 2016errio:
2008 iounmap(saa->saa7146_mem); 2017 iounmap(saa->saa7146_mem);
2009err: 2018err:
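
The memset() additions in the VIDIOCG* handlers close a kernel stack
information leak: these on-stack structs are copied to user space with
copy_to_user(), so any member the handler did not set, and any compiler
padding between members, would have exported whatever bytes happened to be on
the stack. A minimal sketch of the safe pattern (struct is illustrative):

    #include <linux/string.h>
    #include <linux/uaccess.h>

    struct reply { char name[32]; int type; /* compiler may pad */ };

    static long fill_reply(void __user *argp)
    {
            struct reply r;

            memset(&r, 0, sizeof(r));       /* zeroes padding and unset members */
            strlcpy(r.name, "example", sizeof(r.name));
            r.type = 1;
            return copy_to_user(argp, &r, sizeof(r)) ? -EFAULT : 0;
    }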
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index ff64d464143c..93de4f2e8bf8 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -6,7 +6,6 @@ menuconfig TIDSPBRIDGE
6 tristate "DSP Bridge driver" 6 tristate "DSP Bridge driver"
7 depends on ARCH_OMAP3 7 depends on ARCH_OMAP3
8 select OMAP_MBOX_FWK 8 select OMAP_MBOX_FWK
9 select OMAP_IOMMU
10 help 9 help
11 DSP/BIOS Bridge is designed for platforms that contain a GPP and 10 DSP/BIOS Bridge is designed for platforms that contain a GPP and
12 one or more attached DSPs. The GPP is considered the master or 11 one or more attached DSPs. The GPP is considered the master or
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index 50decc2935c5..41c644c3318f 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -2,18 +2,19 @@ obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o
2 2
3libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o 3libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o
4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ 4libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
5 core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \ 5 core/tiomap3430_pwr.o core/tiomap_io.o \
6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o 6 core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
7libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ 7libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \
8 pmgr/cmm.o pmgr/dbll.o 8 pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
9librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ 9librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \
10 rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ 10 rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \
11 rmgr/nldr.o rmgr/drv_interface.o 11 rmgr/nldr.o rmgr/drv_interface.o
12libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \ 12libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
13 dynload/tramp.o 13 dynload/tramp.o
14libhw = hw/hw_mmu.o
14 15
15bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ 16bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
16 $(libdload) 17 $(libdload) $(libhw)
17 18
18#Machine dependent 19#Machine dependent
19ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ 20ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
index 8ae263387a87..16723cd34831 100644
--- a/drivers/staging/tidspbridge/core/_deh.h
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -27,8 +27,9 @@
27struct deh_mgr { 27struct deh_mgr {
28 struct bridge_dev_context *hbridge_context; /* Bridge context. */ 28 struct bridge_dev_context *hbridge_context; /* Bridge context. */
29 struct ntfy_object *ntfy_obj; /* NTFY object */ 29 struct ntfy_object *ntfy_obj; /* NTFY object */
30};
31 30
32int mmu_fault_isr(struct iommu *mmu); 31 /* MMU Fault DPC */
32 struct tasklet_struct dpc_tasklet;
33};
33 34
34#endif /* _DEH_ */ 35#endif /* _DEH_ */
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index e0a801c1cb98..1c1f157e167a 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -23,8 +23,8 @@
23#include <plat/clockdomain.h> 23#include <plat/clockdomain.h>
24#include <mach-omap2/prm-regbits-34xx.h> 24#include <mach-omap2/prm-regbits-34xx.h>
25#include <mach-omap2/cm-regbits-34xx.h> 25#include <mach-omap2/cm-regbits-34xx.h>
26#include <dspbridge/dsp-mmu.h>
27#include <dspbridge/devdefs.h> 26#include <dspbridge/devdefs.h>
27#include <hw_defs.h>
28#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ 28#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */
29#include <dspbridge/sync.h> 29#include <dspbridge/sync.h>
30#include <dspbridge/clk.h> 30#include <dspbridge/clk.h>
@@ -306,18 +306,6 @@ static const struct bpwr_clk_t bpwr_clks[] = {
306 306
307#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index))) 307#define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index)))
308 308
309struct shm_segs {
310 u32 seg0_da;
311 u32 seg0_pa;
312 u32 seg0_va;
313 u32 seg0_size;
314 u32 seg1_da;
315 u32 seg1_pa;
316 u32 seg1_va;
317 u32 seg1_size;
318};
319
320
321/* This Bridge driver's device context: */ 309/* This Bridge driver's device context: */
322struct bridge_dev_context { 310struct bridge_dev_context {
323 struct dev_object *hdev_obj; /* Handle to Bridge device object. */ 311 struct dev_object *hdev_obj; /* Handle to Bridge device object. */
@@ -328,6 +316,7 @@ struct bridge_dev_context {
328 */ 316 */
329 u32 dw_dsp_ext_base_addr; /* See the comment above */ 317 u32 dw_dsp_ext_base_addr; /* See the comment above */
330 u32 dw_api_reg_base; /* API mem map'd registers */ 318 u32 dw_api_reg_base; /* API mem map'd registers */
319 void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */
331 u32 dw_api_clk_base; /* CLK Registers */ 320 u32 dw_api_clk_base; /* CLK Registers */
332 u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */ 321 u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */
333 u32 dw_public_rhea; /* Pub Rhea */ 322 u32 dw_public_rhea; /* Pub Rhea */
@@ -339,8 +328,7 @@ struct bridge_dev_context {
339 u32 dw_internal_size; /* Internal memory size */ 328 u32 dw_internal_size; /* Internal memory size */
340 329
341 struct omap_mbox *mbox; /* Mail box handle */ 330 struct omap_mbox *mbox; /* Mail box handle */
342 struct iommu *dsp_mmu; /* iommu for iva2 handler */ 331
343 struct shm_segs sh_s;
344 struct cfg_hostres *resources; /* Host Resources */ 332 struct cfg_hostres *resources; /* Host Resources */
345 333
346 /* 334 /*
@@ -353,6 +341,7 @@ struct bridge_dev_context {
353 341
354 /* TC Settings */ 342 /* TC Settings */
355 bool tc_word_swap_on; /* Traffic Controller Word Swap */ 343 bool tc_word_swap_on; /* Traffic Controller Word Swap */
344 struct pg_table_attrs *pt_attrs;
356 u32 dsp_per_clks; 345 u32 dsp_per_clks;
357}; 346};
358 347
diff --git a/drivers/staging/tidspbridge/core/dsp-mmu.c b/drivers/staging/tidspbridge/core/dsp-mmu.c
deleted file mode 100644
index 983c95adc8ff..000000000000
--- a/drivers/staging/tidspbridge/core/dsp-mmu.c
+++ /dev/null
@@ -1,317 +0,0 @@
1/*
2 * dsp-mmu.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP iommu.
7 *
8 * Copyright (C) 2010 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <dspbridge/host_os.h>
20#include <plat/dmtimer.h>
21#include <dspbridge/dbdefs.h>
22#include <dspbridge/dev.h>
23#include <dspbridge/io_sm.h>
24#include <dspbridge/dspdeh.h>
25#include "_tiomap.h"
26
27#include <dspbridge/dsp-mmu.h>
28
29#define MMU_CNTL_TWL_EN (1 << 2)
30
31static struct tasklet_struct mmu_tasklet;
32
33#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
34static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
35{
36 void *dummy_addr;
37 u32 fa, tmp;
38 struct iotlb_entry e;
39 struct iommu *mmu = dev_context->dsp_mmu;
40 dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
41
42 /*
43 * Before acking the MMU fault, let's make sure MMU can only
44 * access entry #0. Then add a new entry so that the DSP OS
45 * can continue in order to dump the stack.
46 */
47 tmp = iommu_read_reg(mmu, MMU_CNTL);
48 tmp &= ~MMU_CNTL_TWL_EN;
49 iommu_write_reg(mmu, tmp, MMU_CNTL);
50 fa = iommu_read_reg(mmu, MMU_FAULT_AD);
51 e.da = fa & PAGE_MASK;
52 e.pa = virt_to_phys(dummy_addr);
53 e.valid = 1;
54 e.prsvd = 1;
55 e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
56 e.endian = MMU_RAM_ENDIAN_LITTLE;
57 e.elsz = MMU_RAM_ELSZ_32;
58 e.mixed = 0;
59
60 load_iotlb_entry(mmu, &e);
61
62 dsp_clk_enable(DSP_CLK_GPT8);
63
64 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
65
66 /* Clear MMU interrupt */
67 tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
68 iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
69
70 dump_dsp_stack(dev_context);
71 dsp_clk_disable(DSP_CLK_GPT8);
72
73 iopgtable_clear_entry(mmu, fa);
74 free_page((unsigned long)dummy_addr);
75}
76#endif
77
78
79static void fault_tasklet(unsigned long data)
80{
81 struct iommu *mmu = (struct iommu *)data;
82 struct bridge_dev_context *dev_ctx;
83 struct deh_mgr *dm;
84 u32 fa;
85 dev_get_deh_mgr(dev_get_first(), &dm);
86 dev_get_bridge_context(dev_get_first(), &dev_ctx);
87
88 if (!dm || !dev_ctx)
89 return;
90
91 fa = iommu_read_reg(mmu, MMU_FAULT_AD);
92
93#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
94 print_dsp_trace_buffer(dev_ctx);
95 dump_dl_modules(dev_ctx);
96 mmu_fault_print_stack(dev_ctx);
97#endif
98
99 bridge_deh_notify(dm, DSP_MMUFAULT, fa);
100}
101
102/*
103 * ======== mmu_fault_isr ========
104 * ISR to be triggered by a DSP MMU fault interrupt.
105 */
106static int mmu_fault_callback(struct iommu *mmu)
107{
108 if (!mmu)
109 return -EPERM;
110
111 iommu_write_reg(mmu, 0, MMU_IRQENABLE);
112 tasklet_schedule(&mmu_tasklet);
113 return 0;
114}
115
116/**
117 * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
118 *
119 * This function initialize dsp mmu module and returns a struct iommu
120 * handle to use it for dsp maps.
121 *
122 */
123struct iommu *dsp_mmu_init()
124{
125 struct iommu *mmu;
126
127 mmu = iommu_get("iva2");
128
129 if (!IS_ERR(mmu)) {
130 tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
131 mmu->isr = mmu_fault_callback;
132 }
133
134 return mmu;
135}
136
137/**
138 * dsp_mmu_exit() - destroy dsp mmu module
139 * @mmu: Pointer to iommu handle.
140 *
141 * This function destroys dsp mmu module.
142 *
143 */
144void dsp_mmu_exit(struct iommu *mmu)
145{
146 if (mmu)
147 iommu_put(mmu);
148 tasklet_kill(&mmu_tasklet);
149}
150
151/**
152 * user_va2_pa() - get physical address from userspace address.
153 * @mm: mm_struct Pointer of the process.
154 * @address: Virtual user space address.
155 *
156 */
157static u32 user_va2_pa(struct mm_struct *mm, u32 address)
158{
159 pgd_t *pgd;
160 pmd_t *pmd;
161 pte_t *ptep, pte;
162
163 pgd = pgd_offset(mm, address);
164 if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
165 pmd = pmd_offset(pgd, address);
166 if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
167 ptep = pte_offset_map(pmd, address);
168 if (ptep) {
169 pte = *ptep;
170 if (pte_present(pte))
171 return pte & PAGE_MASK;
172 }
173 }
174 }
175
176 return 0;
177}
178
179/**
180 * get_io_pages() - pin and get pages of an I/O user buffer.
181 * @mm: mm_struct pointer of the process.
182 * @uva: Virtual user space address.
183 * @pages: Number of pages to be pinned.
184 * @usr_pgs: struct page array pointer where the user pages will be stored.
185 *
186 */
187static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
188 struct page **usr_pgs)
189{
190 u32 pa;
191 int i;
192 struct page *pg;
193
194 for (i = 0; i < pages; i++) {
195 pa = user_va2_pa(mm, uva + i * PAGE_SIZE); /* walk page by page */
196
197 if (!pfn_valid(__phys_to_pfn(pa)))
198 break;
199
200 pg = phys_to_page(pa);
201 usr_pgs[i] = pg;
202 get_page(pg);
203 }
204 return i;
205}
206
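Because get_io_pages() can return fewer pages than requested, callers have to drop the references already taken when they bail out. A sketch of that rollback (the helper name is illustrative; user_to_dsp_map() below open-codes the same loop):

static void put_io_pages(struct page **usr_pgs, int pinned)
{
	while (pinned--)
		put_page(usr_pgs[pinned]);	/* undo get_page()/get_user_pages() */
}
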
207/**
208 * user_to_dsp_map() - map a user buffer to a DSP virtual address
209 * @mmu: Pointer to iommu handle.
210 * @uva: Virtual user space address.
211 * @da: DSP address.
212 * @size: Buffer size to map.
213 * @usr_pgs: struct page array pointer where the user pages will be stored.
214 *
215 * This function maps a user space buffer into the DSP virtual address space.
216 *
217 */
218u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
219 struct page **usr_pgs)
220{
221 int res, w = 0;
222 unsigned pages;
223 int i;
224 struct vm_area_struct *vma;
225 struct mm_struct *mm = current->mm;
226 struct sg_table *sgt;
227 struct scatterlist *sg;
228
229 if (!size || !usr_pgs)
230 return -EINVAL;
231
232 pages = size / PG_SIZE4K;
233
234 down_read(&mm->mmap_sem);
235 vma = find_vma(mm, uva);
236 while (vma && (uva + size > vma->vm_end))
237 vma = find_vma(mm, vma->vm_end + 1);
238
239 if (!vma) {
240 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
241 __func__, uva, size);
242 up_read(&mm->mmap_sem);
243 return -EINVAL;
244 }
245 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
246 w = 1;
247
248 if (vma->vm_flags & VM_IO)
249 i = get_io_pages(mm, uva, pages, usr_pgs);
250 else
251 i = get_user_pages(current, mm, uva, pages, w, 1,
252 usr_pgs, NULL);
253 up_read(&mm->mmap_sem);
254
255 if (i < 0)
256 return i;
257
258 if (i < pages) {
259 res = -EFAULT;
260 goto err_pages;
261 }
262
263 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
264 if (!sgt) {
265 res = -ENOMEM;
266 goto err_pages;
267 }
268
269 res = sg_alloc_table(sgt, pages, GFP_KERNEL);
270
271 if (res < 0)
272 goto err_sg;
273
274 for_each_sg(sgt->sgl, sg, sgt->nents, i)
275 sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
276
277 da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
278
279 if (!IS_ERR_VALUE(da))
280 return da;
281 res = (int)da;
282
283 sg_free_table(sgt);
284err_sg:
285 kfree(sgt);
286 i = pages;
287err_pages:
288 while (i--)
289 put_page(usr_pgs[i]);
290 return res;
291}
292
293/**
294 * user_to_dsp_unmap() - unmap a DSP virtual buffer.
295 * @mmu: Pointer to iommu handle.
296 * @da: DSP address.
297 *
298 * This function unmaps a user space buffer from the DSP virtual address space.
299 *
300 */
301int user_to_dsp_unmap(struct iommu *mmu, u32 da)
302{
303 unsigned i;
304 struct sg_table *sgt;
305 struct scatterlist *sg;
306
307 sgt = iommu_vunmap(mmu, da);
308 if (!sgt)
309 return -EFAULT;
310
311 for_each_sg(sgt->sgl, sg, sgt->nents, i)
312 put_page(sg_page(sg));
313 sg_free_table(sgt);
314 kfree(sgt);
315
316 return 0;
317}
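A full map/unmap round trip through this pair of functions might look like the following sketch (the caller and its error handling are illustrative; usr_pgs must hold size / PG_SIZE4K entries):

static int dsp_buffer_round_trip(struct iommu *mmu, u32 uva, u32 dsp_va,
				 u32 size)
{
	struct page **usr_pgs;
	u32 da;
	int res;

	usr_pgs = kcalloc(size / PG_SIZE4K, sizeof(*usr_pgs), GFP_KERNEL);
	if (!usr_pgs)
		return -ENOMEM;

	da = user_to_dsp_map(mmu, uva, dsp_va, size, usr_pgs);
	if (IS_ERR_VALUE(da)) {
		kfree(usr_pgs);
		return (int)da;
	}

	/* ... the DSP works on the buffer at 'da' here ... */

	res = user_to_dsp_unmap(mmu, da);
	kfree(usr_pgs);
	return res;
}
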
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 194badaba0ed..571864555ddd 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -39,6 +39,10 @@
39#include <dspbridge/ntfy.h> 39#include <dspbridge/ntfy.h>
40#include <dspbridge/sync.h> 40#include <dspbridge/sync.h>
41 41
42/* Hardware Abstraction Layer */
43#include <hw_defs.h>
44#include <hw_mmu.h>
45
42/* Bridge Driver */ 46/* Bridge Driver */
43#include <dspbridge/dspdeh.h> 47#include <dspbridge/dspdeh.h>
44#include <dspbridge/dspio.h> 48#include <dspbridge/dspio.h>
@@ -287,7 +291,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
287 struct cod_manager *cod_man; 291 struct cod_manager *cod_man;
288 struct chnl_mgr *hchnl_mgr; 292 struct chnl_mgr *hchnl_mgr;
289 struct msg_mgr *hmsg_mgr; 293 struct msg_mgr *hmsg_mgr;
290 struct shm_segs *sm_sg;
291 u32 ul_shm_base; 294 u32 ul_shm_base;
292 u32 ul_shm_base_offset; 295 u32 ul_shm_base_offset;
293 u32 ul_shm_limit; 296 u32 ul_shm_limit;
@@ -310,9 +313,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
310 struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB]; 313 struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
311 struct cfg_hostres *host_res; 314 struct cfg_hostres *host_res;
312 struct bridge_dev_context *pbridge_context; 315 struct bridge_dev_context *pbridge_context;
316 u32 map_attrs;
313 u32 shm0_end; 317 u32 shm0_end;
314 u32 ul_dyn_ext_base; 318 u32 ul_dyn_ext_base;
315 u32 ul_seg1_size = 0; 319 u32 ul_seg1_size = 0;
320 u32 pa_curr = 0;
321 u32 va_curr = 0;
322 u32 gpp_va_curr = 0;
323 u32 num_bytes = 0;
324 u32 all_bits = 0;
325 u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
326 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
327 };
316 328
317 status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context); 329 status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
318 if (!pbridge_context) { 330 if (!pbridge_context) {
@@ -325,8 +337,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
325 status = -EFAULT; 337 status = -EFAULT;
326 goto func_end; 338 goto func_end;
327 } 339 }
328 sm_sg = &pbridge_context->sh_s;
329
330 status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man); 340 status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
331 if (!cod_man) { 341 if (!cod_man) {
332 status = -EFAULT; 342 status = -EFAULT;
@@ -461,14 +471,129 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
461 if (status) 471 if (status)
462 goto func_end; 472 goto func_end;
463 473
464 sm_sg->seg1_pa = ul_gpp_pa;
465 sm_sg->seg1_da = ul_dyn_ext_base;
466 sm_sg->seg1_va = ul_gpp_va;
467 sm_sg->seg1_size = ul_seg1_size;
468 sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size;
469 sm_sg->seg0_da = ul_dsp_va;
470 sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size;
471 sm_sg->seg0_size = ul_seg_size;
474 pa_curr = ul_gpp_pa;
475 va_curr = ul_dyn_ext_base * hio_mgr->word_size;
476 gpp_va_curr = ul_gpp_va;
477 num_bytes = ul_seg1_size;
478
479 /*
480 * Try to fit into TLB entries. If not possible, push them to page
481 * tables. It is quite possible that if sections are not on
482 * bigger page boundary, we may end up making several small pages.
483 * So, push them onto page tables, if that is the case.
484 */
485 map_attrs = 0x00000000;
486 map_attrs = DSP_MAPLITTLEENDIAN;
487 map_attrs |= DSP_MAPPHYSICALADDR;
488 map_attrs |= DSP_MAPELEMSIZE32;
489 map_attrs |= DSP_MAPDONOTLOCK;
490
491 while (num_bytes) {
492 /*
493 * To find the max. page size with which both PA & VA are
494 * aligned.
495 */
496 all_bits = pa_curr | va_curr;
497 dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
498 "num_bytes %x\n", all_bits, pa_curr, va_curr,
499 num_bytes);
500 for (i = 0; i < 4; i++) {
501 if ((num_bytes >= page_size[i]) && ((all_bits &
502 (page_size[i] -
503 1)) == 0)) {
504 status =
505 hio_mgr->intf_fxns->
506 pfn_brd_mem_map(hio_mgr->hbridge_context,
507 pa_curr, va_curr,
508 page_size[i], map_attrs,
509 NULL);
510 if (status)
511 goto func_end;
512 pa_curr += page_size[i];
513 va_curr += page_size[i];
514 gpp_va_curr += page_size[i];
515 num_bytes -= page_size[i];
516 /*
517 * Don't try smaller sizes. Hopefully we have
518 * reached an address aligned to a bigger page
519 * size.
520 */
521 break;
522 }
523 }
524 }
525 pa_curr += ul_pad_size;
526 va_curr += ul_pad_size;
527 gpp_va_curr += ul_pad_size;
528
529 /* Configure the TLB entries for the next cacheable segment */
530 num_bytes = ul_seg_size;
531 va_curr = ul_dsp_va * hio_mgr->word_size;
532 while (num_bytes) {
533 /*
534 * To find the max. page size with which both PA & VA are
535 * aligned.
536 */
537 all_bits = pa_curr | va_curr;
538 dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
539 "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
540 va_curr, num_bytes);
541 for (i = 0; i < 4; i++) {
542 if (!(num_bytes >= page_size[i]) ||
543 !((all_bits & (page_size[i] - 1)) == 0))
544 continue;
545 if (ndx < MAX_LOCK_TLB_ENTRIES) {
546 /*
547 * This is the physical address written to
548 * DSP MMU.
549 */
550 ae_proc[ndx].ul_gpp_pa = pa_curr;
551 /*
552 * This is the virtual uncached ioremapped
553 * address!!!
554 */
555 ae_proc[ndx].ul_gpp_va = gpp_va_curr;
556 ae_proc[ndx].ul_dsp_va =
557 va_curr / hio_mgr->word_size;
558 ae_proc[ndx].ul_size = page_size[i];
559 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
560 ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
561 ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
562 dev_dbg(bridge, "shm MMU TLB entry PA %x"
563 " VA %x DSP_VA %x Size %x\n",
564 ae_proc[ndx].ul_gpp_pa,
565 ae_proc[ndx].ul_gpp_va,
566 ae_proc[ndx].ul_dsp_va *
567 hio_mgr->word_size, page_size[i]);
568 ndx++;
569 } else {
570 status =
571 hio_mgr->intf_fxns->
572 pfn_brd_mem_map(hio_mgr->hbridge_context,
573 pa_curr, va_curr,
574 page_size[i], map_attrs,
575 NULL);
576 dev_dbg(bridge,
577 "shm MMU PTE entry PA %x"
578 " VA %x DSP_VA %x Size %x\n",
579 ae_proc[ndx].ul_gpp_pa,
580 ae_proc[ndx].ul_gpp_va,
581 ae_proc[ndx].ul_dsp_va *
582 hio_mgr->word_size, page_size[i]);
583 if (status)
584 goto func_end;
585 }
586 pa_curr += page_size[i];
587 va_curr += page_size[i];
588 gpp_va_curr += page_size[i];
589 num_bytes -= page_size[i];
590 /*
591 * Don't try smaller sizes. Hopefully we have reached
592 * an address aligned to a bigger page size.
593 */
594 break;
595 }
596 }
472 597
473 /* 598 /*
474 * Copy remaining entries from CDB. All entries are 1 MB and 599 * Copy remaining entries from CDB. All entries are 1 MB and
@@ -509,12 +634,38 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
509 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, 634 "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
510 ae_proc[ndx].ul_dsp_va); 635 ae_proc[ndx].ul_dsp_va);
511 ndx++; 636 ndx++;
637 } else {
638 status = hio_mgr->intf_fxns->pfn_brd_mem_map
639 (hio_mgr->hbridge_context,
640 hio_mgr->ext_proc_info.ty_tlb[i].
641 ul_gpp_phys,
642 hio_mgr->ext_proc_info.ty_tlb[i].
643 ul_dsp_virt, 0x100000, map_attrs,
644 NULL);
512 } 645 }
513 } 646 }
514 if (status) 647 if (status)
515 goto func_end; 648 goto func_end;
516 } 649 }
517 650
651 map_attrs = 0x00000000;
652 map_attrs = DSP_MAPLITTLEENDIAN;
653 map_attrs |= DSP_MAPPHYSICALADDR;
654 map_attrs |= DSP_MAPELEMSIZE32;
655 map_attrs |= DSP_MAPDONOTLOCK;
656
657 /* Map the L4 peripherals */
658 i = 0;
659 while (l4_peripheral_table[i].phys_addr) {
660 status = hio_mgr->intf_fxns->pfn_brd_mem_map
661 (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
662 l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
663 map_attrs, NULL);
664 if (status)
665 goto func_end;
666 i++;
667 }
668
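For reference, the sentinel-terminated table walked above pairs an L4 physical address with a fixed DSP virtual address. A hypothetical sketch of its shape (the real table and its entries live elsewhere in the driver; the sample addresses are illustrative only):

struct map_l4_peripheral {
	u32 phys_addr;		/* L4 peripheral physical address */
	u32 dsp_virt_addr;	/* fixed DSP virtual address */
};

static struct map_l4_peripheral l4_peripheral_table[] = {
	{ 0x48020000, 0x48020000 },	/* illustrative entry */
	{ 0, 0 }	/* sentinel: phys_addr == 0 terminates the walk */
};
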
518 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { 669 for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
519 ae_proc[i].ul_dsp_va = 0; 670 ae_proc[i].ul_dsp_va = 0;
520 ae_proc[i].ul_gpp_pa = 0; 671 ae_proc[i].ul_gpp_pa = 0;
@@ -537,12 +688,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
537 status = -EFAULT; 688 status = -EFAULT;
538 goto func_end; 689 goto func_end;
539 } else { 690 } else {
540 if (sm_sg->seg0_da > ul_shm_base) {
691 if (ae_proc[0].ul_dsp_va > ul_shm_base) {
541 status = -EPERM; 692 status = -EPERM;
542 goto func_end; 693 goto func_end;
543 } 694 }
544 /* ul_shm_base may not be at ul_dsp_va address */ 695 /* ul_shm_base may not be at ul_dsp_va address */
545 ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) *
696 ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
546 hio_mgr->word_size; 697 hio_mgr->word_size;
547 /* 698 /*
548 * bridge_dev_ctrl() will set dev context dsp-mmu info. In 699 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
@@ -566,7 +717,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
566 goto func_end; 717 goto func_end;
567 } 718 }
568 /* Register SM */ 719 /* Register SM */
569 status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa);
720 status =
721 register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
570 } 722 }
571 723
572 hio_mgr->shared_mem = (struct shm *)ul_shm_base; 724 hio_mgr->shared_mem = (struct shm *)ul_shm_base;
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index f22bc12bc0d3..1be081f917a7 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -23,7 +23,6 @@
23#include <dspbridge/host_os.h> 23#include <dspbridge/host_os.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/mmzone.h> 25#include <linux/mmzone.h>
26#include <plat/control.h>
27 26
28/* ----------------------------------- DSP/BIOS Bridge */ 27/* ----------------------------------- DSP/BIOS Bridge */
29#include <dspbridge/dbdefs.h> 28#include <dspbridge/dbdefs.h>
@@ -35,6 +34,10 @@
35#include <dspbridge/drv.h> 34#include <dspbridge/drv.h>
36#include <dspbridge/sync.h> 35#include <dspbridge/sync.h>
37 36
37/* ------------------------------------ Hardware Abstraction Layer */
38#include <hw_defs.h>
39#include <hw_mmu.h>
40
38/* ----------------------------------- Link Driver */ 41/* ----------------------------------- Link Driver */
39#include <dspbridge/dspdefs.h> 42#include <dspbridge/dspdefs.h>
40#include <dspbridge/dspchnl.h> 43#include <dspbridge/dspchnl.h>
@@ -47,6 +50,7 @@
47/* ----------------------------------- Platform Manager */ 50/* ----------------------------------- Platform Manager */
48#include <dspbridge/dev.h> 51#include <dspbridge/dev.h>
49#include <dspbridge/dspapi.h> 52#include <dspbridge/dspapi.h>
53#include <dspbridge/dmm.h>
50#include <dspbridge/wdt.h> 54#include <dspbridge/wdt.h>
51 55
52/* ----------------------------------- Local */ 56/* ----------------------------------- Local */
@@ -67,6 +71,20 @@
67#define MMU_SMALL_PAGE_MASK 0xFFFFF000 71#define MMU_SMALL_PAGE_MASK 0xFFFFF000
68#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 72#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
69#define PAGES_II_LVL_TABLE 512 73#define PAGES_II_LVL_TABLE 512
74#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
75
76/*
77 * This is a totally ugly layer violation, but needed until
78 * omap_ctrl_set_dsp_boot*() are provided.
79 */
80#define OMAP3_IVA2_BOOTMOD_IDLE 1
81#define OMAP2_CONTROL_GENERAL 0x270
82#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
83#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
84
85#define OMAP343X_CTRL_REGADDR(reg) \
86 OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
87
70 88
71/* Forward Declarations: */ 89/* Forward Declarations: */
72static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); 90static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
@@ -91,6 +109,12 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
91static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, 109static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
92 u8 *host_buff, u32 dsp_addr, 110 u8 *host_buff, u32 dsp_addr,
93 u32 ul_num_bytes, u32 mem_type); 111 u32 ul_num_bytes, u32 mem_type);
112static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
113 u32 ul_mpu_addr, u32 virt_addr,
114 u32 ul_num_bytes, u32 ul_map_attr,
115 struct page **mapped_pages);
116static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
117 u32 virt_addr, u32 ul_num_bytes);
94static int bridge_dev_create(struct bridge_dev_context 118static int bridge_dev_create(struct bridge_dev_context
95 **dev_cntxt, 119 **dev_cntxt,
96 struct dev_object *hdev_obj, 120 struct dev_object *hdev_obj,
@@ -98,8 +122,57 @@ static int bridge_dev_create(struct bridge_dev_context
98static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, 122static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
99 u32 dw_cmd, void *pargs); 123 u32 dw_cmd, void *pargs);
100static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); 124static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
125static u32 user_va2_pa(struct mm_struct *mm, u32 address);
126static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
127 u32 va, u32 size,
128 struct hw_mmu_map_attrs_t *map_attrs);
129static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
130 u32 size, struct hw_mmu_map_attrs_t *attrs);
131static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
132 u32 ul_mpu_addr, u32 virt_addr,
133 u32 ul_num_bytes,
134 struct hw_mmu_map_attrs_t *hw_attrs);
135
101bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); 136bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
102 137
138/* ----------------------------------- Globals */
139
140/* Attributes of L2 page tables for DSP MMU */
141struct page_info {
142 u32 num_entries; /* Number of valid PTEs in the L2 PT */
143};
144
145/* Attributes used to manage the DSP MMU page tables */
146struct pg_table_attrs {
147 spinlock_t pg_lock; /* Critical section object handle */
148
149 u32 l1_base_pa; /* Physical address of the L1 PT */
150 u32 l1_base_va; /* Virtual address of the L1 PT */
151 u32 l1_size; /* Size of the L1 PT */
152 u32 l1_tbl_alloc_pa;
153 /* Physical address of Allocated mem for L1 table. May not be aligned */
154 u32 l1_tbl_alloc_va;
155 /* Virtual address of Allocated mem for L1 table. May not be aligned */
156 u32 l1_tbl_alloc_sz;
157 /* Size of consistent memory allocated for L1 table.
158 * May not be aligned */
159
160 u32 l2_base_pa; /* Physical address of the L2 PT */
161 u32 l2_base_va; /* Virtual address of the L2 PT */
162 u32 l2_size; /* Size of the L2 PT */
163 u32 l2_tbl_alloc_pa;
164 /* Physical address of Allocated mem for L2 table. May not be aligned */
165 u32 l2_tbl_alloc_va;
166 /* Virtual address of Allocated mem for L2 table. May not be aligned */
167 u32 l2_tbl_alloc_sz;
168 /* Size of consistent memory allocated for L2 table.
169 * May not be aligned */
170
171 u32 l2_num_pages; /* Number of allocated L2 PT */
172 /* Array [l2_num_pages] of L2 PT info structs */
173 struct page_info *pg_info;
174};
175
103/* 176/*
104 * This Bridge driver's function interface table. 177 * This Bridge driver's function interface table.
105 */ 178 */
@@ -119,6 +192,8 @@ static struct bridge_drv_interface drv_interface_fxns = {
119 bridge_brd_set_state, 192 bridge_brd_set_state,
120 bridge_brd_mem_copy, 193 bridge_brd_mem_copy,
121 bridge_brd_mem_write, 194 bridge_brd_mem_write,
195 bridge_brd_mem_map,
196 bridge_brd_mem_un_map,
122 /* The following CHNL functions are provided by chnl_io.lib: */ 197 /* The following CHNL functions are provided by chnl_io.lib: */
123 bridge_chnl_create, 198 bridge_chnl_create,
124 bridge_chnl_destroy, 199 bridge_chnl_destroy,
@@ -148,6 +223,27 @@ static struct bridge_drv_interface drv_interface_fxns = {
148 bridge_msg_set_queue_id, 223 bridge_msg_set_queue_id,
149}; 224};
150 225
226static inline void flush_all(struct bridge_dev_context *dev_context)
227{
228 if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
229 dev_context->dw_brd_state == BRD_HIBERNATION)
230 wake_dsp(dev_context, NULL);
231
232 hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
233}
234
235static void bad_page_dump(u32 pa, struct page *pg)
236{
237 pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
238 pr_emerg("Bad page state in process '%s'\n"
239 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
240 "Backtrace:\n",
241 current->comm, pg, (int)(2 * sizeof(unsigned long)),
242 (unsigned long)pg->flags, pg->mapping,
243 page_mapcount(pg), page_count(pg));
244 dump_stack();
245}
246
151/* 247/*
152 * ======== bridge_drv_entry ======== 248 * ======== bridge_drv_entry ========
153 * purpose: 249 * purpose:
@@ -203,7 +299,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
203 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, 299 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
204 OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); 300 OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
205 } 301 }
206
302 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
303 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
207 dsp_clk_enable(DSP_CLK_IVA2); 304 dsp_clk_enable(DSP_CLK_IVA2);
208 305
209 /* set the device state to IDLE */ 306 /* set the device state to IDLE */
@@ -274,17 +371,14 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
274{ 371{
275 int status = 0; 372 int status = 0;
276 struct bridge_dev_context *dev_context = dev_ctxt; 373 struct bridge_dev_context *dev_context = dev_ctxt;
277 struct iommu *mmu = NULL;
278 struct shm_segs *sm_sg;
279 int l4_i = 0, tlb_i = 0;
280 u32 sg0_da = 0, sg1_da = 0;
281 struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
282 u32 dw_sync_addr = 0; 374 u32 dw_sync_addr = 0;
283 u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ 375 u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */
284 u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ 376 u32 ul_shm_base_virt; /* Dsp Virt SM base addr */
285 u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ 377 u32 ul_tlb_base_virt; /* Base of MMU TLB entry */
286 /* Offset of shm_base_virt from tlb_base_virt */ 378 /* Offset of shm_base_virt from tlb_base_virt */
287 u32 ul_shm_offset_virt; 379 u32 ul_shm_offset_virt;
380 s32 entry_ndx;
381 s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */
288 struct cfg_hostres *resources = NULL; 382 struct cfg_hostres *resources = NULL;
289 u32 temp; 383 u32 temp;
290 u32 ul_dsp_clk_rate; 384 u32 ul_dsp_clk_rate;
@@ -305,12 +399,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
305 ul_shm_base_virt *= DSPWORDSIZE; 399 ul_shm_base_virt *= DSPWORDSIZE;
306 DBC_ASSERT(ul_shm_base_virt != 0); 400 DBC_ASSERT(ul_shm_base_virt != 0);
307 /* DSP Virtual address */ 401 /* DSP Virtual address */
308 ul_tlb_base_virt = dev_context->sh_s.seg0_da; 402 ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
309 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 403 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
310 ul_shm_offset_virt = 404 ul_shm_offset_virt =
311 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); 405 ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
312 /* Kernel logical address */ 406 /* Kernel logical address */
313 ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt; 407 ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
314 408
315 DBC_ASSERT(ul_shm_base != 0); 409 DBC_ASSERT(ul_shm_base != 0);
316 /* 2nd wd is used as sync field */ 410 /* 2nd wd is used as sync field */
@@ -345,83 +439,78 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
345 OMAP343X_CONTROL_IVA2_BOOTMOD)); 439 OMAP343X_CONTROL_IVA2_BOOTMOD));
346 } 440 }
347 } 441 }
348
349 if (!status) { 442 if (!status) {
443 /* Reset and Unreset the RST2, so that BOOTADDR is copied to
444 * IVA2 SYSC register */
445 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
446 OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
447 udelay(100);
350 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, 448 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
351 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); 449 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
352 mmu = dev_context->dsp_mmu;
353 if (mmu)
354 dsp_mmu_exit(mmu);
355 mmu = dsp_mmu_init();
356 if (IS_ERR(mmu)) {
357 dev_err(bridge, "dsp_mmu_init failed!\n");
358 dev_context->dsp_mmu = NULL;
359 status = (int)mmu;
360 }
361 }
362 if (!status) {
363 dev_context->dsp_mmu = mmu;
364 sm_sg = &dev_context->sh_s;
365 sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
366 sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
367 if (IS_ERR_VALUE(sg0_da)) {
368 status = (int)sg0_da;
369 sg0_da = 0;
370 }
371 }
372 if (!status) {
373 sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
374 sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
375 if (IS_ERR_VALUE(sg1_da)) {
376 status = (int)sg1_da;
377 sg1_da = 0;
378 }
379 }
380 if (!status) {
381 u32 da;
382 for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
383 if (!tlb[tlb_i].ul_gpp_pa)
384 continue;
385
386 dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size"
387 " 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa,
388 tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size);
389
390 da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va,
391 tlb[tlb_i].ul_gpp_pa, PAGE_SIZE,
392 IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
393 if (IS_ERR_VALUE(da)) {
394 status = (int)da;
395 break;
396 }
397 }
398 }
399 if (!status) {
400 u32 da;
401 l4_i = 0;
402 while (l4_peripheral_table[l4_i].phys_addr) {
403 da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
404 dsp_virt_addr, l4_peripheral_table[l4_i].
405 phys_addr, PAGE_SIZE,
406 IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
407 if (IS_ERR_VALUE(da)) {
408 status = (int)da;
409 break;
410 }
411 l4_i++;
412 }
413 }
450 udelay(100);
451
452 /* Disable the DSP MMU */
453 hw_mmu_disable(resources->dw_dmmu_base);
454 /* Disable TWL */
455 hw_mmu_twl_disable(resources->dw_dmmu_base);
456
457 /* Only make TLB entry if both addresses are non-zero */
458 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
459 entry_ndx++) {
460 struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
461 struct hw_mmu_map_attrs_t map_attrs = {
462 .endianism = e->endianism,
463 .element_size = e->elem_size,
464 .mixed_size = e->mixed_mode,
465 };
466
467 if (!e->ul_gpp_pa || !e->ul_dsp_va)
468 continue;
469
470 dev_dbg(bridge,
471 "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
472 itmp_entry_ndx,
473 e->ul_gpp_pa,
474 e->ul_dsp_va,
475 e->ul_size);
476
477 hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
478 e->ul_gpp_pa,
479 e->ul_dsp_va,
480 e->ul_size,
481 itmp_entry_ndx,
482 &map_attrs, 1, 1);
483
484 itmp_entry_ndx++;
485 }
486 }
414 487
415 /* Lock the above TLB entries and get the BIOS and load monitor timer 488 /* Lock the above TLB entries and get the BIOS and load monitor timer
416 * information */ 489 * information */
417 if (!status) { 490 if (!status) {
491 hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
492 hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
493 hw_mmu_ttb_set(resources->dw_dmmu_base,
494 dev_context->pt_attrs->l1_base_pa);
495 hw_mmu_twl_enable(resources->dw_dmmu_base);
496 /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
497
498 temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
499 temp = (temp & 0xFFFFFFEF) | 0x11;
500 __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
501
502 /* Let the DSP MMU run */
503 hw_mmu_enable(resources->dw_dmmu_base);
504
418 /* Enable the BIOS clock */ 505 /* Enable the BIOS clock */
419 (void)dev_get_symbol(dev_context->hdev_obj, 506 (void)dev_get_symbol(dev_context->hdev_obj,
420 BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); 507 BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
421 (void)dev_get_symbol(dev_context->hdev_obj, 508 (void)dev_get_symbol(dev_context->hdev_obj,
422 BRIDGEINIT_LOADMON_GPTIMER, 509 BRIDGEINIT_LOADMON_GPTIMER,
423 &ul_load_monitor_timer); 510 &ul_load_monitor_timer);
511 }
424 512
513 if (!status) {
425 if (ul_load_monitor_timer != 0xFFFF) { 514 if (ul_load_monitor_timer != 0xFFFF) {
426 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | 515 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
427 ul_load_monitor_timer; 516 ul_load_monitor_timer;
@@ -430,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
430 dev_dbg(bridge, "Not able to get the symbol for Load " 519 dev_dbg(bridge, "Not able to get the symbol for Load "
431 "Monitor Timer\n"); 520 "Monitor Timer\n");
432 } 521 }
522 }
433 523
524 if (!status) {
434 if (ul_bios_gp_timer != 0xFFFF) { 525 if (ul_bios_gp_timer != 0xFFFF) {
435 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | 526 clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
436 ul_bios_gp_timer; 527 ul_bios_gp_timer;
@@ -439,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
439 dev_dbg(bridge, 530 dev_dbg(bridge,
440 "Not able to get the symbol for BIOS Timer\n"); 531 "Not able to get the symbol for BIOS Timer\n");
441 } 532 }
533 }
442 534
535 if (!status) {
443 /* Set the DSP clock rate */ 536 /* Set the DSP clock rate */
444 (void)dev_get_symbol(dev_context->hdev_obj, 537 (void)dev_get_symbol(dev_context->hdev_obj,
445 "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); 538 "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
@@ -492,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
492 585
493 /* Let DSP go */ 586 /* Let DSP go */
494 dev_dbg(bridge, "%s Unreset\n", __func__); 587 dev_dbg(bridge, "%s Unreset\n", __func__);
588 /* Enable DSP MMU Interrupts */
589 hw_mmu_event_enable(resources->dw_dmmu_base,
590 HW_MMU_ALL_INTERRUPTS);
495 /* release the RST1, DSP starts executing now .. */ 591 /* release the RST1, DSP starts executing now .. */
496 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, 592 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
497 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); 593 OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -521,23 +617,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
521 617
522 /* update board state */ 618 /* update board state */
523 dev_context->dw_brd_state = BRD_RUNNING; 619 dev_context->dw_brd_state = BRD_RUNNING;
524 return 0;
620 /* (void)chnlsm_enable_interrupt(dev_context); */
525 } else { 621 } else {
526 dev_context->dw_brd_state = BRD_UNKNOWN; 622 dev_context->dw_brd_state = BRD_UNKNOWN;
527 } 623 }
528 } 624 }
529
530 while (tlb_i--) {
531 if (!tlb[tlb_i].ul_gpp_pa)
532 continue;
533 iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
534 }
535 while (l4_i--)
536 iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
537 if (sg0_da)
538 iommu_kunmap(mmu, sg0_da);
539 if (sg1_da)
540 iommu_kunmap(mmu, sg1_da);
541 return status; 625 return status;
542} 626}
543 627
@@ -553,9 +637,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
553{ 637{
554 int status = 0; 638 int status = 0;
555 struct bridge_dev_context *dev_context = dev_ctxt; 639 struct bridge_dev_context *dev_context = dev_ctxt;
640 struct pg_table_attrs *pt_attrs;
556 u32 dsp_pwr_state; 641 u32 dsp_pwr_state;
557 int i;
558 struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
559 struct omap_dsp_platform_data *pdata = 642 struct omap_dsp_platform_data *pdata =
560 omap_dspbridge_dev->dev.platform_data; 643 omap_dspbridge_dev->dev.platform_data;
561 644
@@ -591,37 +674,23 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
591 674
592 dsp_wdt_enable(false); 675 dsp_wdt_enable(false);
593 676
594 /* Reset DSP */
595 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
596 OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
597
677 /* This is a good place to clear the MMU page tables as well */
678 if (dev_context->pt_attrs) {
679 pt_attrs = dev_context->pt_attrs;
680 memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
681 memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
682 memset((u8 *) pt_attrs->pg_info, 0x00,
683 (pt_attrs->l2_num_pages * sizeof(struct page_info)));
684 }
598 /* Disable the mailbox interrupts */ 685 /* Disable the mailbox interrupts */
599 if (dev_context->mbox) { 686 if (dev_context->mbox) {
600 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); 687 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
601 omap_mbox_put(dev_context->mbox); 688 omap_mbox_put(dev_context->mbox);
602 dev_context->mbox = NULL; 689 dev_context->mbox = NULL;
603 } 690 }
604 if (dev_context->dsp_mmu) {
605 pr_err("Proc stop mmu if statement\n");
606 for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) {
607 if (!tlb[i].ul_gpp_pa)
608 continue;
609 iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
610 }
611 i = 0;
612 while (l4_peripheral_table[i].phys_addr) {
613 iommu_kunmap(dev_context->dsp_mmu,
614 l4_peripheral_table[i].dsp_virt_addr);
615 i++;
616 }
617 iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
618 iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
619 dsp_mmu_exit(dev_context->dsp_mmu);
620 dev_context->dsp_mmu = NULL;
621 }
622 /* Reset IVA IOMMU*/
623 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
624 OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
691 /* Reset IVA2 clocks*/
692 (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
693 OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
625 694
626 dsp_clock_disable_all(dev_context->dsp_per_clks); 695 dsp_clock_disable_all(dev_context->dsp_per_clks);
627 dsp_clk_disable(DSP_CLK_IVA2); 696 dsp_clk_disable(DSP_CLK_IVA2);
@@ -681,6 +750,10 @@ static int bridge_dev_create(struct bridge_dev_context
681 struct bridge_dev_context *dev_context = NULL; 750 struct bridge_dev_context *dev_context = NULL;
682 s32 entry_ndx; 751 s32 entry_ndx;
683 struct cfg_hostres *resources = config_param; 752 struct cfg_hostres *resources = config_param;
753 struct pg_table_attrs *pt_attrs;
754 u32 pg_tbl_pa;
755 u32 pg_tbl_va;
756 u32 align_size;
684 struct drv_data *drv_datap = dev_get_drvdata(bridge); 757 struct drv_data *drv_datap = dev_get_drvdata(bridge);
685 758
686 /* Allocate and initialize a data structure to contain the bridge driver 759 /* Allocate and initialize a data structure to contain the bridge driver
@@ -711,8 +784,97 @@ static int bridge_dev_create(struct bridge_dev_context
711 if (!dev_context->dw_dsp_base_addr) 784 if (!dev_context->dw_dsp_base_addr)
712 status = -EPERM; 785 status = -EPERM;
713 786
787 pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
788 if (pt_attrs != NULL) {
789 /* Assuming that we use only DSP's memory map
790 * until 0x4000:0000 , we would need only 1024
791 * L1 enties i.e L1 size = 4K */
792 pt_attrs->l1_size = 0x1000;
793 align_size = pt_attrs->l1_size;
794 /* Align sizes are expected to be power of 2 */
795 /* we like to get aligned on L1 table size */
796 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
797 align_size, &pg_tbl_pa);
798
799 /* Check if the PA is aligned for us */
800 if ((pg_tbl_pa) & (align_size - 1)) {
801 /* PA not aligned to page table size;
802 * allocate more and align */
803 mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
804 pt_attrs->l1_size);
805 /* we like to get aligned on L1 table size */
806 pg_tbl_va =
807 (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
808 align_size, &pg_tbl_pa);
809 /* We should be able to get aligned table now */
810 pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
811 pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
812 pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
813 /* Align the PA to the next 'align' boundary */
814 pt_attrs->l1_base_pa =
815 ((pg_tbl_pa) +
816 (align_size - 1)) & (~(align_size - 1));
817 pt_attrs->l1_base_va =
818 pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
819 } else {
820 /* We got aligned PA, cool */
821 pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
822 pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
823 pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
824 pt_attrs->l1_base_pa = pg_tbl_pa;
825 pt_attrs->l1_base_va = pg_tbl_va;
826 }
827 if (pt_attrs->l1_base_va)
828 memset((u8 *) pt_attrs->l1_base_va, 0x00,
829 pt_attrs->l1_size);
830
831 /* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
832 * L4 pages */
833 pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
834 pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
835 pt_attrs->l2_num_pages;
836 align_size = 4; /* Make it u32 aligned */
837 /* we like to get aligned on L1 table size */
838 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
839 align_size, &pg_tbl_pa);
840 pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
841 pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
842 pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
843 pt_attrs->l2_base_pa = pg_tbl_pa;
844 pt_attrs->l2_base_va = pg_tbl_va;
845
846 if (pt_attrs->l2_base_va)
847 memset((u8 *) pt_attrs->l2_base_va, 0x00,
848 pt_attrs->l2_size);
849
850 pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
851 sizeof(struct page_info), GFP_KERNEL);
852 dev_dbg(bridge,
853 "L1 pa %x, va %x, size %x\n L2 pa %x, va "
854 "%x, size %x\n", pt_attrs->l1_base_pa,
855 pt_attrs->l1_base_va, pt_attrs->l1_size,
856 pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
857 pt_attrs->l2_size);
858 dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
859 pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
860 }
861 if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
862 (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
863 dev_context->pt_attrs = pt_attrs;
864 else
865 status = -ENOMEM;
866
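The allocate-twice-and-align dance above relies on the standard power-of-two align-up identity: an allocation of 2 * size is guaranteed to contain an aligned window of size bytes. As a standalone sketch (align_up is illustrative, not part of the driver):

static inline u32 align_up(u32 pa, u32 align)
{
	/* valid only when align is a power of 2 */
	return (pa + align - 1) & ~(align - 1);
}

/* e.g. align_up(0x80001234, 0x1000) == 0x80002000 */
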
714 if (!status) { 867 if (!status) {
868 spin_lock_init(&pt_attrs->pg_lock);
715 dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; 869 dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
870
871 /* Set the Clock Divisor for the DSP module */
872 udelay(5);
873 /* MMU address is obtained from the host
874 * resources struct */
875 dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
876 }
877 if (!status) {
716 dev_context->hdev_obj = hdev_obj; 878 dev_context->hdev_obj = hdev_obj;
717 /* Store current board state. */ 879 /* Store current board state. */
718 dev_context->dw_brd_state = BRD_UNKNOWN; 880 dev_context->dw_brd_state = BRD_UNKNOWN;
@@ -722,6 +884,23 @@ static int bridge_dev_create(struct bridge_dev_context
722 /* Return ptr to our device state to the DSP API for storage */ 884 /* Return ptr to our device state to the DSP API for storage */
723 *dev_cntxt = dev_context; 885 *dev_cntxt = dev_context;
724 } else { 886 } else {
887 if (pt_attrs != NULL) {
888 kfree(pt_attrs->pg_info);
889
890 if (pt_attrs->l2_tbl_alloc_va) {
891 mem_free_phys_mem((void *)
892 pt_attrs->l2_tbl_alloc_va,
893 pt_attrs->l2_tbl_alloc_pa,
894 pt_attrs->l2_tbl_alloc_sz);
895 }
896 if (pt_attrs->l1_tbl_alloc_va) {
897 mem_free_phys_mem((void *)
898 pt_attrs->l1_tbl_alloc_va,
899 pt_attrs->l1_tbl_alloc_pa,
900 pt_attrs->l1_tbl_alloc_sz);
901 }
902 }
903 kfree(pt_attrs);
725 kfree(dev_context); 904 kfree(dev_context);
726 } 905 }
727func_end: 906func_end:
@@ -789,6 +968,7 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
789 */ 968 */
790static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) 969static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
791{ 970{
971 struct pg_table_attrs *pt_attrs;
792 int status = 0; 972 int status = 0;
793 struct bridge_dev_context *dev_context = (struct bridge_dev_context *) 973 struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
794 dev_ctxt; 974 dev_ctxt;
@@ -802,6 +982,23 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
802 982
803 /* first put the device to stop state */ 983 /* first put the device to stop state */
804 bridge_brd_stop(dev_context); 984 bridge_brd_stop(dev_context);
985 if (dev_context->pt_attrs) {
986 pt_attrs = dev_context->pt_attrs;
987 kfree(pt_attrs->pg_info);
988
989 if (pt_attrs->l2_tbl_alloc_va) {
990 mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
991 pt_attrs->l2_tbl_alloc_pa,
992 pt_attrs->l2_tbl_alloc_sz);
993 }
994 if (pt_attrs->l1_tbl_alloc_va) {
995 mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
996 pt_attrs->l1_tbl_alloc_pa,
997 pt_attrs->l1_tbl_alloc_sz);
998 }
999 kfree(pt_attrs);
1000
1001 }
805 1002
806 if (dev_context->resources) { 1003 if (dev_context->resources) {
807 host_res = dev_context->resources; 1004 host_res = dev_context->resources;
@@ -832,6 +1029,8 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
832 iounmap((void *)host_res->dw_mem_base[3]); 1029 iounmap((void *)host_res->dw_mem_base[3]);
833 if (host_res->dw_mem_base[4]) 1030 if (host_res->dw_mem_base[4])
834 iounmap((void *)host_res->dw_mem_base[4]); 1031 iounmap((void *)host_res->dw_mem_base[4]);
1032 if (host_res->dw_dmmu_base)
1033 iounmap(host_res->dw_dmmu_base);
835 if (host_res->dw_per_base) 1034 if (host_res->dw_per_base)
836 iounmap(host_res->dw_per_base); 1035 iounmap(host_res->dw_per_base);
837 if (host_res->dw_per_pm_base) 1036 if (host_res->dw_per_pm_base)
@@ -845,6 +1044,7 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
845 host_res->dw_mem_base[2] = (u32) NULL; 1044 host_res->dw_mem_base[2] = (u32) NULL;
846 host_res->dw_mem_base[3] = (u32) NULL; 1045 host_res->dw_mem_base[3] = (u32) NULL;
847 host_res->dw_mem_base[4] = (u32) NULL; 1046 host_res->dw_mem_base[4] = (u32) NULL;
1047 host_res->dw_dmmu_base = NULL;
848 host_res->dw_sys_ctrl_base = NULL; 1048 host_res->dw_sys_ctrl_base = NULL;
849 1049
850 kfree(host_res); 1050 kfree(host_res);
@@ -928,6 +1128,673 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
928} 1128}
929 1129
930/* 1130/*
1131 * ======== bridge_brd_mem_map ========
1132 * This function maps an MPU buffer to the DSP address space. It performs
1133 * linear to physical address translation if required. It translates each
1134 * page, since linear addresses can be physically non-contiguous.
1135 * All address & size arguments are assumed to be page aligned (in proc.c).
1136 *
1137 * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
1138 */
1139static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
1140 u32 ul_mpu_addr, u32 virt_addr,
1141 u32 ul_num_bytes, u32 ul_map_attr,
1142 struct page **mapped_pages)
1143{
1144 u32 attrs;
1145 int status = 0;
1146 struct bridge_dev_context *dev_context = dev_ctxt;
1147 struct hw_mmu_map_attrs_t hw_attrs;
1148 struct vm_area_struct *vma;
1149 struct mm_struct *mm = current->mm;
1150 u32 write = 0;
1151 u32 num_usr_pgs = 0;
1152 struct page *mapped_page, *pg;
1153 s32 pg_num;
1154 u32 va = virt_addr;
1155 struct task_struct *curr_task = current;
1156 u32 pg_i = 0;
1157 u32 mpu_addr, pa;
1158
1159 dev_dbg(bridge,
1160 "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
1161 __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
1162 ul_map_attr);
1163 if (ul_num_bytes == 0)
1164 return -EINVAL;
1165
1166 if (ul_map_attr & DSP_MAP_DIR_MASK) {
1167 attrs = ul_map_attr;
1168 } else {
1169 /* Assign default attributes */
1170 attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
1171 }
1172 /* Take mapping properties */
1173 if (attrs & DSP_MAPBIGENDIAN)
1174 hw_attrs.endianism = HW_BIG_ENDIAN;
1175 else
1176 hw_attrs.endianism = HW_LITTLE_ENDIAN;
1177
1178 hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
1179 ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1180 /* Ignore element_size if mixed_size is enabled */
1181 if (hw_attrs.mixed_size == 0) {
1182 if (attrs & DSP_MAPELEMSIZE8) {
1183 /* Size is 8 bit */
1184 hw_attrs.element_size = HW_ELEM_SIZE8BIT;
1185 } else if (attrs & DSP_MAPELEMSIZE16) {
1186 /* Size is 16 bit */
1187 hw_attrs.element_size = HW_ELEM_SIZE16BIT;
1188 } else if (attrs & DSP_MAPELEMSIZE32) {
1189 /* Size is 32 bit */
1190 hw_attrs.element_size = HW_ELEM_SIZE32BIT;
1191 } else if (attrs & DSP_MAPELEMSIZE64) {
1192 /* Size is 64 bit */
1193 hw_attrs.element_size = HW_ELEM_SIZE64BIT;
1194 } else {
1195 /*
1196 * Mixedsize isn't enabled, so size can't be
1197 * zero here
1198 */
1199 return -EINVAL;
1200 }
1201 }
1202 if (attrs & DSP_MAPDONOTLOCK)
1203 hw_attrs.donotlockmpupage = 1;
1204 else
1205 hw_attrs.donotlockmpupage = 0;
1206
1207 if (attrs & DSP_MAPVMALLOCADDR) {
1208 return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
1209 ul_num_bytes, &hw_attrs);
1210 }
1211 /*
1212 * Do OS-specific user-va to pa translation.
1213 * Combine physically contiguous regions to reduce TLBs.
1214 * Pass the translated pa to pte_update.
1215 */
1216 if ((attrs & DSP_MAPPHYSICALADDR)) {
1217 status = pte_update(dev_context, ul_mpu_addr, virt_addr,
1218 ul_num_bytes, &hw_attrs);
1219 goto func_cont;
1220 }
1221
1222 /*
1223 * Important Note: ul_mpu_addr is mapped from user application process
1224 * to current process - it must lie completely within the current
1225 * virtual memory address space in order to be of use to us here!
1226 */
1227 down_read(&mm->mmap_sem);
1228 vma = find_vma(mm, ul_mpu_addr);
1229 if (vma)
1230 dev_dbg(bridge,
1231 "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
1232 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1233 ul_num_bytes, vma->vm_start, vma->vm_end,
1234 vma->vm_flags);
1235
1236 /*
1237 * It is observed that under some circumstances, the user buffer is
1238 * spread across several VMAs. So loop through and check if the entire
1239 * user buffer is covered
1240 */
1241 while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
1242 /* jump to the next VMA region */
1243 vma = find_vma(mm, vma->vm_end + 1);
1244 dev_dbg(bridge,
1245 "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
1246 "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1247 ul_num_bytes, vma->vm_start, vma->vm_end,
1248 vma->vm_flags);
1249 }
1250 if (!vma) {
1251 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
1252 __func__, ul_mpu_addr, ul_num_bytes);
1253 status = -EINVAL;
1254 up_read(&mm->mmap_sem);
1255 goto func_cont;
1256 }
1257
1258 if (vma->vm_flags & VM_IO) {
1259 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1260 mpu_addr = ul_mpu_addr;
1261
1262 /* Get the physical addresses for user buffer */
1263 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1264 pa = user_va2_pa(mm, mpu_addr);
1265 if (!pa) {
1266 status = -EPERM;
1267 pr_err("DSPBRIDGE: VM_IO mapping physical "
1268 "address is invalid\n");
1269 break;
1270 }
1271 if (pfn_valid(__phys_to_pfn(pa))) {
1272 pg = PHYS_TO_PAGE(pa);
1273 get_page(pg);
1274 if (page_count(pg) < 1) {
1275 pr_err("Bad page in VM_IO buffer\n");
1276 bad_page_dump(pa, pg);
1277 }
1278 }
1279 status = pte_set(dev_context->pt_attrs, pa,
1280 va, HW_PAGE_SIZE4KB, &hw_attrs);
1281 if (status)
1282 break;
1283
1284 va += HW_PAGE_SIZE4KB;
1285 mpu_addr += HW_PAGE_SIZE4KB;
1286 pa += HW_PAGE_SIZE4KB;
1287 }
1288 } else {
1289 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1290 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
1291 write = 1;
1292
1293 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1294 pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
1295 write, 1, &mapped_page, NULL);
1296 if (pg_num > 0) {
1297 if (page_count(mapped_page) < 1) {
1298 pr_err("Bad page count after doing "
1299 "get_user_pages on "
1300 "user buffer\n");
1301 bad_page_dump(page_to_phys(mapped_page),
1302 mapped_page);
1303 }
1304 status = pte_set(dev_context->pt_attrs,
1305 page_to_phys(mapped_page), va,
1306 HW_PAGE_SIZE4KB, &hw_attrs);
1307 if (status)
1308 break;
1309
1310 if (mapped_pages)
1311 mapped_pages[pg_i] = mapped_page;
1312
1313 va += HW_PAGE_SIZE4KB;
1314 ul_mpu_addr += HW_PAGE_SIZE4KB;
1315 } else {
1316 pr_err("DSPBRIDGE: get_user_pages FAILED, "
1317 "MPU addr = 0x%x, "
1318 "vma->vm_flags = 0x%lx, "
1319 "get_user_pages Err "
1320 "Value = %d, Buffer "
1321 "size=0x%x\n", ul_mpu_addr,
1322 vma->vm_flags, pg_num, ul_num_bytes);
1323 status = -EPERM;
1324 break;
1325 }
1326 }
1327 }
1328 up_read(&mm->mmap_sem);
1329func_cont:
1330 if (status) {
1331 /*
1332 * Roll back the mapped pages in case mapping failed
1333 * midway
1334 */
1335 if (pg_i) {
1336 bridge_brd_mem_un_map(dev_context, virt_addr,
1337 (pg_i * PG_SIZE4K));
1338 }
1339 status = -EPERM;
1340 }
1341 /*
1342 * In any case, flush the TLB
1343 * This is called from here instead of from pte_update to avoid unnecessary
1344 * repetition while mapping non-contiguous physical regions of a virtual
1345 * region
1346 */
1347 flush_all(dev_context);
1348 dev_dbg(bridge, "%s status %x\n", __func__, status);
1349 return status;
1350}
1351
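As a usage illustration, mapping a physically contiguous, page-aligned region with the attributes used elsewhere in this patch could look like this sketch (map_contig_region is hypothetical):

static int map_contig_region(struct bridge_dev_context *dev_ctxt,
			     u32 pa, u32 dsp_va, u32 len)
{
	u32 attrs = DSP_MAPLITTLEENDIAN | DSP_MAPPHYSICALADDR |
		    DSP_MAPELEMSIZE32 | DSP_MAPDONOTLOCK;

	/* pa, dsp_va and len must all be page aligned (see comment above) */
	return bridge_brd_mem_map(dev_ctxt, pa, dsp_va, len, attrs, NULL);
}
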
1352/*
1353 * ======== bridge_brd_mem_un_map ========
1354 * Invalidate the PTEs for the DSP VA block to be unmapped.
1355 *
1356 * PTEs of a mapped memory block are contiguous in any page table,
1357 * so, instead of looking up the PTE address for every 4K block,
1358 * we clear consecutive PTEs until we have unmapped all the bytes.
1359 */
1360static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
1361 u32 virt_addr, u32 ul_num_bytes)
1362{
1363 u32 l1_base_va;
1364 u32 l2_base_va;
1365 u32 l2_base_pa;
1366 u32 l2_page_num;
1367 u32 pte_val;
1368 u32 pte_size;
1369 u32 pte_count;
1370 u32 pte_addr_l1;
1371 u32 pte_addr_l2 = 0;
1372 u32 rem_bytes;
1373 u32 rem_bytes_l2;
1374 u32 va_curr;
1375 struct page *pg = NULL;
1376 int status = 0;
1377 struct bridge_dev_context *dev_context = dev_ctxt;
1378 struct pg_table_attrs *pt = dev_context->pt_attrs;
1379 u32 temp;
1380 u32 paddr;
1381 u32 numof4k_pages = 0;
1382
1383 va_curr = virt_addr;
1384 rem_bytes = ul_num_bytes;
1385 rem_bytes_l2 = 0;
1386 l1_base_va = pt->l1_base_va;
1387 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1388 dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
1389 "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
1390 ul_num_bytes, l1_base_va, pte_addr_l1);
1391
1392 while (rem_bytes && !status) {
1393 u32 va_curr_orig = va_curr;
1394 /* Find whether the L1 PTE points to a valid L2 PT */
1395 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1396 pte_val = *(u32 *) pte_addr_l1;
1397 pte_size = hw_mmu_pte_size_l1(pte_val);
1398
1399 if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
1400 goto skip_coarse_page;
1401
1402 /*
1403 * Get the L2 PA from the L1 PTE, and find
1404 * corresponding L2 VA
1405 */
1406 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1407 l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1408 l2_page_num =
1409 (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1410 /*
1411 * Find the L2 PTE address from which we will start
1412 * clearing, the number of PTEs to be cleared on this
1413 * page, and the size of VA space that needs to be
1414 * cleared on this L2 page
1415 */
1416 pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
1417 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1418 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
1419 if (rem_bytes < (pte_count * PG_SIZE4K))
1420 pte_count = rem_bytes / PG_SIZE4K;
1421 rem_bytes_l2 = pte_count * PG_SIZE4K;
1422
1423 /*
1424 * Unmap the VA space on this L2 PT. A quicker way
1425 * would be to clear pte_count entries starting from
1426 * pte_addr_l2. However, the code below checks that we don't
1427 * clear invalid entries or less than 64KB for a 64KB
1428 * entry. Similar checking is done for L1 PTEs too
1429 * below
1430 */
1431 while (rem_bytes_l2 && !status) {
1432 pte_val = *(u32 *) pte_addr_l2;
1433 pte_size = hw_mmu_pte_size_l2(pte_val);
1434 /* va_curr aligned to pte_size? */
1435 if (pte_size == 0 || rem_bytes_l2 < pte_size ||
1436 va_curr & (pte_size - 1)) {
1437 status = -EPERM;
1438 break;
1439 }
1440
1441 /* Collect Physical addresses from VA */
1442 paddr = (pte_val & ~(pte_size - 1));
1443 if (pte_size == HW_PAGE_SIZE64KB)
1444 numof4k_pages = 16;
1445 else
1446 numof4k_pages = 1;
1447 temp = 0;
1448 while (temp++ < numof4k_pages) {
1449 if (!pfn_valid(__phys_to_pfn(paddr))) {
1450 paddr += HW_PAGE_SIZE4KB;
1451 continue;
1452 }
1453 pg = PHYS_TO_PAGE(paddr);
1454 if (page_count(pg) < 1) {
1455 pr_info("DSPBRIDGE: UNMAP function: "
1456 "COUNT 0 FOR PA 0x%x, size = "
1457 "0x%x\n", paddr, ul_num_bytes);
1458 bad_page_dump(paddr, pg);
1459 } else {
1460 set_page_dirty(pg);
1461 page_cache_release(pg);
1462 }
1463 paddr += HW_PAGE_SIZE4KB;
1464 }
1465 if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
1466 status = -EPERM;
1467 goto EXIT_LOOP;
1468 }
1469
1470 status = 0;
1471 rem_bytes_l2 -= pte_size;
1472 va_curr += pte_size;
1473 pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
1474 }
1475 spin_lock(&pt->pg_lock);
1476 if (rem_bytes_l2 == 0) {
1477 pt->pg_info[l2_page_num].num_entries -= pte_count;
1478 if (pt->pg_info[l2_page_num].num_entries == 0) {
1479 /*
1480 * Clear the L1 PTE pointing to the L2 PT
1481 */
1482 if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
1483 HW_MMU_COARSE_PAGE_SIZE))
1484 status = 0;
1485 else {
1486 status = -EPERM;
1487 spin_unlock(&pt->pg_lock);
1488 goto EXIT_LOOP;
1489 }
1490 }
1491 rem_bytes -= pte_count * PG_SIZE4K;
1492 } else
1493 status = -EPERM;
1494
1495 spin_unlock(&pt->pg_lock);
1496 continue;
1497skip_coarse_page:
1498 /* va_curr aligned to pte_size? */
1499 /* pte_size = 1 MB or 16 MB */
1500 if (pte_size == 0 || rem_bytes < pte_size ||
1501 va_curr & (pte_size - 1)) {
1502 status = -EPERM;
1503 break;
1504 }
1505
1506 if (pte_size == HW_PAGE_SIZE1MB)
1507 numof4k_pages = 256;
1508 else
1509 numof4k_pages = 4096;
1510 temp = 0;
1511 /* Collect Physical addresses from VA */
1512 paddr = (pte_val & ~(pte_size - 1));
1513 while (temp++ < numof4k_pages) {
1514 if (pfn_valid(__phys_to_pfn(paddr))) {
1515 pg = PHYS_TO_PAGE(paddr);
1516 if (page_count(pg) < 1) {
1517 pr_info("DSPBRIDGE: UNMAP function: "
1518 "COUNT 0 FOR PA 0x%x, size = "
1519 "0x%x\n", paddr, ul_num_bytes);
1520 bad_page_dump(paddr, pg);
1521 } else {
1522 set_page_dirty(pg);
1523 page_cache_release(pg);
1524 }
1525 }
1526 paddr += HW_PAGE_SIZE4KB;
1527 }
1528 if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
1529 status = 0;
1530 rem_bytes -= pte_size;
1531 va_curr += pte_size;
1532 } else {
1533 status = -EPERM;
1534 goto EXIT_LOOP;
1535 }
1536 }
1537 /*
1538 * It is better to flush the TLB here, so that any stale entries
1539 * get flushed
1540 */
1541EXIT_LOOP:
1542 flush_all(dev_context);
1543 dev_dbg(bridge,
1544 "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
1545 " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
1546 pte_addr_l2, rem_bytes, rem_bytes_l2, status);
1547 return status;
1548}
1549
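The L2 cursor arithmetic above reads more easily once spelled out: a PTE of size pte_size covers pte_size >> 12 4KB slots, each slot being one u32 entry, which is exactly what the increment pte_addr_l2 += (pte_size >> 12) * sizeof(u32) and the numof4k_pages values encode. A sketch:

/* Sketch: number of 4KB L2 slots consumed by one PTE of a given size */
static inline u32 l2_slots_for(u32 pte_size)
{
	return pte_size >> 12;	/* 1 for 4KB, 16 for 64KB */
}

/* so a 64KB PTE advances the L2 cursor by 16 * sizeof(u32) == 64 bytes */
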
1550/*
1551 * ======== user_va2_pa ========
1552 * Purpose:
1553 * This function walks through the page tables to convert a userland
1554 * virtual address to physical address
1555 */
1556static u32 user_va2_pa(struct mm_struct *mm, u32 address)
1557{
1558 pgd_t *pgd;
1559 pmd_t *pmd;
1560 pte_t *ptep, pte;
1561
1562 pgd = pgd_offset(mm, address);
1563 if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
1564 pmd = pmd_offset(pgd, address);
1565 if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
1566 ptep = pte_offset_map(pmd, address);
1567 if (ptep) {
1568 pte = *ptep;
1569 if (pte_present(pte))
1570 return pte & PAGE_MASK;
1571 }
1572 }
1573 }
1574
1575 return 0;
1576}
1577
1578/*
1579 * ======== pte_update ========
1580 * This function calculates the optimum page-aligned addresses and sizes
1581 * Caller must pass page-aligned values
1582 */
1583static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
1584 u32 va, u32 size,
1585 struct hw_mmu_map_attrs_t *map_attrs)
1586{
1587 u32 i;
1588 u32 all_bits;
1589 u32 pa_curr = pa;
1590 u32 va_curr = va;
1591 u32 num_bytes = size;
1592 struct bridge_dev_context *dev_context = dev_ctxt;
1593 int status = 0;
1594 u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
1595 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
1596 };
1597
1598 while (num_bytes && !status) {
1599 /* To find the max. page size with which both PA & VA are
1600 * aligned */
1601 all_bits = pa_curr | va_curr;
1602
1603 for (i = 0; i < 4; i++) {
1604 if ((num_bytes >= page_size[i]) && ((all_bits &
1605 (page_size[i] -
1606 1)) == 0)) {
1607 status =
1608 pte_set(dev_context->pt_attrs, pa_curr,
1609 va_curr, page_size[i], map_attrs);
1610 pa_curr += page_size[i];
1611 va_curr += page_size[i];
1612 num_bytes -= page_size[i];
1613 /* Don't try smaller sizes. Hopefully we have
1614 * reached an address aligned to a bigger page
1615 * size */
1616 break;
1617 }
1618 }
1619 }
1620
1621 return status;
1622}
1623
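The alignment test in the loop compresses two divisibility checks into one: pa_curr and va_curr are both multiples of page_size[i] exactly when their bitwise OR has no bits set below that size. The same policy drives the TLB-fitting loops in io_sm.c above. A standalone sketch of it (assuming the HW_PAGE_SIZE* constants from hw_defs.h; max_aligned_size is illustrative):

static u32 max_aligned_size(u32 pa, u32 va, u32 num_bytes)
{
	static const u32 sizes[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
				     HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB };
	u32 all_bits = pa | va;
	int i;

	for (i = 0; i < 4; i++)
		if (num_bytes >= sizes[i] && !(all_bits & (sizes[i] - 1)))
			return sizes[i];
	return 0;	/* caller passed unaligned or too-short input */
}

/* e.g. pa 0x87200000, va 0x20000000 with 2MB left: 1MB is selected */
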
1624/*
1625 * ======== pte_set ========
1626 * This function calculates PTE address (MPU virtual) to be updated
1627 * It also manages the L2 page tables
1628 */
1629static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
1630 u32 size, struct hw_mmu_map_attrs_t *attrs)
1631{
1632 u32 i;
1633 u32 pte_val;
1634 u32 pte_addr_l1;
1635 u32 pte_size;
1636 /* Base address of the PT that will be updated */
1637 u32 pg_tbl_va;
1638 u32 l1_base_va;
1639 /* Compiler warns that the next three variables might be used
1640 * uninitialized in this function. That doesn't seem to be the case;
1641 * we initialize them anyway to silence the warning. */
1642 u32 l2_base_va = 0;
1643 u32 l2_base_pa = 0;
1644 u32 l2_page_num = 0;
1645 int status = 0;
1646
1647 l1_base_va = pt->l1_base_va;
1648 pg_tbl_va = l1_base_va;
1649 if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
1650 /* Find whether the L1 PTE points to a valid L2 PT */
1651 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1652 if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1653 pte_val = *(u32 *) pte_addr_l1;
1654 pte_size = hw_mmu_pte_size_l1(pte_val);
1655 } else {
1656 return -EPERM;
1657 }
1658 spin_lock(&pt->pg_lock);
1659 if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1660 /* Get the L2 PA from the L1 PTE, and find
1661 * corresponding L2 VA */
1662 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1663 l2_base_va =
1664 l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1665 l2_page_num =
1666 (l2_base_pa -
1667 pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1668 } else if (pte_size == 0) {
1669 /* L1 PTE is invalid. Allocate a L2 PT and
1670 * point the L1 PTE to it */
1671 /* Find a free L2 PT. */
1672 for (i = 0; (i < pt->l2_num_pages) &&
1673 (pt->pg_info[i].num_entries != 0); i++)
1674 ;
1675 if (i < pt->l2_num_pages) {
1676 l2_page_num = i;
1677 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1678 HW_MMU_COARSE_PAGE_SIZE);
1679 l2_base_va = pt->l2_base_va + (l2_page_num *
1680 HW_MMU_COARSE_PAGE_SIZE);
1681 /* Endianness attributes are ignored for
1682 * HW_MMU_COARSE_PAGE_SIZE */
1683 status =
1684 hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
1685 HW_MMU_COARSE_PAGE_SIZE,
1686 attrs);
1687 } else {
1688 status = -ENOMEM;
1689 }
1690 } else {
1691 /* Found valid L1 PTE of another size.
1692 * Should not overwrite it. */
1693 status = -EPERM;
1694 }
1695 if (!status) {
1696 pg_tbl_va = l2_base_va;
1697 if (size == HW_PAGE_SIZE64KB)
1698 pt->pg_info[l2_page_num].num_entries += 16;
1699 else
1700 pt->pg_info[l2_page_num].num_entries++;
1701 dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
1702 "%x, num_entries %x\n", l2_base_va,
1703 l2_base_pa, l2_page_num,
1704 pt->pg_info[l2_page_num].num_entries);
1705 }
1706 spin_unlock(&pt->pg_lock);
1707 }
1708 if (!status) {
1709 dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
1710 pg_tbl_va, pa, va, size);
1711 dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
1712 "mixed_size %x\n", attrs->endianism,
1713 attrs->element_size, attrs->mixed_size);
1714 status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1715 }
1716
1717 return status;
1718}
1719
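
pte_set() uses pg_info[].num_entries as a per-L2-table occupancy count: a coarse table is free when the count is zero, and a 64KB page charges 16 entries because it occupies 16 consecutive L2 slots (see the replication in hw_mmu_pte_set() below). The slot search in isolation (a hedged sketch; the helper name is hypothetical):

/* Index of the first unused L2 coarse page table, or -1 if none. */
static int find_free_l2_table(struct pg_table_attrs *pt)
{
	u32 i;

	for (i = 0; i < pt->l2_num_pages; i++)
		if (pt->pg_info[i].num_entries == 0)
			return i;

	return -1;	/* all coarse tables in the preallocated pool are busy */
}
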
1720/* Memory map kernel VA -- memory allocated with vmalloc */
1721static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
1722 u32 ul_mpu_addr, u32 virt_addr,
1723 u32 ul_num_bytes,
1724 struct hw_mmu_map_attrs_t *hw_attrs)
1725{
1726 int status = 0;
1727 struct page *page[1];
1728 u32 i;
1729 u32 pa_curr;
1730 u32 pa_next;
1731 u32 va_curr;
1732 u32 size_curr;
1733 u32 num_pages;
1734 u32 pa;
1735 u32 num_of4k_pages;
1736 u32 temp = 0;
1737
1738 /*
1739 * Do Kernel va to pa translation.
1740 * Combine physically contiguous regions to reduce the number of TLB entries.
1741 * Pass the translated pa to pte_update.
1742 */
1743 num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */
1744 i = 0;
1745 va_curr = ul_mpu_addr;
1746 page[0] = vmalloc_to_page((void *)va_curr);
1747 pa_next = page_to_phys(page[0]);
1748 while (!status && (i < num_pages)) {
1749 /*
1750 * Reuse pa_next from the previous iteration to avoid
1751 * an extra va2pa call
1752 */
1753 pa_curr = pa_next;
1754 size_curr = PAGE_SIZE;
1755 /*
1756 * If the next page is physically contiguous,
1757 * map it with the current one by increasing
1758 * the size of the region to be mapped
1759 */
1760 while (++i < num_pages) {
1761 page[0] =
1762 vmalloc_to_page((void *)(va_curr + size_curr));
1763 pa_next = page_to_phys(page[0]);
1764
1765 if (pa_next == (pa_curr + size_curr))
1766 size_curr += PAGE_SIZE;
1767 else
1768 break;
1769
1770 }
1771 if (pa_next == 0) {
1772 status = -ENOMEM;
1773 break;
1774 }
1775 pa = pa_curr;
1776 num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
1777 while (temp++ < num_of4k_pages) {
1778 get_page(PHYS_TO_PAGE(pa));
1779 pa += HW_PAGE_SIZE4KB;
1780 }
1781 status = pte_update(dev_context, pa_curr, virt_addr +
1782 (va_curr - ul_mpu_addr), size_curr,
1783 hw_attrs);
1784 va_curr += size_curr;
1785 }
1786 /*
1787 * In any case, flush the TLB
1788 * This is called from here instead of from pte_update to avoid unnecessary
1789 * repetition while mapping non-contiguous physical regions of a virtual
1790 * region
1791 */
1792 flush_all(dev_context);
1793 dev_dbg(bridge, "%s status %x\n", __func__, status);
1794 return status;
1795}
1796
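
The nested loops in mem_map_vmalloc() are a run-length coalescer over the physical pages backing the vmalloc range: adjacent 4KB frames that happen to be physically contiguous are folded into one (pa, size) run before pte_update() is called. (Note in passing that temp is never reset between runs, so the get_page() pinning above is only correct for the first coalesced run.) The same idea over an arbitrary frame array (a generic sketch, not this driver's API):

/* Merge physically contiguous 4KB frames into (run_pa, run_len) spans. */
static void coalesce_frames(const u32 *pa, u32 n,
			    void (*emit)(u32 run_pa, u32 run_len))
{
	u32 i = 0;

	while (i < n) {
		u32 run_pa = pa[i];
		u32 run_len = PAGE_SIZE;

		while (++i < n && pa[i] == run_pa + run_len)
			run_len += PAGE_SIZE;

		emit(run_pa, run_len);
	}
}

Each emitted run is then mapped with the largest page sizes its alignment allows, which is exactly what pte_update() does with the coalesced (pa_curr, size_curr) pairs.
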
1797/*
931 * ======== wait_for_start ======== 1798 * ======== wait_for_start ========
932 * Wait for the signal from DSP that it has started, or time out. 1799 * Wait for the signal from DSP that it has started, or time out.
933 */ 1800 */
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index b57a9fd5e757..fb9026e1403c 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -31,6 +31,10 @@
31#include <dspbridge/dev.h> 31#include <dspbridge/dev.h>
32#include <dspbridge/iodefs.h> 32#include <dspbridge/iodefs.h>
33 33
34/* ------------------------------------ Hardware Abstraction Layer */
35#include <hw_defs.h>
36#include <hw_mmu.h>
37
34#include <dspbridge/pwr_sh.h> 38#include <dspbridge/pwr_sh.h>
35 39
36/* ----------------------------------- Bridge Driver */ 40/* ----------------------------------- Bridge Driver */
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index 66dbf02549e4..ba2961049dad 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -134,16 +134,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt,
134 134
135 if (!status) { 135 if (!status) {
136 ul_tlb_base_virt = 136 ul_tlb_base_virt =
137 dev_context->sh_s.seg0_da * DSPWORDSIZE; 137 dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 138 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
139 dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va; 139 dw_ext_prog_virt_mem =
140 dev_context->atlb_entry[0].ul_gpp_va;
140 141
141 if (!trace_read) { 142 if (!trace_read) {
142 ul_shm_offset_virt = 143 ul_shm_offset_virt =
143 ul_shm_base_virt - ul_tlb_base_virt; 144 ul_shm_base_virt - ul_tlb_base_virt;
144 ul_shm_offset_virt += 145 ul_shm_offset_virt +=
145 PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + 146 PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
146 1, PAGE_SIZE * 16); 147 1, HW_PAGE_SIZE64KB);
147 dw_ext_prog_virt_mem -= ul_shm_offset_virt; 148 dw_ext_prog_virt_mem -= ul_shm_offset_virt;
148 dw_ext_prog_virt_mem += 149 dw_ext_prog_virt_mem +=
149 (ul_ext_base - ul_dyn_ext_base); 150 (ul_ext_base - ul_dyn_ext_base);
@@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
317 ret = -EPERM; 318 ret = -EPERM;
318 319
319 if (!ret) { 320 if (!ret) {
320 ul_tlb_base_virt = dev_context->sh_s.seg0_da * 321 ul_tlb_base_virt =
321 DSPWORDSIZE; 322 dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
322
323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); 323 DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
324 324
325 if (symbols_reloaded) { 325 if (symbols_reloaded) {
@@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context,
337 ul_shm_base_virt - ul_tlb_base_virt; 337 ul_shm_base_virt - ul_tlb_base_virt;
338 if (trace_load) { 338 if (trace_load) {
339 dw_ext_prog_virt_mem = 339 dw_ext_prog_virt_mem =
340 dev_context->sh_s.seg0_va; 340 dev_context->atlb_entry[0].ul_gpp_va;
341 } else { 341 } else {
342 dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; 342 dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
343 dw_ext_prog_virt_mem += 343 dw_ext_prog_virt_mem +=
@@ -393,6 +393,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
393 omap_dspbridge_dev->dev.platform_data; 393 omap_dspbridge_dev->dev.platform_data;
394 struct cfg_hostres *resources = dev_context->resources; 394 struct cfg_hostres *resources = dev_context->resources;
395 int status = 0; 395 int status = 0;
396 u32 temp;
396 397
397 if (!dev_context->mbox) 398 if (!dev_context->mbox)
398 return 0; 399 return 0;
@@ -436,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
436 omap_mbox_restore_ctx(dev_context->mbox); 437 omap_mbox_restore_ctx(dev_context->mbox);
437 438
438 /* Access MMU SYS CONFIG register to generate a short wakeup */ 439 /* Access MMU SYS CONFIG register to generate a short wakeup */
439 iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG); 440 temp = readl(resources->dw_dmmu_base + 0x10);
440 441
441 dev_context->dw_brd_state = BRD_RUNNING; 442 dev_context->dw_brd_state = BRD_RUNNING;
442 } else if (dev_context->dw_brd_state == BRD_RETENTION) { 443 } else if (dev_context->dw_brd_state == BRD_RETENTION) {
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index e24ea0c73914..3430418190da 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -31,6 +31,57 @@
31#include <dspbridge/drv.h> 31#include <dspbridge/drv.h>
32#include <dspbridge/wdt.h> 32#include <dspbridge/wdt.h>
33 33
34static u32 fault_addr;
35
36static void mmu_fault_dpc(unsigned long data)
37{
38 struct deh_mgr *deh = (void *)data;
39
40 if (!deh)
41 return;
42
43 bridge_deh_notify(deh, DSP_MMUFAULT, 0);
44}
45
46static irqreturn_t mmu_fault_isr(int irq, void *data)
47{
48 struct deh_mgr *deh = data;
49 struct cfg_hostres *resources;
50 u32 event;
51
52 if (!deh)
53 return IRQ_HANDLED;
54
55 resources = deh->hbridge_context->resources;
56 if (!resources) {
57 dev_dbg(bridge, "%s: Failed to get Host Resources\n",
58 __func__);
59 return IRQ_HANDLED;
60 }
61
62 hw_mmu_event_status(resources->dw_dmmu_base, &event);
63 if (event == HW_MMU_TRANSLATION_FAULT) {
64 hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
65 dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
66 event, fault_addr);
67 /*
68 * Schedule a DPC directly. In the future, it may be
69 * necessary to check if DSP MMU fault is intended for
70 * Bridge.
71 */
72 tasklet_schedule(&deh->dpc_tasklet);
73
74 /* Disable the MMU events; otherwise, once the fault is
75 * cleared, they will start raising interrupts again */
76 hw_mmu_event_disable(resources->dw_dmmu_base,
77 HW_MMU_TRANSLATION_FAULT);
78 } else {
79 hw_mmu_event_disable(resources->dw_dmmu_base,
80 HW_MMU_ALL_INTERRUPTS);
81 }
82 return IRQ_HANDLED;
83}
84
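
The handler above follows the usual top-half/bottom-half split: hard-IRQ context only latches the fault address and masks the interrupt source, while bridge_deh_notify() runs later from the tasklet. Stripped of the bridge specifics, the pattern looks like this (a generic sketch, not this driver's code):

static void fault_tasklet_fn(unsigned long data)
{
	/* Slow work (notifications, stack dumps) runs in softirq context. */
}

static DECLARE_TASKLET(fault_tasklet, fault_tasklet_fn, 0);

static irqreturn_t fault_isr(int irq, void *dev_id)
{
	/* 1. Latch what the hardware reports about the fault.    */
	/* 2. Mask the source so it cannot fire again immediately. */
	/* 3. Defer everything else to the bottom half.            */
	tasklet_schedule(&fault_tasklet);
	return IRQ_HANDLED;
}
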
34int bridge_deh_create(struct deh_mgr **ret_deh, 85int bridge_deh_create(struct deh_mgr **ret_deh,
35 struct dev_object *hdev_obj) 86 struct dev_object *hdev_obj)
36{ 87{
@@ -58,9 +109,18 @@ int bridge_deh_create(struct deh_mgr **ret_deh,
58 } 109 }
59 ntfy_init(deh->ntfy_obj); 110 ntfy_init(deh->ntfy_obj);
60 111
112 /* Create an MMU fault DPC */
113 tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
114
61 /* Fill in context structure */ 115 /* Fill in context structure */
62 deh->hbridge_context = hbridge_context; 116 deh->hbridge_context = hbridge_context;
63 117
118 /* Install ISR function for DSP MMU fault */
119 status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
120 "DspBridge\tiommu fault", deh);
121 if (status < 0)
122 goto err;
123
64 *ret_deh = deh; 124 *ret_deh = deh;
65 return 0; 125 return 0;
66 126
@@ -80,6 +140,11 @@ int bridge_deh_destroy(struct deh_mgr *deh)
80 ntfy_delete(deh->ntfy_obj); 140 ntfy_delete(deh->ntfy_obj);
81 kfree(deh->ntfy_obj); 141 kfree(deh->ntfy_obj);
82 } 142 }
143 /* Disable DSP MMU fault */
144 free_irq(INT_DSP_MMU_IRQ, deh);
145
146 /* Free DPC object */
147 tasklet_kill(&deh->dpc_tasklet);
83 148
84 /* Deallocate the DEH manager object */ 149 /* Deallocate the DEH manager object */
85 kfree(deh); 150 kfree(deh);
@@ -101,6 +166,48 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask,
101 return ntfy_unregister(deh->ntfy_obj, hnotification); 166 return ntfy_unregister(deh->ntfy_obj, hnotification);
102} 167}
103 168
169#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
170static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
171{
172 struct cfg_hostres *resources;
173 struct hw_mmu_map_attrs_t map_attrs = {
174 .endianism = HW_LITTLE_ENDIAN,
175 .element_size = HW_ELEM_SIZE16BIT,
176 .mixed_size = HW_MMU_CPUES,
177 };
178 void *dummy_va_addr;
179
180 resources = dev_context->resources;
181 dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC);
182
183 /*
184 * Before acking the MMU fault, let's make sure MMU can only
185 * access entry #0. Then add a new entry so that the DSP OS
186 * can continue in order to dump the stack.
187 */
188 hw_mmu_twl_disable(resources->dw_dmmu_base);
189 hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
190
191 hw_mmu_tlb_add(resources->dw_dmmu_base,
192 virt_to_phys(dummy_va_addr), fault_addr,
193 HW_PAGE_SIZE4KB, 1,
194 &map_attrs, HW_SET, HW_SET);
195
196 dsp_clk_enable(DSP_CLK_GPT8);
197
198 dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
199
200 /* Clear MMU interrupt */
201 hw_mmu_event_ack(resources->dw_dmmu_base,
202 HW_MMU_TRANSLATION_FAULT);
203 dump_dsp_stack(dev_context);
204 dsp_clk_disable(DSP_CLK_GPT8);
205
206 hw_mmu_disable(resources->dw_dmmu_base);
207 free_page((unsigned long)dummy_va_addr);
208}
209#endif
210
104static inline const char *event_to_string(int event) 211static inline const char *event_to_string(int event)
105{ 212{
106 switch (event) { 213 switch (event) {
@@ -133,7 +240,13 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info)
133#endif 240#endif
134 break; 241 break;
135 case DSP_MMUFAULT: 242 case DSP_MMUFAULT:
136 dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info); 243 dev_err(bridge, "%s: %s, addr=0x%x", __func__,
244 str, fault_addr);
245#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
246 print_dsp_trace_buffer(dev_context);
247 dump_dl_modules(dev_context);
248 mmu_fault_print_stack(dev_context);
249#endif
137 break; 250 break;
138 default: 251 default:
139 dev_err(bridge, "%s: %s", __func__, str); 252 dev_err(bridge, "%s: %s", __func__, str);
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h
new file mode 100644
index 000000000000..e48d7f67c60a
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h
@@ -0,0 +1,41 @@
1/*
2 * EasiGlobal.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _EASIGLOBAL_H
18#define _EASIGLOBAL_H
19#include <linux/types.h>
20
21/*
22 * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE
23 *
24 * DESCRIPTION: Defines used to describe register types for EASI-checker tests.
25 */
26
27#define READ_ONLY 1
28#define WRITE_ONLY 2
29#define READ_WRITE 3
30
31/*
32 * MACRO: _DEBUG_LEVEL1_EASI
33 *
34 * DESCRIPTION: A macro which can be used to indicate that a particular
35 * register access function was called.
36 *
37 * NOTE: We currently don't use this functionality.
38 */
39#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0)
40
41#endif /* _EASIGLOBAL_H */
diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h
new file mode 100644
index 000000000000..1cefca321d71
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMUAccInt.h
@@ -0,0 +1,76 @@
1/*
2 * MMUAccInt.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _MMU_ACC_INT_H
18#define _MMU_ACC_INT_H
19
20/* Mappings of level 1 EASI function numbers to function names */
21
22#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3)
23#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17)
24#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39)
25#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51)
26#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102)
27#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103)
28#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156)
29#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174)
30#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180)
31#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190)
32#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194)
33#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198)
34#define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203)
35#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204)
36#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205)
37#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209)
38#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211)
39#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212)
40#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213)
41#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214)
42#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226)
43#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268)
44#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322)
45
46/* Register offset address definitions */
47#define MMU_MMU_SYSCONFIG_OFFSET 0x10
48#define MMU_MMU_IRQSTATUS_OFFSET 0x18
49#define MMU_MMU_IRQENABLE_OFFSET 0x1c
50#define MMU_MMU_WALKING_ST_OFFSET 0x40
51#define MMU_MMU_CNTL_OFFSET 0x44
52#define MMU_MMU_FAULT_AD_OFFSET 0x48
53#define MMU_MMU_TTB_OFFSET 0x4c
54#define MMU_MMU_LOCK_OFFSET 0x50
55#define MMU_MMU_LD_TLB_OFFSET 0x54
56#define MMU_MMU_CAM_OFFSET 0x58
57#define MMU_MMU_RAM_OFFSET 0x5c
58#define MMU_MMU_GFLUSH_OFFSET 0x60
59#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64
60/* Bitfield mask and offset declarations */
61#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18
62#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3
63#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1
64#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0
65#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1
66#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0
67#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4
68#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2
69#define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2
70#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1
71#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
72#define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10
73#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0
74#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4
75
76#endif /* _MMU_ACC_INT_H */
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h
new file mode 100644
index 000000000000..ab1a16da731c
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h
@@ -0,0 +1,225 @@
1/*
2 * MMURegAcM.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Copyright (C) 2007 Texas Instruments, Inc.
7 *
8 * This package is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
13 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
14 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#ifndef _MMU_REG_ACM_H
18#define _MMU_REG_ACM_H
19
20#include <linux/io.h>
21#include <EasiGlobal.h>
22
23#include "MMUAccInt.h"
24
25#if defined(USE_LEVEL_1_MACROS)
26
27#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
28 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
29 __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
30
31#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
32{\
33 const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
34 register u32 data = __raw_readl((base_address)+offset);\
35 register u32 new_value = (value);\
36 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
37 data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
38 new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
39 new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
40 new_value |= data;\
41 __raw_writel(new_value, base_address+offset);\
42}
43
44#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
45{\
46 const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
47 register u32 data = __raw_readl((base_address)+offset);\
48 register u32 new_value = (value);\
49 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
50 data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
51 new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
52 new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
53 new_value |= data;\
54 __raw_writel(new_value, base_address+offset);\
55}
56
57#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
58 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_READ_REGISTER32),\
59 __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
60
61#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
62{\
63 const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
64 register u32 new_value = (value);\
65 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
66 __raw_writel(new_value, (base_address)+offset);\
67}
68
69#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
70 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
71 __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
72
73#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
74{\
75 const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
76 register u32 new_value = (value);\
77 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
78 __raw_writel(new_value, (base_address)+offset);\
79}
80
81#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
82 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
83 (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
84 & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
85 MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
86
87#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
88 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
89 (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
90 MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
91 MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
92
93#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
94{\
95 const u32 offset = MMU_MMU_CNTL_OFFSET;\
96 register u32 data = __raw_readl((base_address)+offset);\
97 register u32 new_value = (value);\
98 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
99 data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
100 new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
101 new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
102 new_value |= data;\
103 __raw_writel(new_value, base_address+offset);\
104}
105
106#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
107{\
108 const u32 offset = MMU_MMU_CNTL_OFFSET;\
109 register u32 data = __raw_readl((base_address)+offset);\
110 register u32 new_value = (value);\
111 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
112 data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
113 new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
114 new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
115 new_value |= data;\
116 __raw_writel(new_value, base_address+offset);\
117}
118
119#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
120 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
121 __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
122
123#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
124{\
125 const u32 offset = MMU_MMU_TTB_OFFSET;\
126 register u32 new_value = (value);\
127 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
128 __raw_writel(new_value, (base_address)+offset);\
129}
130
131#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
132 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
133 __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
134
135#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
136{\
137 const u32 offset = MMU_MMU_LOCK_OFFSET;\
138 register u32 new_value = (value);\
139 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
140 __raw_writel(new_value, (base_address)+offset);\
141}
142
143#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
144 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
145 (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
146 MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
147 MMU_MMU_LOCK_BASE_VALUE_OFFSET))
148
149#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
150{\
151 const u32 offset = MMU_MMU_LOCK_OFFSET;\
152 register u32 data = __raw_readl((base_address)+offset);\
153 register u32 new_value = (value);\
154 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_WRITE32);\
155 data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
156 new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
157 new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
158 new_value |= data;\
159 __raw_writel(new_value, base_address+offset);\
160}
161
162#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
163 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
164 (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
165 MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
166 MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
167
168#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
169{\
170 const u32 offset = MMU_MMU_LOCK_OFFSET;\
171 register u32 data = __raw_readl((base_address)+offset);\
172 register u32 new_value = (value);\
173 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
174 data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
175 new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
176 new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
177 new_value |= data;\
178 __raw_writel(new_value, base_address+offset);\
179}
180
181#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
182 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\
183 (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
184 (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
185 MMU_MMU_LOCK_CURRENT_VICTIM_MASK)))
186
187#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
188 (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
189 __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
190
191#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
192{\
193 const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
194 register u32 new_value = (value);\
195 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
196 __raw_writel(new_value, (base_address)+offset);\
197}
198
199#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
200{\
201 const u32 offset = MMU_MMU_CAM_OFFSET;\
202 register u32 new_value = (value);\
203 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
204 __raw_writel(new_value, (base_address)+offset);\
205}
206
207#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
208{\
209 const u32 offset = MMU_MMU_RAM_OFFSET;\
210 register u32 new_value = (value);\
211 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
212 __raw_writel(new_value, (base_address)+offset);\
213}
214
215#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
216{\
217 const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
218 register u32 new_value = (value);\
219 _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
220 __raw_writel(new_value, (base_address)+offset);\
221}
222
223#endif /* USE_LEVEL_1_MACROS */
224
225#endif /* _MMU_REG_ACM_H */
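
Every *_WRITE32 field macro above expands to the same read-modify-write sequence on a masked register field; as a C inline the pattern is easier to audit (a hypothetical equivalent, not part of this header):

static inline void mmu_field_write(void __iomem *base, u32 offset,
				   u32 mask, u32 shift, u32 value)
{
	u32 reg = __raw_readl(base + offset);

	reg &= ~mask;				/* clear the field         */
	reg |= (value << shift) & mask;		/* insert the new contents */
	__raw_writel(reg, base + offset);
}

MMUMMU_CNTLTWL_ENABLE_WRITE32(base, HW_SET), for instance, corresponds to mmu_field_write(base, MMU_MMU_CNTL_OFFSET, MMU_MMU_CNTL_TWL_ENABLE_MASK, MMU_MMU_CNTL_TWL_ENABLE_OFFSET, HW_SET).
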
diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h
new file mode 100644
index 000000000000..d5266d4c163f
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_defs.h
@@ -0,0 +1,58 @@
1/*
2 * hw_defs.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * Global HW definitions
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _HW_DEFS_H
20#define _HW_DEFS_H
21
22/* Page size */
23#define HW_PAGE_SIZE4KB 0x1000
24#define HW_PAGE_SIZE64KB 0x10000
25#define HW_PAGE_SIZE1MB 0x100000
26#define HW_PAGE_SIZE16MB 0x1000000
27
28/* hw_status: return type for HW API */
29typedef long hw_status;
30
31/* Macro used to set and clear any bit */
32#define HW_CLEAR 0
33#define HW_SET 1
34
35/* hw_endianism_t: Enumerated Type used to specify the endianism
36 * Do NOT change these values. They are used as bit fields. */
37enum hw_endianism_t {
38 HW_LITTLE_ENDIAN,
39 HW_BIG_ENDIAN
40};
41
42/* hw_element_size_t: Enumerated Type used to specify the element size
43 * Do NOT change these values. They are used as bit fields. */
44enum hw_element_size_t {
45 HW_ELEM_SIZE8BIT,
46 HW_ELEM_SIZE16BIT,
47 HW_ELEM_SIZE32BIT,
48 HW_ELEM_SIZE64BIT
49};
50
51/* hw_idle_mode_t: Enumerated Type used to specify Idle modes */
52enum hw_idle_mode_t {
53 HW_FORCE_IDLE,
54 HW_NO_IDLE,
55 HW_SMART_IDLE
56};
57
58#endif /* _HW_DEFS_H */
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c
new file mode 100644
index 000000000000..014f5d5293ae
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.c
@@ -0,0 +1,562 @@
1/*
2 * hw_mmu.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * API definitions to setup MMU TLB and PTE
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <linux/io.h>
20#include "MMURegAcM.h"
21#include <hw_defs.h>
22#include <hw_mmu.h>
23#include <linux/types.h>
24#include <linux/err.h>
25
26#define MMU_BASE_VAL_MASK 0xFC00
27#define MMU_PAGE_MAX 3
28#define MMU_ELEMENTSIZE_MAX 3
29#define MMU_ADDR_MASK 0xFFFFF000
30#define MMU_TTB_MASK 0xFFFFC000
31#define MMU_SECTION_ADDR_MASK 0xFFF00000
32#define MMU_SSECTION_ADDR_MASK 0xFF000000
33#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
34#define MMU_LARGE_PAGE_MASK 0xFFFF0000
35#define MMU_SMALL_PAGE_MASK 0xFFFFF000
36
37#define MMU_LOAD_TLB 0x00000001
38#define MMU_GFLUSH 0x60
39
40/*
41 * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size (SLSS)
42 */
43enum hw_mmu_page_size_t {
44 HW_MMU_SECTION,
45 HW_MMU_LARGE_PAGE,
46 HW_MMU_SMALL_PAGE,
47 HW_MMU_SUPERSECTION
48};
49
50/*
51 * FUNCTION : mmu_flush_entry
52 *
53 * INPUTS:
54 *
55 * Identifier : base_address
56 * Type : const u32
57 * Description : Base Address of instance of MMU module
58 *
59 * RETURNS:
60 *
61 * Type : hw_status
62 * Description : 0 -- No errors occurred
63 * RET_BAD_NULL_PARAM -- A Pointer
64 * Parameter was set to NULL
65 *
66 * PURPOSE: : Flush the TLB entry pointed to by the
67 * lock counter register,
68 * even if this entry is set protected
69 *
70 * METHOD: : Check the Input parameter and Flush a
71 * single entry in the TLB.
72 */
73static hw_status mmu_flush_entry(const void __iomem *base_address);
74
75/*
76 * FUNCTION : mmu_set_cam_entry
77 *
78 * INPUTS:
79 *
80 * Identifier : base_address
81 * Type : const u32
82 * Description : Base Address of instance of MMU module
83 *
84 * Identifier : page_sz
85 * Type : const u32
86 * Description : It indicates the page size
87 *
88 * Identifier : preserved_bit
89 * Type : const u32
90 * Description : It indicates the TLB entry is preserved entry
91 * or not
92 *
93 * Identifier : valid_bit
94 * Type : const u32
95 * Description : It indicates the TLB entry is valid entry or not
96 *
97 *
98 * Identifier : virtual_addr_tag
99 * Type : const u32
100 * Description : virtual Address
101 *
102 * RETURNS:
103 *
104 * Type : hw_status
105 * Description : 0 -- No errors occurred
106 * RET_BAD_NULL_PARAM -- A Pointer Parameter
107 * was set to NULL
108 * RET_PARAM_OUT_OF_RANGE -- Input Parameter out
109 * of Range
110 *
111 * PURPOSE: : Set MMU_CAM reg
112 *
113 * METHOD: : Check the Input parameters and set the CAM entry.
114 */
115static hw_status mmu_set_cam_entry(const void __iomem *base_address,
116 const u32 page_sz,
117 const u32 preserved_bit,
118 const u32 valid_bit,
119 const u32 virtual_addr_tag);
120
121/*
122 * FUNCTION : mmu_set_ram_entry
123 *
124 * INPUTS:
125 *
126 * Identifier : base_address
127 * Type : const u32
128 * Description : Base Address of instance of MMU module
129 *
130 * Identifier : physical_addr
131 * Type : const u32
132 * Description : Physical Address to which the corresponding
133 * virtual Address should point
134 *
135 * Identifier : endianism
136 * Type : hw_endianism_t
137 * Description : endianism for the given page
138 *
139 * Identifier : element_size
140 * Type : hw_element_size_t
141 * Description : The element size ( 8,16, 32 or 64 bit)
142 *
143 * Identifier : mixed_size
144 * Type : hw_mmu_mixed_size_t
145 * Description : Element Size to follow CPU or TLB
146 *
147 * RETURNS:
148 *
149 * Type : hw_status
150 * Description : 0 -- No errors occurred
151 * RET_BAD_NULL_PARAM -- A Pointer Parameter
152 * was set to NULL
153 * RET_PARAM_OUT_OF_RANGE -- Input Parameter
154 * out of Range
155 *
156 * PURPOSE: : Set MMU_RAM reg
157 *
158 * METHOD: : Check the Input parameters and set the RAM entry.
159 */
160static hw_status mmu_set_ram_entry(const void __iomem *base_address,
161 const u32 physical_addr,
162 enum hw_endianism_t endianism,
163 enum hw_element_size_t element_size,
164 enum hw_mmu_mixed_size_t mixed_size);
165
166/* HW FUNCTIONS */
167
168hw_status hw_mmu_enable(const void __iomem *base_address)
169{
170 hw_status status = 0;
171
172 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
173
174 return status;
175}
176
177hw_status hw_mmu_disable(const void __iomem *base_address)
178{
179 hw_status status = 0;
180
181 MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
182
183 return status;
184}
185
186hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
187 u32 num_locked_entries)
188{
189 hw_status status = 0;
190
191 MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
192
193 return status;
194}
195
196hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
197 u32 victim_entry_num)
198{
199 hw_status status = 0;
200
201 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
202
203 return status;
204}
205
206hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
207{
208 hw_status status = 0;
209
210 MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
211
212 return status;
213}
214
215hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
216{
217 hw_status status = 0;
218 u32 irq_reg;
219
220 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
221
222 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
223
224 return status;
225}
226
227hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
228{
229 hw_status status = 0;
230 u32 irq_reg;
231
232 irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
233
234 MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
235
236 return status;
237}
238
239hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
240{
241 hw_status status = 0;
242
243 *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
244
245 return status;
246}
247
248hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
249{
250 hw_status status = 0;
251
252 /* read values from register */
253 *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
254
255 return status;
256}
257
258hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
259{
260 hw_status status = 0;
261 u32 load_ttb;
262
263 load_ttb = ttb_phys_addr & ~0x7FUL;
264 /* write values to register */
265 MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
266
267 return status;
268}
269
270hw_status hw_mmu_twl_enable(const void __iomem *base_address)
271{
272 hw_status status = 0;
273
274 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
275
276 return status;
277}
278
279hw_status hw_mmu_twl_disable(const void __iomem *base_address)
280{
281 hw_status status = 0;
282
283 MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
284
285 return status;
286}
287
288hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
289 u32 page_sz)
290{
291 hw_status status = 0;
292 u32 virtual_addr_tag;
293 enum hw_mmu_page_size_t pg_size_bits;
294
295 switch (page_sz) {
296 case HW_PAGE_SIZE4KB:
297 pg_size_bits = HW_MMU_SMALL_PAGE;
298 break;
299
300 case HW_PAGE_SIZE64KB:
301 pg_size_bits = HW_MMU_LARGE_PAGE;
302 break;
303
304 case HW_PAGE_SIZE1MB:
305 pg_size_bits = HW_MMU_SECTION;
306 break;
307
308 case HW_PAGE_SIZE16MB:
309 pg_size_bits = HW_MMU_SUPERSECTION;
310 break;
311
312 default:
313 return -EINVAL;
314 }
315
316 /* Generate the 20-bit tag from virtual address */
317 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
318
319 mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
320
321 mmu_flush_entry(base_address);
322
323 return status;
324}
325
326hw_status hw_mmu_tlb_add(const void __iomem *base_address,
327 u32 physical_addr,
328 u32 virtual_addr,
329 u32 page_sz,
330 u32 entry_num,
331 struct hw_mmu_map_attrs_t *map_attrs,
332 s8 preserved_bit, s8 valid_bit)
333{
334 hw_status status = 0;
335 u32 lock_reg;
336 u32 virtual_addr_tag;
337 enum hw_mmu_page_size_t mmu_pg_size;
338
339 /* Check the input parameters */
340 switch (page_sz) {
341 case HW_PAGE_SIZE4KB:
342 mmu_pg_size = HW_MMU_SMALL_PAGE;
343 break;
344
345 case HW_PAGE_SIZE64KB:
346 mmu_pg_size = HW_MMU_LARGE_PAGE;
347 break;
348
349 case HW_PAGE_SIZE1MB:
350 mmu_pg_size = HW_MMU_SECTION;
351 break;
352
353 case HW_PAGE_SIZE16MB:
354 mmu_pg_size = HW_MMU_SUPERSECTION;
355 break;
356
357 default:
358 return -EINVAL;
359 }
360
361 lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
362
363 /* Generate the 20-bit tag from virtual address */
364 virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
365
366 /* Write the fields in the CAM Entry Register */
367 mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
368 virtual_addr_tag);
369
370 /* Write the different fields of the RAM Entry Register */
371 /* endianism of the page, element size of the page (8, 16, 32, 64 bit) */
372 mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
373 map_attrs->element_size, map_attrs->mixed_size);
374
375 /* Update the MMU Lock Register */
376 /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
377 MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
378
379 /* Enable loading of an entry in TLB by writing 1
380 into LD_TLB_REG register */
381 MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
382
383 MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
384
385 return status;
386}
387
388hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
389 u32 physical_addr,
390 u32 virtual_addr,
391 u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
392{
393 hw_status status = 0;
394 u32 pte_addr, pte_val;
395 s32 num_entries = 1;
396
397 switch (page_sz) {
398 case HW_PAGE_SIZE4KB:
399 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
400 virtual_addr &
401 MMU_SMALL_PAGE_MASK);
402 pte_val =
403 ((physical_addr & MMU_SMALL_PAGE_MASK) |
404 (map_attrs->endianism << 9) | (map_attrs->
405 element_size << 4) |
406 (map_attrs->mixed_size << 11) | 2);
407 break;
408
409 case HW_PAGE_SIZE64KB:
410 num_entries = 16;
411 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
412 virtual_addr &
413 MMU_LARGE_PAGE_MASK);
414 pte_val =
415 ((physical_addr & MMU_LARGE_PAGE_MASK) |
416 (map_attrs->endianism << 9) | (map_attrs->
417 element_size << 4) |
418 (map_attrs->mixed_size << 11) | 1);
419 break;
420
421 case HW_PAGE_SIZE1MB:
422 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
423 virtual_addr &
424 MMU_SECTION_ADDR_MASK);
425 pte_val =
426 ((((physical_addr & MMU_SECTION_ADDR_MASK) |
427 (map_attrs->endianism << 15) | (map_attrs->
428 element_size << 10) |
429 (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
430 break;
431
432 case HW_PAGE_SIZE16MB:
433 num_entries = 16;
434 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
435 virtual_addr &
436 MMU_SSECTION_ADDR_MASK);
437 pte_val =
438 (((physical_addr & MMU_SSECTION_ADDR_MASK) |
439 (map_attrs->endianism << 15) | (map_attrs->
440 element_size << 10) |
441 (map_attrs->mixed_size << 17)
442 ) | 0x40000 | 0x2);
443 break;
444
445 case HW_MMU_COARSE_PAGE_SIZE:
446 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
447 virtual_addr &
448 MMU_SECTION_ADDR_MASK);
449 pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
450 break;
451
452 default:
453 return -EINVAL;
454 }
455
456 while (--num_entries >= 0)
457 ((u32 *) pte_addr)[num_entries] = pte_val;
458
459 return status;
460}
461
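
The pte_val encodings above follow the ARMv6-style first- and second-level descriptor layout: bits [1:0] carry the descriptor type (0b01 for a coarse table or large page, 0b10 for a section or small page), bit 18 of a first-level entry distinguishes a 16MB supersection from a 1MB section, and the endianness/element-size/mixed attributes occupy the OMAP-specific attribute bits. A worked example for a 1MB section (a sketch, assuming the MMU_* masks defined at the top of this file and the HW_* enums from hw_defs.h/hw_mmu.h):

static u32 section_pte_example(void)
{
	u32 pte_val;

	/* pa = 0x80100000, little-endian, 16-bit elements, CPU element size */
	pte_val = (0x80100000 & MMU_SECTION_ADDR_MASK)	/* PA bits [31:20]   */
		| (HW_LITTLE_ENDIAN << 15)		/* endianness = 0    */
		| (HW_ELEM_SIZE16BIT << 10)		/* element size = 1  */
		| (HW_MMU_CPUES << 17);			/* mixed CPU ES = 1  */
	pte_val = (pte_val & ~0x40000) | 0x2;	/* bit 18 clear: 1MB section */

	return pte_val;	/* 0x80120402 */
}
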
462hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
463{
464 hw_status status = 0;
465 u32 pte_addr;
466 s32 num_entries = 1;
467
468 switch (page_size) {
469 case HW_PAGE_SIZE4KB:
470 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
471 virtual_addr &
472 MMU_SMALL_PAGE_MASK);
473 break;
474
475 case HW_PAGE_SIZE64KB:
476 num_entries = 16;
477 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
478 virtual_addr &
479 MMU_LARGE_PAGE_MASK);
480 break;
481
482 case HW_PAGE_SIZE1MB:
483 case HW_MMU_COARSE_PAGE_SIZE:
484 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
485 virtual_addr &
486 MMU_SECTION_ADDR_MASK);
487 break;
488
489 case HW_PAGE_SIZE16MB:
490 num_entries = 16;
491 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
492 virtual_addr &
493 MMU_SSECTION_ADDR_MASK);
494 break;
495
496 default:
497 return -EINVAL;
498 }
499
500 while (--num_entries >= 0)
501 ((u32 *) pte_addr)[num_entries] = 0;
502
503 return status;
504}
505
506/* mmu_flush_entry */
507static hw_status mmu_flush_entry(const void __iomem *base_address)
508{
509 hw_status status = 0;
510 u32 flush_entry_data = 0x1;
511
512 /* write values to register */
513 MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
514
515 return status;
516}
517
518/* mmu_set_cam_entry */
519static hw_status mmu_set_cam_entry(const void __iomem *base_address,
520 const u32 page_sz,
521 const u32 preserved_bit,
522 const u32 valid_bit,
523 const u32 virtual_addr_tag)
524{
525 hw_status status = 0;
526 u32 mmu_cam_reg;
527
528 mmu_cam_reg = (virtual_addr_tag << 12);
529 mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
530 (preserved_bit << 3);
531
532 /* write values to register */
533 MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
534
535 return status;
536}
537
538/* mmu_set_ram_entry */
539static hw_status mmu_set_ram_entry(const void __iomem *base_address,
540 const u32 physical_addr,
541 enum hw_endianism_t endianism,
542 enum hw_element_size_t element_size,
543 enum hw_mmu_mixed_size_t mixed_size)
544{
545 hw_status status = 0;
546 u32 mmu_ram_reg;
547
548 mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
549 mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
550 (mixed_size << 6));
551
552 /* write values to register */
553 MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
554
555 return status;
556
557}
558
559void hw_mmu_tlb_flush_all(const void __iomem *base)
560{
561 __raw_writeb(1, base + MMU_GFLUSH);
562}
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h
new file mode 100644
index 000000000000..1458a2c6027b
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.h
@@ -0,0 +1,163 @@
1/*
2 * hw_mmu.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * MMU types and API declarations
7 *
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _HW_MMU_H
20#define _HW_MMU_H
21
22#include <linux/types.h>
23
24/* Bitmasks for interrupt sources */
25#define HW_MMU_TRANSLATION_FAULT 0x2
26#define HW_MMU_ALL_INTERRUPTS 0x1F
27
28#define HW_MMU_COARSE_PAGE_SIZE 0x400
29
30/* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow
31 CPU/TLB Element size */
32enum hw_mmu_mixed_size_t {
33 HW_MMU_TLBES,
34 HW_MMU_CPUES
35};
36
37/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */
38struct hw_mmu_map_attrs_t {
39 enum hw_endianism_t endianism;
40 enum hw_element_size_t element_size;
41 enum hw_mmu_mixed_size_t mixed_size;
42 bool donotlockmpupage;
43};
44
45extern hw_status hw_mmu_enable(const void __iomem *base_address);
46
47extern hw_status hw_mmu_disable(const void __iomem *base_address);
48
49extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
50 u32 num_locked_entries);
51
52extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
53 u32 victim_entry_num);
54
55/* For MMU faults */
56extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
57 u32 irq_mask);
58
59extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
60 u32 irq_mask);
61
62extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
63 u32 irq_mask);
64
65extern hw_status hw_mmu_event_status(const void __iomem *base_address,
66 u32 *irq_mask);
67
68extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
69 u32 *addr);
70
71/* Set the TT base address */
72extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
73 u32 ttb_phys_addr);
74
75extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
76
77extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
78
79extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
80 u32 virtual_addr, u32 page_sz);
81
82extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
83 u32 physical_addr,
84 u32 virtual_addr,
85 u32 page_sz,
86 u32 entry_num,
87 struct hw_mmu_map_attrs_t *map_attrs,
88 s8 preserved_bit, s8 valid_bit);
89
90/* For PTEs */
91extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
92 u32 physical_addr,
93 u32 virtual_addr,
94 u32 page_sz,
95 struct hw_mmu_map_attrs_t *map_attrs);
96
97extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
98 u32 virtual_addr, u32 page_size);
99
100void hw_mmu_tlb_flush_all(const void __iomem *base);
101
102static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
103{
104 u32 pte_addr;
105 u32 va31_to20;
106
107 va31_to20 = va >> (20 - 2); /* Left-shift by 2 here itself */
108 va31_to20 &= 0xFFFFFFFCUL;
109 pte_addr = l1_base + va31_to20;
110
111 return pte_addr;
112}
113
114static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
115{
116 u32 pte_addr;
117
118 pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
119
120 return pte_addr;
121}
122
123static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
124{
125 u32 pte_coarse;
126
127 pte_coarse = pte_val & 0xFFFFFC00;
128
129 return pte_coarse;
130}
131
132static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
133{
134 u32 pte_size = 0;
135
136 if ((pte_val & 0x3) == 0x1) {
137 /* Points to L2 PT */
138 pte_size = HW_MMU_COARSE_PAGE_SIZE;
139 }
140
141 if ((pte_val & 0x3) == 0x2) {
142 if (pte_val & (1 << 18))
143 pte_size = HW_PAGE_SIZE16MB;
144 else
145 pte_size = HW_PAGE_SIZE1MB;
146 }
147
148 return pte_size;
149}
150
151static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
152{
153 u32 pte_size = 0;
154
155 if (pte_val & 0x2)
156 pte_size = HW_PAGE_SIZE4KB;
157 else if (pte_val & 0x1)
158 pte_size = HW_PAGE_SIZE64KB;
159
160 return pte_size;
161}
162
163#endif /* _HW_MMU_H */
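
hw_mmu_pte_addr_l1() computes l1_base + (va >> 20) * 4 — 4096 word-sized section entries indexed by va[31:20] — and hw_mmu_pte_addr_l2() indexes the 256 word-sized entries of a 1KB coarse table by va[19:12]. A worked example (the addresses are arbitrary illustrations):

static void pte_addr_example(void)
{
	/* va = 0x40321000: L1 index 0x403 (va[31:20]), L2 index 0x21 (va[19:12]) */
	u32 l1 = hw_mmu_pte_addr_l1(0x8000C000, 0x40321000);
	/* = 0x8000C000 + 0x403 * 4 = 0x8000D00C */
	u32 l2 = hw_mmu_pte_addr_l2(0x8100A000, 0x40321000);
	/* = 0x8100A000 | 0x21 * 4 = 0x8100A084 */

	(void)l1;
	(void)l2;
}
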
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
index dfb55cca34c7..38122dbf877a 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
@@ -68,6 +68,7 @@ struct cfg_hostres {
68 void __iomem *dw_per_base; 68 void __iomem *dw_per_base;
69 u32 dw_per_pm_base; 69 u32 dw_per_pm_base;
70 u32 dw_core_pm_base; 70 u32 dw_core_pm_base;
71 void __iomem *dw_dmmu_base;
71 void __iomem *dw_sys_ctrl_base; 72 void __iomem *dw_sys_ctrl_base;
72}; 73};
73 74
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
index 9bdd48f57429..357458fadd2a 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dev.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -27,6 +27,7 @@
27#include <dspbridge/nodedefs.h> 27#include <dspbridge/nodedefs.h>
28#include <dspbridge/dispdefs.h> 28#include <dspbridge/dispdefs.h>
29#include <dspbridge/dspdefs.h> 29#include <dspbridge/dspdefs.h>
30#include <dspbridge/dmm.h>
30#include <dspbridge/host_os.h> 31#include <dspbridge/host_os.h>
31 32
32/* ----------------------------------- This */ 33/* ----------------------------------- This */
@@ -233,6 +234,29 @@ extern int dev_get_cmm_mgr(struct dev_object *hdev_obj,
233 struct cmm_object **mgr); 234 struct cmm_object **mgr);
234 235
235/* 236/*
237 * ======== dev_get_dmm_mgr ========
238 * Purpose:
239 * Retrieve the handle to the dynamic memory manager created for this
240 * device.
241 * Parameters:
242 * hdev_obj: Handle to device object created with
243 * dev_create_device().
244 * *mgr: Ptr to location to store handle.
245 * Returns:
246 * 0: Success.
247 * -EFAULT: Invalid hdev_obj.
248 * Requires:
249 * mgr != NULL.
250 * DEV Initialized.
251 * Ensures:
252 * 0: *mgr contains a handle to a dynamic memory manager object,
253 * or NULL.
254 * else: *mgr is NULL.
255 */
256extern int dev_get_dmm_mgr(struct dev_object *hdev_obj,
257 struct dmm_object **mgr);
258
259/*
236 * ======== dev_get_cod_mgr ======== 260 * ======== dev_get_cod_mgr ========
237 * Purpose: 261 * Purpose:
238 * Retrieve the COD manager create for this device. 262 * Retrieve the COD manager create for this device.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
new file mode 100644
index 000000000000..6c58335c5f60
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
@@ -0,0 +1,75 @@
1/*
2 * dmm.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * The Dynamic Memory Mapping(DMM) module manages the DSP Virtual address
7 * space that can be directly mapped to any MPU buffer or memory region.
8 *
9 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 *
11 * This package is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
17 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
18 */
19
20#ifndef DMM_
21#define DMM_
22
23#include <dspbridge/dbdefs.h>
24
25struct dmm_object;
26
27/* DMM attributes used in dmm_create() */
28struct dmm_mgrattrs {
29 u32 reserved;
30};
31
32#define DMMPOOLSIZE 0x4000000
33
34/*
35 * ======== dmm_get_handle ========
36 * Purpose:
37 * Return the dynamic memory manager object for this device.
38 * This is typically called from the client process.
39 */
40
41extern int dmm_get_handle(void *hprocessor,
42 struct dmm_object **dmm_manager);
43
44extern int dmm_reserve_memory(struct dmm_object *dmm_mgr,
45 u32 size, u32 *prsv_addr);
46
47extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr,
48 u32 rsv_addr);
49
50extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr,
51 u32 size);
52
53extern int dmm_un_map_memory(struct dmm_object *dmm_mgr,
54 u32 addr, u32 *psize);
55
56extern int dmm_destroy(struct dmm_object *dmm_mgr);
57
58extern int dmm_delete_tables(struct dmm_object *dmm_mgr);
59
60extern int dmm_create(struct dmm_object **dmm_manager,
61 struct dev_object *hdev_obj,
62 const struct dmm_mgrattrs *mgr_attrts);
63
64extern bool dmm_init(void);
65
66extern void dmm_exit(void);
67
68extern int dmm_create_tables(struct dmm_object *dmm_mgr,
69 u32 addr, u32 size);
70
71#ifdef DSP_DMM_DEBUG
72u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr);
73#endif
74
75#endif /* DMM_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
index 75a2c9b5c6f2..c1f363ec9afa 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/drv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -108,6 +108,12 @@ struct dmm_map_object {
108 struct bridge_dma_map_info dma_info; 108 struct bridge_dma_map_info dma_info;
109}; 109};
110 110
111/* Used for DMM reserved memory accounting */
112struct dmm_rsv_object {
113 struct list_head link;
114 u32 dsp_reserved_addr;
115};
116
111/* New structure (member of process context) abstracts DMM resource info */ 117/* New structure (member of process context) abstracts DMM resource info */
112struct dspheap_res_object { 118struct dspheap_res_object {
113 s32 heap_allocated; /* DMM status */ 119 s32 heap_allocated; /* DMM status */
@@ -159,6 +165,10 @@ struct process_context {
159 struct list_head dmm_map_list; 165 struct list_head dmm_map_list;
160 spinlock_t dmm_map_lock; 166 spinlock_t dmm_map_lock;
161 167
168 /* DMM reserved memory resources */
169 struct list_head dmm_rsv_list;
170 spinlock_t dmm_rsv_lock;
171
162 /* DSP Heap resources */ 172 /* DSP Heap resources */
163 struct dspheap_res_object *pdspheap_list; 173 struct dspheap_res_object *pdspheap_list;
164 174
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h b/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
deleted file mode 100644
index cb38d4cc0734..000000000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * dsp-mmu.h
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP iommu.
7 *
8 * Copyright (C) 2005-2010 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#ifndef _DSP_MMU_
20#define _DSP_MMU_
21
22#include <plat/iommu.h>
23#include <plat/iovmm.h>
24
25/**
26 * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
27 *
28 * This function initialize dsp mmu module and returns a struct iommu
29 * handle to use it for dsp maps.
30 *
31 */
32struct iommu *dsp_mmu_init(void);
33
34/**
35 * dsp_mmu_exit() - destroy dsp mmu module
36 * @mmu: Pointer to iommu handle.
37 *
38 * This function destroys dsp mmu module.
39 *
40 */
41void dsp_mmu_exit(struct iommu *mmu);
42
43/**
44 * user_to_dsp_map() - maps user to dsp virtual address
45 * @mmu: Pointer to iommu handle.
46 * @uva: Virtual user space address.
47 * @da DSP address
48 * @size Buffer size to map.
49 * @usr_pgs struct page array pointer where the user pages will be stored
50 *
51 * This function maps a user space buffer into DSP virtual address.
52 *
53 */
54u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
55 struct page **usr_pgs);
56
57/**
58 * user_to_dsp_unmap() - unmaps DSP virtual buffer.
59 * @mmu: Pointer to iommu handle.
60 * @da DSP address
61 *
62 * This function unmaps a user space buffer into DSP virtual address.
63 *
64 */
65int user_to_dsp_unmap(struct iommu *mmu, u32 da);
66
67#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
index 615363474810..0ae7d1646a1b 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
@@ -162,6 +162,48 @@ typedef int(*fxn_brd_memwrite) (struct bridge_dev_context
162 u32 mem_type); 162 u32 mem_type);
163 163
164/* 164/*
165 * ======== bridge_brd_mem_map ========
166 * Purpose:
167 * Map a MPU memory region to a DSP/IVA memory space
168 * Parameters:
169 * dev_ctxt: Handle to Bridge driver defined device info.
170 * ul_mpu_addr: MPU memory region start address.
171 * virt_addr: DSP/IVA memory region u8 address.
172 * ul_num_bytes: Number of bytes to map.
173 * map_attrs: Mapping attributes (e.g. endianness).
174 * Returns:
175 * 0: Success.
176 * -EPERM: Other, unspecified error.
177 * Requires:
178 * dev_ctxt != NULL;
179 * Ensures:
180 */
181typedef int(*fxn_brd_memmap) (struct bridge_dev_context
182 * dev_ctxt, u32 ul_mpu_addr,
183 u32 virt_addr, u32 ul_num_bytes,
184 u32 map_attr,
185 struct page **mapped_pages);
186
187/*
188 * ======== bridge_brd_mem_un_map ========
189 * Purpose:
190 * UnMap an MPU memory region from DSP/IVA memory space
191 * Parameters:
192 * dev_ctxt: Handle to Bridge driver defined device info.
193 * virt_addr: DSP/IVA memory region u8 address.
194 * ul_num_bytes: Number of bytes to unmap.
195 * Returns:
196 * 0: Success.
197 * -EPERM: Other, unspecified error.
198 * Requires:
199 * dev_ctxt != NULL;
200 * Ensures:
201 */
202typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
203 * dev_ctxt,
204 u32 virt_addr, u32 ul_num_bytes);
205
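A minimal sketch of how a Bridge driver might supply the two new entry points; everything below is hypothetical except the slot names, which come from the interface table further down:

/* Hypothetical driver-side implementations (sketch only). */
static int my_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			  u32 ul_mpu_addr, u32 virt_addr,
			  u32 ul_num_bytes, u32 map_attr,
			  struct page **mapped_pages)
{
	/* Program DSP MMU entries covering ul_num_bytes here. */
	return 0;
}

static int my_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
			     u32 virt_addr, u32 ul_num_bytes)
{
	/* Tear the corresponding DSP MMU entries back down. */
	return 0;
}

static const struct bridge_drv_interface my_drv_interface = {
	.pfn_brd_mem_map = my_brd_mem_map,
	.pfn_brd_mem_un_map = my_brd_mem_un_map,
	/* remaining handlers omitted from this sketch */
};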
206/*
165 * ======== bridge_brd_stop ======== 207 * ======== bridge_brd_stop ========
166 * Purpose: 208 * Purpose:
167 * Bring board to the BRD_STOPPED state. 209 * Bring board to the BRD_STOPPED state.
@@ -951,6 +993,8 @@ struct bridge_drv_interface {
951 fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ 993 fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */
952 fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ 994 fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */
953 fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ 995 fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */
996 fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */
997 fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem from DSP mem */
954 fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ 998 fxn_chnl_create pfn_chnl_create; /* Create channel manager. */
955 fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ 999 fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */
956 fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ 1000 fxn_chnl_open pfn_chnl_open; /* Create a new channel. */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
index bad180108ada..41e0594dff34 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
@@ -19,6 +19,10 @@
19#ifndef DSPIOCTL_ 19#ifndef DSPIOCTL_
20#define DSPIOCTL_ 20#define DSPIOCTL_
21 21
22/* ------------------------------------ Hardware Abstraction Layer */
23#include <hw_defs.h>
24#include <hw_mmu.h>
25
22/* 26/*
23 * Any IOCTLS at or above this value are reserved for standard Bridge driver 27 * Any IOCTLS at or above this value are reserved for standard Bridge driver
24 * interfaces. 28 * interfaces.
@@ -61,6 +65,9 @@ struct bridge_ioctl_extproc {
61 /* GPP virtual address. __va does not work for ioremapped addresses */ 65 /* GPP virtual address. __va does not work for ioremapped addresses */
62 u32 ul_gpp_va; 66 u32 ul_gpp_va;
63 u32 ul_size; /* Size of the mapped memory in bytes */ 67 u32 ul_size; /* Size of the mapped memory in bytes */
68 enum hw_endianism_t endianism;
69 enum hw_mmu_mixed_size_t mixed_mode;
70 enum hw_element_size_t elem_size;
64}; 71};
65 72
66#endif /* DSPIOCTL_ */ 73#endif /* DSPIOCTL_ */
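The three new fields let each fixed TLB entry carry its MMU attributes alongside the addresses. A sketch of filling one entry; the leading field names follow the existing struct layout that the hunk elides, and the enumerator names are assumed from hw_defs.h/hw_mmu.h rather than shown in this patch:

	struct bridge_ioctl_extproc entry = {
		.ul_dsp_va = 0x11000000,	/* DSP-side virtual address */
		.ul_gpp_pa = 0x9c000000,	/* GPP physical address */
		.ul_gpp_va = 0,			/* filled in after ioremap() */
		.ul_size = 0x100000,		/* 1 MiB window */
		.endianism = HW_LITTLE_ENDIAN,	/* assumed enumerator names */
		.mixed_mode = HW_MMU_CPUES,
		.elem_size = HW_ELEM_SIZE32BIT,
	};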
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index 2d12aab6b5bf..5e09fd165d9d 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -551,6 +551,29 @@ extern int proc_map(void *hprocessor,
551 struct process_context *pr_ctxt); 551 struct process_context *pr_ctxt);
552 552
553/* 553/*
554 * ======== proc_reserve_memory ========
555 * Purpose:
556 * Reserve a virtually contiguous region of DSP address space.
557 * Parameters:
558 * hprocessor : The processor handle.
559 * ul_size : Size of the address space to reserve.
560 * pp_rsv_addr : Ptr to DSP side reserved u8 address.
561 * Returns:
562 * 0 : Success.
563 * -EFAULT : Invalid processor handle.
564 * -EPERM : General failure.
565 * -ENOMEM : Cannot reserve chunk of this size.
566 * Requires:
567 * pp_rsv_addr is not NULL
568 * PROC Initialized.
569 * Ensures:
570 * Details:
571 */
572extern int proc_reserve_memory(void *hprocessor,
573 u32 ul_size, void **pp_rsv_addr,
574 struct process_context *pr_ctxt);
575
576/*
554 * ======== proc_un_map ======== 577 * ======== proc_un_map ========
555 * Purpose: 578 * Purpose:
556 * Removes a MPU buffer mapping from the DSP address space. 579 * Removes a MPU buffer mapping from the DSP address space.
@@ -572,4 +595,27 @@ extern int proc_map(void *hprocessor,
572extern int proc_un_map(void *hprocessor, void *map_addr, 595extern int proc_un_map(void *hprocessor, void *map_addr,
573 struct process_context *pr_ctxt); 596 struct process_context *pr_ctxt);
574 597
598/*
599 * ======== proc_un_reserve_memory ========
600 * Purpose:
601 * Frees a previously reserved region of DSP address space.
602 * Parameters:
603 * hprocessor : The processor handle.
604 * prsv_addr : Ptr to DSP side reserved u8 address.
605 * Returns:
606 * 0 : Success.
607 * -EFAULT : Invalid processor handle.
608 * -EPERM : General failure.
609 * -ENOENT : Cannot find a reserved region starting with this
610 * : address.
611 * Requires:
612 * prsv_addr is not NULL
613 * PROC Initialized.
614 * Ensures:
615 * Details:
616 */
617extern int proc_un_reserve_memory(void *hprocessor,
618 void *prsv_addr,
619 struct process_context *pr_ctxt);
620
575#endif /* PROC_ */ 621#endif /* PROC_ */
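Together with proc_map()/proc_un_map(), the two calls documented above give a user context a full reserve-map-unmap-unreserve lifecycle. A hedged sketch of the intended sequence (proc, mpu_buf, size and pr_ctxt are assumed to come from the caller; error handling trimmed):

	void *rsv = NULL, *dsp_addr = NULL;
	int status;

	/* 1. carve out DSP virtual space (a 4 KiB multiple) */
	status = proc_reserve_memory(proc, size + PG_SIZE4K, &rsv, pr_ctxt);

	/* 2. back part of the reservation with the MPU buffer */
	if (!status)
		status = proc_map(proc, mpu_buf, size, rsv, &dsp_addr,
				  DSP_MAPLITTLEENDIAN | DSP_MAPELEMSIZE32,
				  pr_ctxt);

	/* 3. ... the DSP works on dsp_addr ... */

	/* 4. tear down in reverse order */
	proc_un_map(proc, dsp_addr, pr_ctxt);
	proc_un_reserve_memory(proc, rsv, pr_ctxt);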
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 7b30267ef0e2..132e960967b9 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -34,6 +34,7 @@
34#include <dspbridge/cod.h> 34#include <dspbridge/cod.h>
35#include <dspbridge/drv.h> 35#include <dspbridge/drv.h>
36#include <dspbridge/proc.h> 36#include <dspbridge/proc.h>
37#include <dspbridge/dmm.h>
37 38
38/* ----------------------------------- Resource Manager */ 39/* ----------------------------------- Resource Manager */
39#include <dspbridge/mgr.h> 40#include <dspbridge/mgr.h>
@@ -74,6 +75,7 @@ struct dev_object {
74 struct msg_mgr *hmsg_mgr; /* Message manager. */ 75 struct msg_mgr *hmsg_mgr; /* Message manager. */
75 struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ 76 struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */
76 struct cmm_object *hcmm_mgr; /* SM memory manager. */ 77 struct cmm_object *hcmm_mgr; /* SM memory manager. */
78 struct dmm_object *dmm_mgr; /* Dynamic memory manager. */
77 struct ldr_module *module_obj; /* Bridge Module handle. */ 79 struct ldr_module *module_obj; /* Bridge Module handle. */
78 u32 word_size; /* DSP word size: quick access. */ 80 u32 word_size; /* DSP word size: quick access. */
79 struct drv_object *hdrv_obj; /* Driver Object */ 81 struct drv_object *hdrv_obj; /* Driver Object */
@@ -248,6 +250,9 @@ int dev_create_device(struct dev_object **device_obj,
248 /* Instantiate the DEH module */ 250 /* Instantiate the DEH module */
249 status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); 251 status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
250 } 252 }
253 /* Create DMM mgr. */
254 status = dmm_create(&dev_obj->dmm_mgr,
255 (struct dev_object *)dev_obj, NULL);
251 } 256 }
252 /* Add the new DEV_Object to the global list: */ 257 /* Add the new DEV_Object to the global list: */
253 if (!status) { 258 if (!status) {
@@ -273,6 +278,8 @@ leave:
273 kfree(dev_obj->proc_list); 278 kfree(dev_obj->proc_list);
274 if (dev_obj->cod_mgr) 279 if (dev_obj->cod_mgr)
275 cod_delete(dev_obj->cod_mgr); 280 cod_delete(dev_obj->cod_mgr);
281 if (dev_obj->dmm_mgr)
282 dmm_destroy(dev_obj->dmm_mgr);
276 kfree(dev_obj); 283 kfree(dev_obj);
277 } 284 }
278 285
@@ -382,6 +389,11 @@ int dev_destroy_device(struct dev_object *hdev_obj)
382 dev_obj->hcmm_mgr = NULL; 389 dev_obj->hcmm_mgr = NULL;
383 } 390 }
384 391
392 if (dev_obj->dmm_mgr) {
393 dmm_destroy(dev_obj->dmm_mgr);
394 dev_obj->dmm_mgr = NULL;
395 }
396
385 /* Call the driver's bridge_dev_destroy() function: */ 397 /* Call the driver's bridge_dev_destroy() function: */
386 /* Require of DevDestroy */ 398 /* Require of DevDestroy */
387 if (dev_obj->hbridge_context) { 399 if (dev_obj->hbridge_context) {
@@ -462,6 +474,32 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj,
462} 474}
463 475
464/* 476/*
477 * ======== dev_get_dmm_mgr ========
478 * Purpose:
479 * Retrieve the handle to the dynamic memory manager created for this
480 * device.
481 */
482int dev_get_dmm_mgr(struct dev_object *hdev_obj,
483 struct dmm_object **mgr)
484{
485 int status = 0;
486 struct dev_object *dev_obj = hdev_obj;
487
488 DBC_REQUIRE(refs > 0);
489 DBC_REQUIRE(mgr != NULL);
490
491 if (hdev_obj) {
492 *mgr = dev_obj->dmm_mgr;
493 } else {
494 *mgr = NULL;
495 status = -EFAULT;
496 }
497
498 DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
499 return status;
500}
501
502/*
465 * ======== dev_get_cod_mgr ======== 503 * ======== dev_get_cod_mgr ========
466 * Purpose: 504 * Purpose:
467 * Retrieve the COD manager created for this device. 505 * Retrieve the COD manager created for this device.
@@ -713,8 +751,10 @@ void dev_exit(void)
713 751
714 refs--; 752 refs--;
715 753
716 if (refs == 0) 754 if (refs == 0) {
717 cmm_exit(); 755 cmm_exit();
756 dmm_exit();
757 }
718 758
719 DBC_ENSURE(refs >= 0); 759 DBC_ENSURE(refs >= 0);
720} 760}
@@ -726,12 +766,25 @@ void dev_exit(void)
726 */ 766 */
727bool dev_init(void) 767bool dev_init(void)
728{ 768{
729 bool ret = true; 769 bool cmm_ret, dmm_ret, ret = true;
730 770
731 DBC_REQUIRE(refs >= 0); 771 DBC_REQUIRE(refs >= 0);
732 772
733 if (refs == 0) 773 if (refs == 0) {
734 ret = cmm_init(); 774 cmm_ret = cmm_init();
775 dmm_ret = dmm_init();
776
777 ret = cmm_ret && dmm_ret;
778
779 if (!ret) {
780 if (cmm_ret)
781 cmm_exit();
782
783 if (dmm_ret)
784 dmm_exit();
785
786 }
787 }
735 788
736 if (ret) 789 if (ret)
737 refs++; 790 refs++;
@@ -1065,6 +1118,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns,
1065 STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); 1118 STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
1066 STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); 1119 STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
1067 STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); 1120 STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
1121 STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
1122 STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
1068 STORE_FXN(fxn_chnl_create, pfn_chnl_create); 1123 STORE_FXN(fxn_chnl_create, pfn_chnl_create);
1069 STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); 1124 STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
1070 STORE_FXN(fxn_chnl_open, pfn_chnl_open); 1125 STORE_FXN(fxn_chnl_open, pfn_chnl_open);
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
new file mode 100644
index 000000000000..8685233d7627
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -0,0 +1,533 @@
1/*
2 * dmm.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
7 * space that can be directly mapped to any MPU buffer or memory region
8 *
9 * Notes:
10 * Region: Generic memory entity having a start address and a size
11 * Chunk: Reserved region
12 *
13 * Copyright (C) 2005-2006 Texas Instruments, Inc.
14 *
15 * This package is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
18 *
19 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
21 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
22 */
23#include <linux/types.h>
24
25/* ----------------------------------- Host OS */
26#include <dspbridge/host_os.h>
27
28/* ----------------------------------- DSP/BIOS Bridge */
29#include <dspbridge/dbdefs.h>
30
31/* ----------------------------------- Trace & Debug */
32#include <dspbridge/dbc.h>
33
34/* ----------------------------------- OS Adaptation Layer */
35#include <dspbridge/sync.h>
36
37/* ----------------------------------- Platform Manager */
38#include <dspbridge/dev.h>
39#include <dspbridge/proc.h>
40
41/* ----------------------------------- This */
42#include <dspbridge/dmm.h>
43
44/* ----------------------------------- Defines, Data Structures, Typedefs */
45#define DMM_ADDR_VIRTUAL(a) \
46 (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
47 dyn_mem_map_beg)
48#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
49
50/* DMM Mgr */
51struct dmm_object {
52 /* dmm_lock serializes access to the memory manager across
53 * multiple threads. */
54 spinlock_t dmm_lock; /* Lock to access dmm mgr */
55};
56
57/* ----------------------------------- Globals */
58static u32 refs; /* module reference count */
59struct map_page {
60 u32 region_size:15;
61 u32 mapped_size:15;
62 u32 reserved:1;
63 u32 mapped:1;
64};
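Entry i of the table describes the 4 KiB page at dyn_mem_map_beg + i * PG_SIZE4K, and region_size/mapped_size count pages rather than bytes. A worked example of the macro arithmetic, assuming dyn_mem_map_beg = 0x20000000 purely for illustration:

/* DMM_ADDR_TO_INDEX(0x20005000) = (0x20005000 - 0x20000000) / 0x1000 = 5,
 * so 0x20005000 is described by virtual_mapping_table[5], and
 * DMM_ADDR_VIRTUAL(&virtual_mapping_table[5]) maps back to 0x20005000.
 * With 15-bit page counts, a single region or mapping tops out at
 * (2^15 - 1) * 4 KiB, just under 128 MiB. */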
65
66/* Create the free list */
67static struct map_page *virtual_mapping_table;
68static u32 free_region; /* The index of free region */
69static u32 free_size;
70static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */
71static u32 table_size; /* The size of virt and phys pages tables */
72
73/* ----------------------------------- Function Prototypes */
74static struct map_page *get_region(u32 addr);
75static struct map_page *get_free_region(u32 len);
76static struct map_page *get_mapped_region(u32 addrs);
77
78/* ======== dmm_create_tables ========
79 * Purpose:
80 * Create the table that holds information about the physical
81 * addresses of the buffer pages passed in by the user, and the
82 * table that tracks the virtual memory reserved for the
83 * DSP.
84 */
85int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
86{
87 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
88 int status = 0;
89
90 status = dmm_delete_tables(dmm_obj);
91 if (!status) {
92 dyn_mem_map_beg = addr;
93 table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
94 /* Create the free list */
95 virtual_mapping_table = __vmalloc(table_size *
96 sizeof(struct map_page), GFP_KERNEL |
97 __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
98 if (virtual_mapping_table == NULL)
99 status = -ENOMEM;
100 else {
101 /* On successful allocation,
102 * all entries are zero ('free') */
103 free_region = 0;
104 free_size = table_size * PG_SIZE4K;
105 virtual_mapping_table[0].region_size = table_size;
106 }
107 }
108
109 if (status)
110 pr_err("%s: failure, status 0x%x\n", __func__, status);
111
112 return status;
113}
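The table holds one map_page per 4 KiB page of the managed range. A quick check of the sizing arithmetic, with the pool size chosen purely for illustration:

/* For a hypothetical 256 MiB pool:
 * table_size = PG_ALIGN_HIGH(0x10000000, PG_SIZE4K) / PG_SIZE4K = 65536,
 * i.e. 65536 * sizeof(struct map_page) = 256 KiB of table, which is why
 * the allocation goes through __vmalloc() rather than kmalloc(). */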
114
115/*
116 * ======== dmm_create ========
117 * Purpose:
118 * Create a dynamic memory manager object.
119 */
120int dmm_create(struct dmm_object **dmm_manager,
121 struct dev_object *hdev_obj,
122 const struct dmm_mgrattrs *mgr_attrts)
123{
124 struct dmm_object *dmm_obj = NULL;
125 int status = 0;
126 DBC_REQUIRE(refs > 0);
127 DBC_REQUIRE(dmm_manager != NULL);
128
129 *dmm_manager = NULL;
130 /* create, zero, and tag a dmm mgr object */
131 dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
132 if (dmm_obj != NULL) {
133 spin_lock_init(&dmm_obj->dmm_lock);
134 *dmm_manager = dmm_obj;
135 } else {
136 status = -ENOMEM;
137 }
138
139 return status;
140}
141
142/*
143 * ======== dmm_destroy ========
144 * Purpose:
145 * Release the dynamic memory manager resources.
146 */
147int dmm_destroy(struct dmm_object *dmm_mgr)
148{
149 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
150 int status = 0;
151
152 DBC_REQUIRE(refs > 0);
153 if (dmm_mgr) {
154 status = dmm_delete_tables(dmm_obj);
155 if (!status)
156 kfree(dmm_obj);
157 } else
158 status = -EFAULT;
159
160 return status;
161}
162
163/*
164 * ======== dmm_delete_tables ========
165 * Purpose:
166 * Delete DMM Tables.
167 */
168int dmm_delete_tables(struct dmm_object *dmm_mgr)
169{
170 int status = 0;
171
172 DBC_REQUIRE(refs > 0);
173 /* Delete all DMM tables */
174 if (dmm_mgr)
175 vfree(virtual_mapping_table);
176 else
177 status = -EFAULT;
178 return status;
179}
180
181/*
182 * ======== dmm_exit ========
183 * Purpose:
184 * Discontinue usage of module; free resources when reference count
185 * reaches 0.
186 */
187void dmm_exit(void)
188{
189 DBC_REQUIRE(refs > 0);
190
191 refs--;
192}
193
194/*
195 * ======== dmm_get_handle ========
196 * Purpose:
197 * Return the dynamic memory manager object for this device.
198 * This is typically called from the client process.
199 */
200int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
201{
202 int status = 0;
203 struct dev_object *hdev_obj;
204
205 DBC_REQUIRE(refs > 0);
206 DBC_REQUIRE(dmm_manager != NULL);
207 if (hprocessor != NULL)
208 status = proc_get_dev_object(hprocessor, &hdev_obj);
209 else
210 hdev_obj = dev_get_first(); /* default */
211
212 if (!status)
213 status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
214
215 return status;
216}
217
218/*
219 * ======== dmm_init ========
220 * Purpose:
221 * Initializes private state of DMM module.
222 */
223bool dmm_init(void)
224{
225 bool ret = true;
226
227 DBC_REQUIRE(refs >= 0);
228
229 if (ret)
230 refs++;
231
232 DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
233
234 virtual_mapping_table = NULL;
235 table_size = 0;
236
237 return ret;
238}
239
240/*
241 * ======== dmm_map_memory ========
242 * Purpose:
243 * Add a mapping block to the reserved chunk. DMM assumes that this block
244 * will be mapped in the DSP/IVA's address space. DMM returns an error if a
245 * mapping overlaps another one. This function stores the info that will be
246 * required later while unmapping the block.
247 */
248int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
249{
250 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
251 struct map_page *chunk;
252 int status = 0;
253
254 spin_lock(&dmm_obj->dmm_lock);
255 /* Find the Reserved memory chunk containing the DSP block to
256 * be mapped */
257 chunk = (struct map_page *)get_region(addr);
258 if (chunk != NULL) {
259 /* Mark the region 'mapped', leave the 'reserved' info as-is */
260 chunk->mapped = true;
261 chunk->mapped_size = (size / PG_SIZE4K);
262 } else
263 status = -ENOENT;
264 spin_unlock(&dmm_obj->dmm_lock);
265
266 dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
267 "chunk %p\n", __func__, dmm_mgr, addr, size, status, chunk);
268
269 return status;
270}
271
272/*
273 * ======== dmm_reserve_memory ========
274 * Purpose:
275 * Reserve a chunk of virtually contiguous DSP/IVA address space.
276 */
277int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
278 u32 *prsv_addr)
279{
280 int status = 0;
281 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
282 struct map_page *node;
283 u32 rsv_addr = 0;
284 u32 rsv_size = 0;
285
286 spin_lock(&dmm_obj->dmm_lock);
287
288 /* Try to get a DSP chunk from the free list */
289 node = get_free_region(size);
290 if (node != NULL) {
291 /* DSP chunk of given size is available. */
292 rsv_addr = DMM_ADDR_VIRTUAL(node);
293 /* Calculate the number entries to use */
294 rsv_size = size / PG_SIZE4K;
295 if (rsv_size < node->region_size) {
296 /* Mark remainder of free region */
297 node[rsv_size].mapped = false;
298 node[rsv_size].reserved = false;
299 node[rsv_size].region_size =
300 node->region_size - rsv_size;
301 node[rsv_size].mapped_size = 0;
302 }
303 /* get_free_region() returns a first-fit chunk, but we only
304 * use what was requested. */
305 node->mapped = false;
306 node->reserved = true;
307 node->region_size = rsv_size;
308 node->mapped_size = 0;
309 /* Return the chunk's starting address */
310 *prsv_addr = rsv_addr;
311 } else
312 /* DSP chunk of given size is not available */
313 status = -ENOMEM;
314
315 spin_unlock(&dmm_obj->dmm_lock);
316
317 dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
318 "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
319 prsv_addr, status, rsv_addr, rsv_size);
320
321 return status;
322}
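At this layer the reserve/map lifecycle reads as below; a minimal sketch with illustrative sizes (all function names are from this file; error paths trimmed):

	u32 rsv_addr, unmap_size;
	int status;

	status = dmm_reserve_memory(dmm_mgr, 64 * PG_SIZE4K, &rsv_addr);
	if (!status)
		status = dmm_map_memory(dmm_mgr, rsv_addr, 16 * PG_SIZE4K);

	/* ... later, tear down in reverse order ... */
	dmm_un_map_memory(dmm_mgr, rsv_addr, &unmap_size);
	dmm_un_reserve_memory(dmm_mgr, rsv_addr);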
323
324/*
325 * ======== dmm_un_map_memory ========
326 * Purpose:
327 * Remove the mapped block from the reserved chunk.
328 */
329int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
330{
331 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
332 struct map_page *chunk;
333 int status = 0;
334
335 spin_lock(&dmm_obj->dmm_lock);
336 chunk = get_mapped_region(addr);
337 if (chunk == NULL)
338 status = -ENOENT;
339
340 if (!status) {
341 /* Unmap the region */
342 *psize = chunk->mapped_size * PG_SIZE4K;
343 chunk->mapped = false;
344 chunk->mapped_size = 0;
345 }
346 spin_unlock(&dmm_obj->dmm_lock);
347
348 dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
349 "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
350
351 return status;
352}
353
354/*
355 * ======== dmm_un_reserve_memory ========
356 * Purpose:
357 * Free a chunk of reserved DSP/IVA address space.
358 */
359int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
360{
361 struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
362 struct map_page *chunk;
363 u32 i;
364 int status = 0;
365 u32 chunk_size;
366
367 spin_lock(&dmm_obj->dmm_lock);
368
369 /* Find the chunk containing the reserved address */
370 chunk = get_mapped_region(rsv_addr);
371 if (chunk == NULL)
372 status = -ENOENT;
373
374 if (!status) {
375 /* Free all the mapped pages for this reserved region */
376 i = 0;
377 while (i < chunk->region_size) {
378 if (chunk[i].mapped) {
379 /* Remove mapping from the page tables. */
380 chunk_size = chunk[i].mapped_size;
381 /* Clear the mapping flags */
382 chunk[i].mapped = false;
383 chunk[i].mapped_size = 0;
384 i += chunk_size;
385 } else
386 i++;
387 }
388 /* Clear the flags (mark the region 'free') */
389 chunk->reserved = false;
390 /* NOTE: We do NOT coalesce free regions here.
391 * Free regions are coalesced in get_free_region(), as it
392 * traverses the whole mapping table
393 */
394 }
395 spin_unlock(&dmm_obj->dmm_lock);
396
397 dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x, chunk %p\n",
398 __func__, dmm_mgr, rsv_addr, status, chunk);
399
400 return status;
401}
402
403/*
404 * ======== get_region ========
405 * Purpose:
406 * Returns a region containing the specified memory region
407 */
408static struct map_page *get_region(u32 addr)
409{
410 struct map_page *curr_region = NULL;
411 u32 i = 0;
412
413 if (virtual_mapping_table != NULL) {
414 /* find page mapped by this address */
415 i = DMM_ADDR_TO_INDEX(addr);
416 if (i < table_size)
417 curr_region = virtual_mapping_table + i;
418 }
419
420 dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
421 __func__, curr_region, free_region, free_size);
422 return curr_region;
423}
424
425/*
426 * ======== get_free_region ========
427 * Purpose:
428 * Returns the requested free region
429 */
430static struct map_page *get_free_region(u32 len)
431{
432 struct map_page *curr_region = NULL;
433 u32 i = 0;
434 u32 region_size = 0;
435 u32 next_i = 0;
436
437 if (virtual_mapping_table == NULL)
438 return curr_region;
439 if (len > free_size) {
440 /* Find the largest free region
441 * (coalesce during the traversal) */
442 while (i < table_size) {
443 region_size = virtual_mapping_table[i].region_size;
444 next_i = i + region_size;
445 if (virtual_mapping_table[i].reserved == false) {
446 /* Coalesce, if possible */
447 if (next_i < table_size &&
448 virtual_mapping_table[next_i].reserved
449 == false) {
450 virtual_mapping_table[i].region_size +=
451 virtual_mapping_table
452 [next_i].region_size;
453 continue;
454 }
455 region_size *= PG_SIZE4K;
456 if (region_size > free_size) {
457 free_region = i;
458 free_size = region_size;
459 }
460 }
461 i = next_i;
462 }
463 }
464 if (len <= free_size) {
465 curr_region = virtual_mapping_table + free_region;
466 free_region += (len / PG_SIZE4K);
467 free_size -= len;
468 }
469 return curr_region;
470}
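The scan above doubles as lazy free-space compaction: whenever two neighbouring regions are both unreserved, the first absorbs the second before the walk continues. A worked trace under an assumed 15-entry table (page counts illustrative):

/* table: [free 4][free 6][rsvd 2][free 3]
 * i = 0:  neighbour at index 4 is also free, so entry 0 grows to
 *         region_size = 10 and the loop re-examines i = 0;
 * i = 0:  neighbour at index 10 is reserved; the 10-page free run is
 *         recorded as free_region = 0, free_size = 10 * PG_SIZE4K;
 * i = 10: reserved, skip ahead to i = 12;
 * i = 12: trailing 3-page free run is smaller, so it is ignored. */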
471
472/*
473 * ======== get_mapped_region ========
474 * Purpose:
475 * Returns the requested mapped region
476 */
477static struct map_page *get_mapped_region(u32 addrs)
478{
479 u32 i = 0;
480 struct map_page *curr_region = NULL;
481
482 if (virtual_mapping_table == NULL)
483 return curr_region;
484
485 i = DMM_ADDR_TO_INDEX(addrs);
486 if (i < table_size && (virtual_mapping_table[i].mapped ||
487 virtual_mapping_table[i].reserved))
488 curr_region = virtual_mapping_table + i;
489 return curr_region;
490}
491
492#ifdef DSP_DMM_DEBUG
493u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
494{
495 struct map_page *curr_node = NULL;
496 u32 i;
497 u32 freemem = 0;
498 u32 bigsize = 0;
499
500 spin_lock(&dmm_mgr->dmm_lock);
501
502 if (virtual_mapping_table != NULL) {
503 for (i = 0; i < table_size; i +=
504 virtual_mapping_table[i].region_size) {
505 curr_node = virtual_mapping_table + i;
506 if (curr_node->reserved) {
507 /*printk("RESERVED size = 0x%x, "
508 "Map size = 0x%x\n",
509 (curr_node->region_size * PG_SIZE4K),
510 (curr_node->mapped == false) ? 0 :
511 (curr_node->mapped_size * PG_SIZE4K));
512 */
513 } else {
514/* printk("UNRESERVED size = 0x%x\n",
515 (curr_node->region_size * PG_SIZE4K));
516 */
517 freemem += (curr_node->region_size * PG_SIZE4K);
518 if (curr_node->region_size > bigsize)
519 bigsize = curr_node->region_size;
520 }
521 }
522 }
523 spin_unlock(&dmm_mgr->dmm_lock);
524 printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
525 freemem / (1024 * 1024));
526 printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
527 (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
528 printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
529 (bigsize * PG_SIZE4K / (1024 * 1024)));
530
531 return 0;
532}
533#endif
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 981551ce4d78..86ca785f1913 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -993,10 +993,27 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt)
993/* 993/*
994 * ======== procwrap_reserve_memory ======== 994 * ======== procwrap_reserve_memory ========
995 */ 995 */
996u32 __deprecated procwrap_reserve_memory(union trapped_args *args, 996u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
997 void *pr_ctxt)
998{ 997{
999 return 0; 998 int status;
999 void *prsv_addr;
1000 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
1001
1002 if ((args->args_proc_rsvmem.ul_size <= 0) ||
1003 (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
1004 return -EINVAL;
1005
1006 status = proc_reserve_memory(hprocessor,
1007 args->args_proc_rsvmem.ul_size, &prsv_addr,
1008 pr_ctxt);
1009 if (!status) {
1010 if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
1011 status = -EINVAL;
1012 proc_un_reserve_memory(args->args_proc_rsvmem.
1013 hprocessor, prsv_addr, pr_ctxt);
1014 }
1015 }
1016 return status;
1000} 1017}
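The new guard only admits sizes that are non-zero multiples of the 4 KiB DMM page. A standalone mock of the same predicate (not driver code; PG_SIZE4K assumed to be 4096):

#include <stdbool.h>

/* mirrors the ul_size check in procwrap_reserve_memory() above */
static bool rsv_size_ok(unsigned long ul_size)
{
	return ul_size != 0 && (ul_size & (4096 - 1)) == 0;
}

/* rsv_size_ok(4096) and rsv_size_ok(8192) hold;
 * rsv_size_ok(0) and rsv_size_ok(6000) do not. */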
1001 1018
1002/* 1019/*
@@ -1025,10 +1042,15 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
1025/* 1042/*
1026 * ======== procwrap_un_reserve_memory ======== 1043 * ======== procwrap_un_reserve_memory ========
1027 */ 1044 */
1028u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args, 1045u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
1029 void *pr_ctxt)
1030{ 1046{
1031 return 0; 1047 int status;
1048 void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
1049
1050 status = proc_un_reserve_memory(hprocessor,
1051 args->args_proc_unrsvmem.prsv_addr,
1052 pr_ctxt);
1053 return status;
1032} 1054}
1033 1055
1034/* 1056/*
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index 91cc168516e5..81b1b9013550 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -146,6 +146,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
146 struct process_context *ctxt = (struct process_context *)process_ctxt; 146 struct process_context *ctxt = (struct process_context *)process_ctxt;
147 int status = 0; 147 int status = 0;
148 struct dmm_map_object *temp_map, *map_obj; 148 struct dmm_map_object *temp_map, *map_obj;
149 struct dmm_rsv_object *temp_rsv, *rsv_obj;
149 150
150 /* Free DMM mapped memory resources */ 151 /* Free DMM mapped memory resources */
151 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { 152 list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
@@ -155,6 +156,16 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt)
155 pr_err("%s: proc_un_map failed!" 156 pr_err("%s: proc_un_map failed!"
156 " status = 0x%xn", __func__, status); 157 " status = 0x%xn", __func__, status);
157 } 158 }
159
160 /* Free DMM reserved memory resources */
161 list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
162 status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
163 rsv_obj->dsp_reserved_addr,
164 ctxt);
165 if (status)
166 pr_err("%s: proc_un_reserve_memory failed!"
167 " status = 0x%xn", __func__, status);
168 }
158 return status; 169 return status;
159} 170}
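A note on the new loop: list_for_each_entry_safe() is required because proc_un_reserve_memory() unlinks the matching rsv_obj from dmm_rsv_list and frees it while the walk is in progress; temp_rsv caches the next node before the current one disappears. The dmm_map_list loop above relies on the same idiom, since proc_un_map() frees map entries via remove_mapping_information().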
160 171
@@ -732,6 +743,7 @@ static int request_bridge_resources(struct cfg_hostres *res)
732 host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); 743 host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
733 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); 744 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
734 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); 745 dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
746 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
735 747
736 /* for 24xx base port is not mapping the memory for DSP 748 /* for 24xx base port is not mapping the memory for DSP
737 * internal memory TODO Do a ioremap here */ 749 * internal memory TODO Do a ioremap here */
@@ -785,6 +797,8 @@ int drv_request_bridge_res_dsp(void **phost_resources)
785 OMAP_PER_PRM_SIZE); 797 OMAP_PER_PRM_SIZE);
786 host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, 798 host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
787 OMAP_CORE_PRM_SIZE); 799 OMAP_CORE_PRM_SIZE);
800 host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
801 OMAP_DMMU_SIZE);
788 802
789 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", 803 dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
790 host_res->dw_mem_base[0]); 804 host_res->dw_mem_base[0]);
@@ -796,6 +810,7 @@ int drv_request_bridge_res_dsp(void **phost_resources)
796 host_res->dw_mem_base[3]); 810 host_res->dw_mem_base[3]);
797 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", 811 dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
798 host_res->dw_mem_base[4]); 812 host_res->dw_mem_base[4]);
813 dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
799 814
800 shm_size = drv_datap->shm_size; 815 shm_size = drv_datap->shm_size;
801 if (shm_size >= 0x10000) { 816 if (shm_size >= 0x10000) {
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 34be43fec044..324fcdffb3b3 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -509,6 +509,8 @@ static int bridge_open(struct inode *ip, struct file *filp)
509 pr_ctxt->res_state = PROC_RES_ALLOCATED; 509 pr_ctxt->res_state = PROC_RES_ALLOCATED;
510 spin_lock_init(&pr_ctxt->dmm_map_lock); 510 spin_lock_init(&pr_ctxt->dmm_map_lock);
511 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); 511 INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
512 spin_lock_init(&pr_ctxt->dmm_rsv_lock);
513 INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
512 514
513 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); 515 pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
514 if (pr_ctxt->node_id) { 516 if (pr_ctxt->node_id) {
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index a660247f527a..1562f3c1281c 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -56,6 +56,7 @@
56/* ----------------------------------- This */ 56/* ----------------------------------- This */
57#include <dspbridge/nodepriv.h> 57#include <dspbridge/nodepriv.h>
58#include <dspbridge/node.h> 58#include <dspbridge/node.h>
59#include <dspbridge/dmm.h>
59 60
60/* Static/Dynamic Loader includes */ 61/* Static/Dynamic Loader includes */
61#include <dspbridge/dbll.h> 62#include <dspbridge/dbll.h>
@@ -316,6 +317,10 @@ int node_allocate(struct proc_object *hprocessor,
316 u32 mapped_addr = 0; 317 u32 mapped_addr = 0;
317 u32 map_attrs = 0x0; 318 u32 map_attrs = 0x0;
318 struct dsp_processorstate proc_state; 319 struct dsp_processorstate proc_state;
320#ifdef DSP_DMM_DEBUG
321 struct dmm_object *dmm_mgr;
322 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
323#endif
319 324
320 void *node_res; 325 void *node_res;
321 326
@@ -425,12 +430,34 @@ int node_allocate(struct proc_object *hprocessor,
425 if (status) 430 if (status)
426 goto func_cont; 431 goto func_cont;
427 432
433 status = proc_reserve_memory(hprocessor,
434 pnode->create_args.asa.task_arg_obj.
435 heap_size + PAGE_SIZE,
436 (void **)&(pnode->create_args.asa.
437 task_arg_obj.udsp_heap_res_addr),
438 pr_ctxt);
439 if (status) {
440 pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
441 __func__, status);
442 goto func_cont;
443 }
444#ifdef DSP_DMM_DEBUG
445 status = dmm_get_handle(p_proc_object, &dmm_mgr);
446 if (!dmm_mgr) {
447 status = DSP_EHANDLE;
448 goto func_cont;
449 }
450
451 dmm_mem_map_dump(dmm_mgr);
452#endif
453
428 map_attrs |= DSP_MAPLITTLEENDIAN; 454 map_attrs |= DSP_MAPLITTLEENDIAN;
429 map_attrs |= DSP_MAPELEMSIZE32; 455 map_attrs |= DSP_MAPELEMSIZE32;
430 map_attrs |= DSP_MAPVIRTUALADDR; 456 map_attrs |= DSP_MAPVIRTUALADDR;
431 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, 457 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
432 pnode->create_args.asa.task_arg_obj.heap_size, 458 pnode->create_args.asa.task_arg_obj.heap_size,
433 NULL, (void **)&mapped_addr, map_attrs, 459 (void *)pnode->create_args.asa.task_arg_obj.
460 udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
434 pr_ctxt); 461 pr_ctxt);
435 if (status) 462 if (status)
436 pr_err("%s: Failed to map memory for Heap: 0x%x\n", 463 pr_err("%s: Failed to map memory for Heap: 0x%x\n",
@@ -2484,7 +2511,11 @@ static void delete_node(struct node_object *hnode,
2484 struct stream_chnl stream; 2511 struct stream_chnl stream;
2485 struct node_msgargs node_msg_args; 2512 struct node_msgargs node_msg_args;
2486 struct node_taskargs task_arg_obj; 2513 struct node_taskargs task_arg_obj;
2487 2514#ifdef DSP_DMM_DEBUG
2515 struct dmm_object *dmm_mgr;
2516 struct proc_object *p_proc_object =
2517 (struct proc_object *)hnode->hprocessor;
2518#endif
2488 int status; 2519 int status;
2489 if (!hnode) 2520 if (!hnode)
2490 goto func_end; 2521 goto func_end;
@@ -2545,6 +2576,19 @@ static void delete_node(struct node_object *hnode,
2545 status = proc_un_map(hnode->hprocessor, (void *) 2576 status = proc_un_map(hnode->hprocessor, (void *)
2546 task_arg_obj.udsp_heap_addr, 2577 task_arg_obj.udsp_heap_addr,
2547 pr_ctxt); 2578 pr_ctxt);
2579
2580 status = proc_un_reserve_memory(hnode->hprocessor,
2581 (void *)
2582 task_arg_obj.
2583 udsp_heap_res_addr,
2584 pr_ctxt);
2585#ifdef DSP_DMM_DEBUG
2586 status = dmm_get_handle(p_proc_object, &dmm_mgr);
2587 if (dmm_mgr)
2588 dmm_mem_map_dump(dmm_mgr);
2589 else
2590 status = DSP_EHANDLE;
2591#endif
2548 } 2592 }
2549 } 2593 }
2550 if (node_type != NODE_MESSAGE) { 2594 if (node_type != NODE_MESSAGE) {
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 7a15a02efedf..b47d7aa747b1 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -39,6 +39,7 @@
39#include <dspbridge/cod.h> 39#include <dspbridge/cod.h>
40#include <dspbridge/dev.h> 40#include <dspbridge/dev.h>
41#include <dspbridge/procpriv.h> 41#include <dspbridge/procpriv.h>
42#include <dspbridge/dmm.h>
42 43
43/* ----------------------------------- Resource Manager */ 44/* ----------------------------------- Resource Manager */
44#include <dspbridge/mgr.h> 45#include <dspbridge/mgr.h>
@@ -51,7 +52,6 @@
51#include <dspbridge/msg.h> 52#include <dspbridge/msg.h>
52#include <dspbridge/dspioctl.h> 53#include <dspbridge/dspioctl.h>
53#include <dspbridge/drv.h> 54#include <dspbridge/drv.h>
54#include <_tiomap.h>
55 55
56/* ----------------------------------- This */ 56/* ----------------------------------- This */
57#include <dspbridge/proc.h> 57#include <dspbridge/proc.h>
@@ -151,21 +151,34 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
151 return map_obj; 151 return map_obj;
152} 152}
153 153
154static int match_exact_map_obj(struct dmm_map_object *map_obj,
155 u32 dsp_addr, u32 size)
156{
157 if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
158 pr_err("%s: addr match (0x%x), sizes don't match (0x%x != 0x%x)\n",
159 __func__, dsp_addr, map_obj->size, size);
160
161 return map_obj->dsp_addr == dsp_addr &&
162 map_obj->size == size;
163}
164
154static void remove_mapping_information(struct process_context *pr_ctxt, 165static void remove_mapping_information(struct process_context *pr_ctxt,
155 u32 dsp_addr) 166 u32 dsp_addr, u32 size)
156{ 167{
157 struct dmm_map_object *map_obj; 168 struct dmm_map_object *map_obj;
158 169
159 pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr); 170 pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
171 dsp_addr, size);
160 172
161 spin_lock(&pr_ctxt->dmm_map_lock); 173 spin_lock(&pr_ctxt->dmm_map_lock);
162 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { 174 list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
163 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n", 175 pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
164 __func__, 176 __func__,
165 map_obj->mpu_addr, 177 map_obj->mpu_addr,
166 map_obj->dsp_addr); 178 map_obj->dsp_addr,
179 map_obj->size);
167 180
168 if (map_obj->dsp_addr == dsp_addr) { 181 if (match_exact_map_obj(map_obj, dsp_addr, size)) {
169 pr_debug("%s: match, deleting map info\n", __func__); 182 pr_debug("%s: match, deleting map info\n", __func__);
170 list_del(&map_obj->link); 183 list_del(&map_obj->link);
171 kfree(map_obj->dma_info.sg); 184 kfree(map_obj->dma_info.sg);
@@ -1077,6 +1090,7 @@ int proc_load(void *hprocessor, const s32 argc_index,
1077 s32 cnew_envp; /* " " in new_envp[] */ 1090 s32 cnew_envp; /* " " in new_envp[] */
1078 s32 nproc_id = 0; /* Anticipate MP version. */ 1091 s32 nproc_id = 0; /* Anticipate MP version. */
1079 struct dcd_manager *hdcd_handle; 1092 struct dcd_manager *hdcd_handle;
1093 struct dmm_object *dmm_mgr;
1080 u32 dw_ext_end; 1094 u32 dw_ext_end;
1081 u32 proc_id; 1095 u32 proc_id;
1082 int brd_state; 1096 int brd_state;
@@ -1267,6 +1281,25 @@ int proc_load(void *hprocessor, const s32 argc_index,
1267 if (!status) 1281 if (!status)
1268 status = cod_get_sym_value(cod_mgr, EXTEND, 1282 status = cod_get_sym_value(cod_mgr, EXTEND,
1269 &dw_ext_end); 1283 &dw_ext_end);
1284
1285 /* Reset DMM structs and add an initial free chunk */
1286 if (!status) {
1287 status =
1288 dev_get_dmm_mgr(p_proc_object->hdev_obj,
1289 &dmm_mgr);
1290 if (dmm_mgr) {
1291 /* Set dw_ext_end to the DMM start byte
1292 * address */
1293 dw_ext_end =
1294 (dw_ext_end + 1) * DSPWORDSIZE;
1295 /* DMM memory is from EXT_END */
1296 status = dmm_create_tables(dmm_mgr,
1297 dw_ext_end,
1298 DMMPOOLSIZE);
1299 } else {
1300 status = -EFAULT;
1301 }
1302 }
1270 } 1303 }
1271 } 1304 }
1272 /* Restore the original argv[0] */ 1305 /* Restore the original argv[0] */
@@ -1319,10 +1352,12 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1319{ 1352{
1320 u32 va_align; 1353 u32 va_align;
1321 u32 pa_align; 1354 u32 pa_align;
1355 struct dmm_object *dmm_mgr;
1322 u32 size_align; 1356 u32 size_align;
1323 int status = 0; 1357 int status = 0;
1324 struct proc_object *p_proc_object = (struct proc_object *)hprocessor; 1358 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1325 struct dmm_map_object *map_obj; 1359 struct dmm_map_object *map_obj;
1360 u32 tmp_addr = 0;
1326 1361
1327#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK 1362#ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
1328 if ((ul_map_attr & BUFMODE_MASK) != RBUF) { 1363 if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
@@ -1347,30 +1382,33 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
1347 } 1382 }
1348 /* Critical section */ 1383 /* Critical section */
1349 mutex_lock(&proc_lock); 1384 mutex_lock(&proc_lock);
1385 dmm_get_handle(p_proc_object, &dmm_mgr);
1386 if (dmm_mgr)
1387 status = dmm_map_memory(dmm_mgr, va_align, size_align);
1388 else
1389 status = -EFAULT;
1350 1390
1351 /* Add mapping to the page tables. */ 1391 /* Add mapping to the page tables. */
1352 if (!status) { 1392 if (!status) {
1393
1394 /* Mapped address = MSB of VA | LSB of PA */
1395 tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
1353 /* mapped memory resource tracking */ 1396 /* mapped memory resource tracking */
1354 map_obj = add_mapping_info(pr_ctxt, pa_align, va_align, 1397 map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
1355 size_align); 1398 size_align);
1356 if (!map_obj) { 1399 if (!map_obj)
1357 status = -ENOMEM; 1400 status = -ENOMEM;
1358 } else { 1401 else
1359 va_align = user_to_dsp_map( 1402 status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
1360 p_proc_object->hbridge_context->dsp_mmu, 1403 (p_proc_object->hbridge_context, pa_align, va_align,
1361 pa_align, va_align, size_align, 1404 size_align, ul_map_attr, map_obj->pages);
1362 map_obj->pages);
1363 if (IS_ERR_VALUE(va_align))
1364 status = (int)va_align;
1365 }
1366 } 1405 }
1367 if (!status) { 1406 if (!status) {
1368 /* Mapped address = MSB of VA | LSB of PA */ 1407 /* Mapped address = MSB of VA | LSB of PA */
1369 map_obj->dsp_addr = (va_align | 1408 *pp_map_addr = (void *) tmp_addr;
1370 ((u32)pmpu_addr & (PG_SIZE4K - 1)));
1371 *pp_map_addr = (void *)map_obj->dsp_addr;
1372 } else { 1409 } else {
1373 remove_mapping_information(pr_ctxt, va_align); 1410 remove_mapping_information(pr_ctxt, tmp_addr, size_align);
1411 dmm_un_map_memory(dmm_mgr, va_align, &size_align);
1374 } 1412 }
1375 mutex_unlock(&proc_lock); 1413 mutex_unlock(&proc_lock);
1376 1414
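The "MSB of VA | LSB of PA" composition keeps the DSP-side address page-aligned at its base while preserving the user buffer's offset within its page. A numeric example (addresses illustrative):

/* va_align  = 0x20001000 (4 KiB-aligned DSP VA handed out by the DMM)
 * pmpu_addr = 0x40032a40 (user buffer, offset 0xa40 into its page)
 * tmp_addr  = 0x20001000 | (0x40032a40 & 0xfff) = 0x20001a40;
 * proc_un_map() later recovers the page base with PG_ALIGN_LOW(). */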
@@ -1463,6 +1501,55 @@ func_end:
1463} 1501}
1464 1502
1465/* 1503/*
1504 * ======== proc_reserve_memory ========
1505 * Purpose:
1506 * Reserve a virtually contiguous region of DSP address space.
1507 */
1508int proc_reserve_memory(void *hprocessor, u32 ul_size,
1509 void **pp_rsv_addr,
1510 struct process_context *pr_ctxt)
1511{
1512 struct dmm_object *dmm_mgr;
1513 int status = 0;
1514 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1515 struct dmm_rsv_object *rsv_obj;
1516
1517 if (!p_proc_object) {
1518 status = -EFAULT;
1519 goto func_end;
1520 }
1521
1522 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1523 if (!dmm_mgr) {
1524 status = -EFAULT;
1525 goto func_end;
1526 }
1527
1528 status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
1529 if (status != 0)
1530 goto func_end;
1531
1532 /*
1533 * A successful reserve should be followed by insertion of rsv_obj
1534 * into dmm_rsv_list, so that reserved memory resource tracking
1535 * remains up to date
1536 */
1537 rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
1538 if (rsv_obj) {
1539 rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
1540 spin_lock(&pr_ctxt->dmm_rsv_lock);
1541 list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
1542 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1543 }
1544
1545func_end:
1546 dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
1547 "status 0x%x\n", __func__, hprocessor,
1548 ul_size, pp_rsv_addr, status);
1549 return status;
1550}
1551
1552/*
1466 * ======== proc_start ======== 1553 * ======== proc_start ========
1467 * Purpose: 1554 * Purpose:
1468 * Start a processor running. 1555 * Start a processor running.
@@ -1610,7 +1697,9 @@ int proc_un_map(void *hprocessor, void *map_addr,
1610{ 1697{
1611 int status = 0; 1698 int status = 0;
1612 struct proc_object *p_proc_object = (struct proc_object *)hprocessor; 1699 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1700 struct dmm_object *dmm_mgr;
1613 u32 va_align; 1701 u32 va_align;
1702 u32 size_align;
1614 1703
1615 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); 1704 va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
1616 if (!p_proc_object) { 1705 if (!p_proc_object) {
@@ -1618,11 +1707,24 @@ int proc_un_map(void *hprocessor, void *map_addr,
1618 goto func_end; 1707 goto func_end;
1619 } 1708 }
1620 1709
1710 status = dmm_get_handle(hprocessor, &dmm_mgr);
1711 if (!dmm_mgr) {
1712 status = -EFAULT;
1713 goto func_end;
1714 }
1715
1621 /* Critical section */ 1716 /* Critical section */
1622 mutex_lock(&proc_lock); 1717 mutex_lock(&proc_lock);
1718 /*
1719 * Update DMM structures. Get the size to unmap.
1720 * This function returns error if the VA is not mapped
1721 */
1722 status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
1623 /* Remove mapping from the page tables. */ 1723 /* Remove mapping from the page tables. */
1624 status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu, 1724 if (!status) {
1625 va_align); 1725 status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
1726 (p_proc_object->hbridge_context, va_align, size_align);
1727 }
1626 1728
1627 mutex_unlock(&proc_lock); 1729 mutex_unlock(&proc_lock);
1628 if (status) 1730 if (status)
@@ -1633,7 +1735,7 @@ int proc_un_map(void *hprocessor, void *map_addr,
1633 * from dmm_map_list, so that mapped memory resource tracking 1735 * from dmm_map_list, so that mapped memory resource tracking
1634 * remains up to date 1736 * remains up to date
1635 */ 1737 */
1636 remove_mapping_information(pr_ctxt, (u32) map_addr); 1738 remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
1637 1739
1638func_end: 1740func_end:
1639 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", 1741 dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
@@ -1642,6 +1744,55 @@ func_end:
1642} 1744}
1643 1745
1644/* 1746/*
1747 * ======== proc_un_reserve_memory ========
1748 * Purpose:
1749 * Frees a previously reserved region of DSP address space.
1750 */
1751int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
1752 struct process_context *pr_ctxt)
1753{
1754 struct dmm_object *dmm_mgr;
1755 int status = 0;
1756 struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
1757 struct dmm_rsv_object *rsv_obj;
1758
1759 if (!p_proc_object) {
1760 status = -EFAULT;
1761 goto func_end;
1762 }
1763
1764 status = dmm_get_handle(p_proc_object, &dmm_mgr);
1765 if (!dmm_mgr) {
1766 status = -EFAULT;
1767 goto func_end;
1768 }
1769
1770 status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
1771 if (status != 0)
1772 goto func_end;
1773
1774 /*
1775 * A successful unreserve should be followed by removal of rsv_obj
1776 * from dmm_rsv_list, so that reserved memory resource tracking
1777 * remains up to date
1778 */
1779 spin_lock(&pr_ctxt->dmm_rsv_lock);
1780 list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
1781 if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
1782 list_del(&rsv_obj->link);
1783 kfree(rsv_obj);
1784 break;
1785 }
1786 }
1787 spin_unlock(&pr_ctxt->dmm_rsv_lock);
1788
1789func_end:
1790 dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
1791 __func__, hprocessor, prsv_addr, status);
1792 return status;
1793}
1794
1795/*
1645 * ======== proc_monitor ======== 1796 * ======== proc_monitor ========
1646 * Purpose: 1797 * Purpose:
1647 * Place the Processor in Monitor State. This is an internal 1798 * Place the Processor in Monitor State. This is an internal
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index 5969e848d297..fed25105970a 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -887,7 +887,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
887 887
888 struct fb_deferred_io *fbdefio; 888 struct fb_deferred_io *fbdefio;
889 889
890 fbdefio = kmalloc(GFP_KERNEL, sizeof(struct fb_deferred_io)); 890 fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
891 891
892 if (fbdefio) { 892 if (fbdefio) {
893 fbdefio->delay = DL_DEFIO_WRITE_DELAY; 893 fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index e992d5d9e15b..7cc3d2407d1b 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1675,13 +1675,14 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
1675 1675
1676 { 1676 {
1677 char essid[IW_ESSID_MAX_SIZE+1]; 1677 char essid[IW_ESSID_MAX_SIZE+1];
1678 if (wrq->u.essid.pointer) 1678 if (wrq->u.essid.pointer) {
1679 rc = iwctl_giwessid(dev, NULL, 1679 rc = iwctl_giwessid(dev, NULL,
1680 &(wrq->u.essid), essid); 1680 &(wrq->u.essid), essid);
1681 if (copy_to_user(wrq->u.essid.pointer, 1681 if (copy_to_user(wrq->u.essid.pointer,
1682 essid, 1682 essid,
1683 wrq->u.essid.length) ) 1683 wrq->u.essid.length) )
1684 rc = -EFAULT; 1684 rc = -EFAULT;
1685 }
1685 } 1686 }
1686 break; 1687 break;
1687 1688
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasusb.c b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
index 5a2197012065..7777d9a60a52 100644
--- a/drivers/staging/westbridge/astoria/api/src/cyasusb.c
+++ b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
@@ -1417,7 +1417,6 @@ cy_as_usb_set_enum_config(cy_as_device_handle handle,
1417 */ 1417 */
1418 bus_mask = 0; 1418 bus_mask = 0;
1419 media_mask = 0; 1419 media_mask = 0;
1420 media_mask = 0;
1421 for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) { 1420 for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) {
1422 for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) { 1421 for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) {
1423 if (config_p->devices_to_enumerate[bus][device] == 1422 if (config_p->devices_to_enumerate[bus][device] ==
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 4af83d5318f2..6a71f52c59b1 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -139,7 +139,7 @@ exit:
139} 139}
140 140
141int prism2_add_key(struct wiphy *wiphy, struct net_device *dev, 141int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
142 u8 key_index, const u8 *mac_addr, 142 u8 key_index, bool pairwise, const u8 *mac_addr,
143 struct key_params *params) 143 struct key_params *params)
144{ 144{
145 wlandevice_t *wlandev = dev->ml_priv; 145 wlandevice_t *wlandev = dev->ml_priv;
@@ -198,7 +198,7 @@ exit:
198} 198}
199 199
200int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, 200int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
201 u8 key_index, const u8 *mac_addr, void *cookie, 201 u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie,
202 void (*callback)(void *cookie, struct key_params*)) 202 void (*callback)(void *cookie, struct key_params*))
203{ 203{
204 wlandevice_t *wlandev = dev->ml_priv; 204 wlandevice_t *wlandev = dev->ml_priv;
@@ -227,7 +227,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
227} 227}
228 228
229int prism2_del_key(struct wiphy *wiphy, struct net_device *dev, 229int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
230 u8 key_index, const u8 *mac_addr) 230 u8 key_index, bool pairwise, const u8 *mac_addr)
231{ 231{
232 wlandevice_t *wlandev = dev->ml_priv; 232 wlandevice_t *wlandev = dev->ml_priv;
233 u32 did; 233 u32 did;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index aa1792c8429e..b7b4a733b467 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -522,8 +522,8 @@ static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr)
522 if (copy_to_user(useraddr, &edata, sizeof(edata))) 522 if (copy_to_user(useraddr, &edata, sizeof(edata)))
523 return -EFAULT; 523 return -EFAULT;
524 return 0; 524 return 0;
525 }
526#endif 525#endif
526 }
527 527
528 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
529} 529}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 04ef3ef0a422..81b46585edf7 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -716,8 +716,8 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
716 if (msg->len < 128) 716 if (msg->len < 128)
717 *--dp = (msg->len << 1) | EA; 717 *--dp = (msg->len << 1) | EA;
718 else { 718 else {
719 *--dp = (msg->len >> 6) | EA; 719 *--dp = ((msg->len & 127) << 1) | EA;
720 *--dp = (msg->len & 127) << 1; 720 *--dp = (msg->len >> 6) & 0xfe;
721 } 721 }
722 } 722 }
723 723
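For payloads of 128 bytes or more the corrected branch prepends two length octets; because dp walks backwards, the octet written second lands first on the wire. A mock of the layout the new code produces, with len chosen for illustration and the TS 27.010 EA-bit convention assumed:

/* len = 300 (0x12c):
 * wire octet 0 = (len >> 6) & 0xfe        = 0x04  (high bits, EA = 0)
 * wire octet 1 = ((len & 127) << 1) | EA  = 0x59  (low 7 bits, EA = 1)
 * The set EA bit marks the final octet of the length field. */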
@@ -2375,6 +2375,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm,
2375 gsm->mru = c->mru; 2375 gsm->mru = c->mru;
2376 gsm->encoding = c->encapsulation; 2376 gsm->encoding = c->encapsulation;
2377 gsm->adaption = c->adaption; 2377 gsm->adaption = c->adaption;
2378 gsm->n2 = c->n2;
2378 2379
2379 if (c->i == 1) 2380 if (c->i == 1)
2380 gsm->ftype = UIH; 2381 gsm->ftype = UIH;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index cc1e9850d655..d8210ca00720 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -413,7 +413,8 @@ static void flush_to_ldisc(struct work_struct *work)
413 spin_lock_irqsave(&tty->buf.lock, flags); 413 spin_lock_irqsave(&tty->buf.lock, flags);
414 414
415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { 415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
416 struct tty_buffer *head; 416 struct tty_buffer *head, *tail = tty->buf.tail;
417 int seen_tail = 0;
417 while ((head = tty->buf.head) != NULL) { 418 while ((head = tty->buf.head) != NULL) {
418 int count; 419 int count;
419 char *char_buf; 420 char *char_buf;
@@ -423,6 +424,15 @@ static void flush_to_ldisc(struct work_struct *work)
423 if (!count) { 424 if (!count) {
424 if (head->next == NULL) 425 if (head->next == NULL)
425 break; 426 break;
427 /*
428 * The tty might get a new buffer added during the
429 * unlock window below, and we could end up spinning
430 * in here forever, hogging the CPU completely. To
431 * avoid this, take a rest each time we process the
432 * tail buffer.
433 */
434 if (tail == head)
435 seen_tail = 1;
426 tty->buf.head = head->next; 436 tty->buf.head = head->next;
427 tty_buffer_free(tty, head); 437 tty_buffer_free(tty, head);
428 continue; 438 continue;
@@ -432,7 +442,7 @@ static void flush_to_ldisc(struct work_struct *work)
432 line discipline as we want to empty the queue */ 442 line discipline as we want to empty the queue */
433 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 443 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
434 break; 444 break;
435 if (!tty->receive_room) { 445 if (!tty->receive_room || seen_tail) {
436 schedule_delayed_work(&tty->buf.work, 1); 446 schedule_delayed_work(&tty->buf.work, 1);
437 break; 447 break;
438 } 448 }
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 412f9775d19c..d8e96b005023 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -47,6 +47,7 @@
47 47
48static DEFINE_SPINLOCK(tty_ldisc_lock); 48static DEFINE_SPINLOCK(tty_ldisc_lock);
49static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); 49static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
50static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
50/* Line disc dispatch table */ 51/* Line disc dispatch table */
51static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; 52static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
52 53
@@ -83,6 +84,7 @@ static void put_ldisc(struct tty_ldisc *ld)
83 return; 84 return;
84 } 85 }
85 local_irq_restore(flags); 86 local_irq_restore(flags);
87 wake_up(&tty_ldisc_idle);
86} 88}
87 89
88/** 90/**
@@ -531,6 +533,23 @@ static int tty_ldisc_halt(struct tty_struct *tty)
531} 533}
532 534
533/** 535/**
536 * tty_ldisc_wait_idle - wait for the ldisc to become idle
537 * @tty: tty to wait for
538 *
539 * Wait for the line discipline to become idle. The discipline must
540 * have been halted for this to guarantee it remains idle.
541 */
542static int tty_ldisc_wait_idle(struct tty_struct *tty)
543{
544 int ret;
545 ret = wait_event_interruptible_timeout(tty_ldisc_idle,
546 atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
547 if (ret < 0)
548 return ret;
549 return ret > 0 ? 0 : -EBUSY;
550}
551
552/**
534 * tty_set_ldisc - set line discipline 553 * tty_set_ldisc - set line discipline
535 * @tty: the terminal to set 554 * @tty: the terminal to set
536 * @ldisc: the line discipline 555 * @ldisc: the line discipline
@@ -634,8 +653,17 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
634 653
635 flush_scheduled_work(); 654 flush_scheduled_work();
636 655
656 retval = tty_ldisc_wait_idle(tty);
657
637 tty_lock(); 658 tty_lock();
638 mutex_lock(&tty->ldisc_mutex); 659 mutex_lock(&tty->ldisc_mutex);
660
661 /* handle wait idle failure locked */
662 if (retval) {
663 tty_ldisc_put(new_ldisc);
664 goto enable;
665 }
666
639 if (test_bit(TTY_HUPPED, &tty->flags)) { 667 if (test_bit(TTY_HUPPED, &tty->flags)) {
640 /* We were raced by the hangup method. It will have stomped 668 /* We were raced by the hangup method. It will have stomped
641 the ldisc data and closed the ldisc down */ 669 the ldisc data and closed the ldisc down */
@@ -669,6 +697,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
669 697
670 tty_ldisc_put(o_ldisc); 698 tty_ldisc_put(o_ldisc);
671 699
700enable:
672 /* 701 /*
673 * Allow ldisc referencing to occur again 702 * Allow ldisc referencing to occur again
674 */ 703 */
@@ -714,9 +743,12 @@ static void tty_reset_termios(struct tty_struct *tty)
714 * state closed 743 * state closed
715 */ 744 */
716 745
717static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) 746static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
718{ 747{
719 struct tty_ldisc *ld; 748 struct tty_ldisc *ld = tty_ldisc_get(ldisc);
749
750 if (IS_ERR(ld))
751 return -1;
720 752
721 tty_ldisc_close(tty, tty->ldisc); 753 tty_ldisc_close(tty, tty->ldisc);
722 tty_ldisc_put(tty->ldisc); 754 tty_ldisc_put(tty->ldisc);
@@ -724,10 +756,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
724 /* 756 /*
725 * Switch the line discipline back 757 * Switch the line discipline back
726 */ 758 */
727 ld = tty_ldisc_get(ldisc);
728 BUG_ON(IS_ERR(ld));
729 tty_ldisc_assign(tty, ld); 759 tty_ldisc_assign(tty, ld);
730 tty_set_termios_ldisc(tty, ldisc); 760 tty_set_termios_ldisc(tty, ldisc);
761
762 return 0;
731} 763}
732 764
733/** 765/**
@@ -802,13 +834,16 @@ void tty_ldisc_hangup(struct tty_struct *tty)
802 a FIXME */ 834 a FIXME */
803 if (tty->ldisc) { /* Not yet closed */ 835 if (tty->ldisc) { /* Not yet closed */
804 if (reset == 0) { 836 if (reset == 0) {
805 tty_ldisc_reinit(tty, tty->termios->c_line); 837
806 err = tty_ldisc_open(tty, tty->ldisc); 838 if (!tty_ldisc_reinit(tty, tty->termios->c_line))
839 err = tty_ldisc_open(tty, tty->ldisc);
840 else
841 err = 1;
807 } 842 }
808 /* If the re-open fails or we reset then go to N_TTY. The 843 /* If the re-open fails or we reset then go to N_TTY. The
809 N_TTY open cannot fail */ 844 N_TTY open cannot fail */
810 if (reset || err) { 845 if (reset || err) {
811 tty_ldisc_reinit(tty, N_TTY); 846 BUG_ON(tty_ldisc_reinit(tty, N_TTY));
812 WARN_ON(tty_ldisc_open(tty, tty->ldisc)); 847 WARN_ON(tty_ldisc_open(tty, tty->ldisc));
813 } 848 }
814 tty_ldisc_enable(tty); 849 tty_ldisc_enable(tty);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 273ab44cc91d..eab3a1ff99e4 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -553,12 +553,12 @@ static unsigned int
553vcs_poll(struct file *file, poll_table *wait) 553vcs_poll(struct file *file, poll_table *wait)
554{ 554{
555 struct vcs_poll_data *poll = vcs_poll_data_get(file); 555 struct vcs_poll_data *poll = vcs_poll_data_get(file);
556 int ret = 0; 556 int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
557 557
558 if (poll) { 558 if (poll) {
559 poll_wait(file, &poll->waitq, wait); 559 poll_wait(file, &poll->waitq, wait);
560 if (!poll->seen_last_update) 560 if (poll->seen_last_update)
561 ret = POLLIN | POLLRDNORM; 561 ret = DEFAULT_POLLMASK;
562 } 562 }
563 return ret; 563 return ret;
564} 564}
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index f1aaff6202a5..045bb4b823e1 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -965,10 +965,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg)
965 965
966static int proc_connectinfo(struct dev_state *ps, void __user *arg) 966static int proc_connectinfo(struct dev_state *ps, void __user *arg)
967{ 967{
968 struct usbdevfs_connectinfo ci; 968 struct usbdevfs_connectinfo ci = {
969 .devnum = ps->dev->devnum,
970 .slow = ps->dev->speed == USB_SPEED_LOW
971 };
969 972
970 ci.devnum = ps->dev->devnum;
971 ci.slow = ps->dev->speed == USB_SPEED_LOW;
972 if (copy_to_user(arg, &ci, sizeof(ci))) 973 if (copy_to_user(arg, &ci, sizeof(ci)))
973 return -EFAULT; 974 return -EFAULT;
974 return 0; 975 return 0;
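
The proc_connectinfo() hunk above swaps field-by-field assignment of an on-stack struct for a designated initializer, so every member not named is zero-initialized before copy_to_user() ships the struct to userspace. A standalone sketch of the difference; note that C leaves padding bytes unspecified even with an initializer, which is presumably why the iowarrior and sisusb hunks later in this series reach for memset() instead:

#include <stdio.h>
#include <string.h>

struct connectinfo {
	unsigned int devnum;
	unsigned char slow;
	/* padding bytes typically follow here on common ABIs */
};

int main(void)
{
	/* Members not named in the initializer list are zeroed... */
	struct connectinfo ci = { .devnum = 7 };

	/* ...but padding is unspecified by the C standard, so fully
	 * defensive code clears the whole object first. */
	struct connectinfo ci2;

	memset(&ci2, 0, sizeof(ci2));
	ci2.devnum = 7;
	ci2.slow = 0;

	printf("ci.slow=%u ci2.devnum=%u\n", (unsigned)ci.slow, ci2.devnum);
	return 0;
}
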
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b739ca814651..607d0db4a988 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -158,7 +158,7 @@ config USB_GADGET_FSL_USB2
158 boolean "Freescale Highspeed USB DR Peripheral Controller" 158 boolean "Freescale Highspeed USB DR Peripheral Controller"
159 depends on FSL_SOC || ARCH_MXC 159 depends on FSL_SOC || ARCH_MXC
160 select USB_GADGET_DUALSPEED 160 select USB_GADGET_DUALSPEED
161 select USB_FSL_MPH_DR_OF 161 select USB_FSL_MPH_DR_OF if OF
162 help 162 help
163 Some of Freescale PowerPC processors have a High Speed 163 Some of Freescale PowerPC processors have a High Speed
164 Dual-Role(DR) USB controller, which supports device mode. 164 Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
index 566cb2319056..e7e0c69d3b1f 100644
--- a/drivers/usb/gadget/goku_udc.h
+++ b/drivers/usb/gadget/goku_udc.h
@@ -251,7 +251,8 @@ struct goku_udc {
251 got_region:1, 251 got_region:1,
252 req_config:1, 252 req_config:1,
253 configured:1, 253 configured:1,
254 enabled:1; 254 enabled:1,
255 registered:1;
255 256
256 /* pci state used to access those endpoints */ 257 /* pci state used to access those endpoints */
257 struct pci_dev *pdev; 258 struct pci_dev *pdev;
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 01e5354a4c20..40f7716b31fc 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -105,11 +105,15 @@ struct gs_port {
105 wait_queue_head_t close_wait; /* wait for last close */ 105 wait_queue_head_t close_wait; /* wait for last close */
106 106
107 struct list_head read_pool; 107 struct list_head read_pool;
108 int read_started;
109 int read_allocated;
108 struct list_head read_queue; 110 struct list_head read_queue;
109 unsigned n_read; 111 unsigned n_read;
110 struct tasklet_struct push; 112 struct tasklet_struct push;
111 113
112 struct list_head write_pool; 114 struct list_head write_pool;
115 int write_started;
116 int write_allocated;
113 struct gs_buf port_write_buf; 117 struct gs_buf port_write_buf;
114 wait_queue_head_t drain_wait; /* wait while writes drain */ 118 wait_queue_head_t drain_wait; /* wait while writes drain */
115 119
@@ -363,6 +367,9 @@ __acquires(&port->port_lock)
363 struct usb_request *req; 367 struct usb_request *req;
364 int len; 368 int len;
365 369
370 if (port->write_started >= QUEUE_SIZE)
371 break;
372
366 req = list_entry(pool->next, struct usb_request, list); 373 req = list_entry(pool->next, struct usb_request, list);
367 len = gs_send_packet(port, req->buf, in->maxpacket); 374 len = gs_send_packet(port, req->buf, in->maxpacket);
368 if (len == 0) { 375 if (len == 0) {
@@ -397,6 +404,8 @@ __acquires(&port->port_lock)
397 break; 404 break;
398 } 405 }
399 406
407 port->write_started++;
408
400 /* abort immediately after disconnect */ 409 /* abort immediately after disconnect */
401 if (!port->port_usb) 410 if (!port->port_usb)
402 break; 411 break;
@@ -418,7 +427,6 @@ __acquires(&port->port_lock)
418{ 427{
419 struct list_head *pool = &port->read_pool; 428 struct list_head *pool = &port->read_pool;
420 struct usb_ep *out = port->port_usb->out; 429 struct usb_ep *out = port->port_usb->out;
421 unsigned started = 0;
422 430
423 while (!list_empty(pool)) { 431 while (!list_empty(pool)) {
424 struct usb_request *req; 432 struct usb_request *req;
@@ -430,6 +438,9 @@ __acquires(&port->port_lock)
430 if (!tty) 438 if (!tty)
431 break; 439 break;
432 440
441 if (port->read_started >= QUEUE_SIZE)
442 break;
443
433 req = list_entry(pool->next, struct usb_request, list); 444 req = list_entry(pool->next, struct usb_request, list);
434 list_del(&req->list); 445 list_del(&req->list);
435 req->length = out->maxpacket; 446 req->length = out->maxpacket;
@@ -447,13 +458,13 @@ __acquires(&port->port_lock)
447 list_add(&req->list, pool); 458 list_add(&req->list, pool);
448 break; 459 break;
449 } 460 }
450 started++; 461 port->read_started++;
451 462
452 /* abort immediately after disconnect */ 463 /* abort immediately after disconnect */
453 if (!port->port_usb) 464 if (!port->port_usb)
454 break; 465 break;
455 } 466 }
456 return started; 467 return port->read_started;
457} 468}
458 469
459/* 470/*
@@ -535,6 +546,7 @@ static void gs_rx_push(unsigned long _port)
535 } 546 }
536recycle: 547recycle:
537 list_move(&req->list, &port->read_pool); 548 list_move(&req->list, &port->read_pool);
549 port->read_started--;
538 } 550 }
539 551
540 /* Push from tty to ldisc; without low_latency set this is handled by 552 /* Push from tty to ldisc; without low_latency set this is handled by
@@ -587,6 +599,7 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
587 599
588 spin_lock(&port->port_lock); 600 spin_lock(&port->port_lock);
589 list_add(&req->list, &port->write_pool); 601 list_add(&req->list, &port->write_pool);
602 port->write_started--;
590 603
591 switch (req->status) { 604 switch (req->status) {
592 default: 605 default:
@@ -608,7 +621,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
608 spin_unlock(&port->port_lock); 621 spin_unlock(&port->port_lock);
609} 622}
610 623
611static void gs_free_requests(struct usb_ep *ep, struct list_head *head) 624static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
625 int *allocated)
612{ 626{
613 struct usb_request *req; 627 struct usb_request *req;
614 628
@@ -616,25 +630,31 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
616 req = list_entry(head->next, struct usb_request, list); 630 req = list_entry(head->next, struct usb_request, list);
617 list_del(&req->list); 631 list_del(&req->list);
618 gs_free_req(ep, req); 632 gs_free_req(ep, req);
633 if (allocated)
634 (*allocated)--;
619 } 635 }
620} 636}
621 637
622static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, 638static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
623 void (*fn)(struct usb_ep *, struct usb_request *)) 639 void (*fn)(struct usb_ep *, struct usb_request *),
640 int *allocated)
624{ 641{
625 int i; 642 int i;
626 struct usb_request *req; 643 struct usb_request *req;
644 int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
627 645
628 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't 646 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
629 * do quite that many this time, don't fail ... we just won't 647 * do quite that many this time, don't fail ... we just won't
630 * be as speedy as we might otherwise be. 648 * be as speedy as we might otherwise be.
631 */ 649 */
632 for (i = 0; i < QUEUE_SIZE; i++) { 650 for (i = 0; i < n; i++) {
633 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); 651 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
634 if (!req) 652 if (!req)
635 return list_empty(head) ? -ENOMEM : 0; 653 return list_empty(head) ? -ENOMEM : 0;
636 req->complete = fn; 654 req->complete = fn;
637 list_add_tail(&req->list, head); 655 list_add_tail(&req->list, head);
656 if (allocated)
657 (*allocated)++;
638 } 658 }
639 return 0; 659 return 0;
640} 660}
@@ -661,14 +681,15 @@ static int gs_start_io(struct gs_port *port)
661 * configurations may use different endpoints with a given port; 681 * configurations may use different endpoints with a given port;
662 * and high speed vs full speed changes packet sizes too. 682 * and high speed vs full speed changes packet sizes too.
663 */ 683 */
664 status = gs_alloc_requests(ep, head, gs_read_complete); 684 status = gs_alloc_requests(ep, head, gs_read_complete,
685 &port->read_allocated);
665 if (status) 686 if (status)
666 return status; 687 return status;
667 688
668 status = gs_alloc_requests(port->port_usb->in, &port->write_pool, 689 status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
669 gs_write_complete); 690 gs_write_complete, &port->write_allocated);
670 if (status) { 691 if (status) {
671 gs_free_requests(ep, head); 692 gs_free_requests(ep, head, &port->read_allocated);
672 return status; 693 return status;
673 } 694 }
674 695
@@ -680,8 +701,9 @@ static int gs_start_io(struct gs_port *port)
680 if (started) { 701 if (started) {
681 tty_wakeup(port->port_tty); 702 tty_wakeup(port->port_tty);
682 } else { 703 } else {
683 gs_free_requests(ep, head); 704 gs_free_requests(ep, head, &port->read_allocated);
684 gs_free_requests(port->port_usb->in, &port->write_pool); 705 gs_free_requests(port->port_usb->in, &port->write_pool,
706 &port->write_allocated);
685 status = -EIO; 707 status = -EIO;
686 } 708 }
687 709
@@ -1315,8 +1337,12 @@ void gserial_disconnect(struct gserial *gser)
1315 spin_lock_irqsave(&port->port_lock, flags); 1337 spin_lock_irqsave(&port->port_lock, flags);
1316 if (port->open_count == 0 && !port->openclose) 1338 if (port->open_count == 0 && !port->openclose)
1317 gs_buf_free(&port->port_write_buf); 1339 gs_buf_free(&port->port_write_buf);
1318 gs_free_requests(gser->out, &port->read_pool); 1340 gs_free_requests(gser->out, &port->read_pool, NULL);
1319 gs_free_requests(gser->out, &port->read_queue); 1341 gs_free_requests(gser->out, &port->read_queue, NULL);
1320 gs_free_requests(gser->in, &port->write_pool); 1342 gs_free_requests(gser->in, &port->write_pool, NULL);
1343
1344 port->read_allocated = port->read_started =
1345 port->write_allocated = port->write_started = 0;
1346
1321 spin_unlock_irqrestore(&port->port_lock, flags); 1347 spin_unlock_irqrestore(&port->port_lock, flags);
1322} 1348}
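
The u_serial hunks above replace a per-call "started" local with per-port read/write counters, so allocation tops up only the shortfall against QUEUE_SIZE and submission stops once QUEUE_SIZE requests are in flight. A standalone sketch of that bookkeeping (types and sizes invented for illustration):

#include <stdio.h>
#include <stdlib.h>

#define QUEUE_SIZE 16

struct pool {
	void *req[QUEUE_SIZE];
	int allocated;	/* requests that currently exist */
	int started;	/* requests queued to the hardware */
};

/* Allocate only the shortfall, mirroring n = QUEUE_SIZE - *allocated. */
static int pool_top_up(struct pool *p)
{
	int n = QUEUE_SIZE - p->allocated;

	while (n-- > 0) {
		void *req = malloc(64);	/* stand-in for gs_alloc_req() */

		if (!req)
			return p->allocated ? 0 : -1;
		p->req[p->allocated++] = req;
	}
	return 0;
}

/* Submission path: refuse once QUEUE_SIZE requests are in flight. */
static int pool_start_one(struct pool *p)
{
	if (p->started >= QUEUE_SIZE)
		return -1;
	p->started++;
	return 0;
}

/* Completion path gives the slot back. */
static void pool_complete_one(struct pool *p)
{
	if (p->started > 0)
		p->started--;
}

int main(void)
{
	struct pool p = { { NULL }, 0, 0 };
	int submitted = 0;

	if (pool_top_up(&p))
		return 1;
	while (pool_start_one(&p) == 0)
		submitted++;
	pool_complete_one(&p);
	printf("allocated %d, in flight %d after %d submissions\n",
	       p.allocated, p.started, submitted);
	while (p.allocated > 0)
		free(p.req[--p.allocated]);
	return 0;
}
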
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2391c396ca32..6f4f8e6a40c7 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -122,7 +122,7 @@ config USB_EHCI_FSL
122 bool "Support for Freescale on-chip EHCI USB controller" 122 bool "Support for Freescale on-chip EHCI USB controller"
123 depends on USB_EHCI_HCD && FSL_SOC 123 depends on USB_EHCI_HCD && FSL_SOC
124 select USB_EHCI_ROOT_HUB_TT 124 select USB_EHCI_ROOT_HUB_TT
125 select USB_FSL_MPH_DR_OF 125 select USB_FSL_MPH_DR_OF if OF
126 ---help--- 126 ---help---
127 Variation of ARC USB block used in some Freescale chips. 127 Variation of ARC USB block used in some Freescale chips.
128 128
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ac9c4d7c44af..bce85055019a 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -36,6 +36,8 @@ struct ehci_mxc_priv {
36static int ehci_mxc_setup(struct usb_hcd *hcd) 36static int ehci_mxc_setup(struct usb_hcd *hcd)
37{ 37{
38 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 38 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
39 struct device *dev = hcd->self.controller;
40 struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
39 int retval; 41 int retval;
40 42
41 /* EHCI registers start at offset 0x100 */ 43 /* EHCI registers start at offset 0x100 */
@@ -63,6 +65,12 @@ static int ehci_mxc_setup(struct usb_hcd *hcd)
63 65
64 ehci_reset(ehci); 66 ehci_reset(ehci);
65 67
68 /* set up the PORTSCx register */
69 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
70
71 /* is this really needed? */
72 msleep(10);
73
66 ehci_port_power(ehci, 0); 74 ehci_port_power(ehci, 0);
67 return 0; 75 return 0;
68} 76}
@@ -114,7 +122,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
114 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; 122 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
115 struct usb_hcd *hcd; 123 struct usb_hcd *hcd;
116 struct resource *res; 124 struct resource *res;
117 int irq, ret, temp; 125 int irq, ret;
118 struct ehci_mxc_priv *priv; 126 struct ehci_mxc_priv *priv;
119 struct device *dev = &pdev->dev; 127 struct device *dev = &pdev->dev;
120 128
@@ -188,10 +196,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
188 clk_enable(priv->ahbclk); 196 clk_enable(priv->ahbclk);
189 } 197 }
190 198
191 /* set up the PORTSCx register */
192 ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
193 mdelay(10);
194
195 /* setup specific usb hw */ 199 /* setup specific usb hw */
196 ret = mxc_initialize_usb_hw(pdev->id, pdata->flags); 200 ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
197 if (ret < 0) 201 if (ret < 0)
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index 10e1872f3ab9..931d588c3fb5 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -273,4 +273,4 @@ static struct platform_driver ohci_hcd_jz4740_driver = {
273 }, 273 },
274}; 274};
275 275
276MODULE_ALIAS("platfrom:jz4740-ohci"); 276MODULE_ALIAS("platform:jz4740-ohci");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 375664198776..c9078e4e1f4d 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -553,6 +553,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
553 /* needed for power consumption */ 553 /* needed for power consumption */
554 struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc; 554 struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
555 555
556 memset(&info, 0, sizeof(info));
556 /* directly from the descriptor */ 557 /* directly from the descriptor */
557 info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); 558 info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
558 info.product = dev->product_id; 559 info.product = dev->product_id;
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 70d00e99a4b4..dd573abd2d1e 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3008,6 +3008,7 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3008#else 3008#else
3009 x.sisusb_conactive = 0; 3009 x.sisusb_conactive = 0;
3010#endif 3010#endif
3011 memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
3011 3012
3012 if (copy_to_user((void __user *)arg, &x, sizeof(x))) 3013 if (copy_to_user((void __user *)arg, &x, sizeof(x)))
3013 retval = -EFAULT; 3014 retval = -EFAULT;
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 611a9d274363..fcb5206a65bd 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -171,8 +171,9 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci)
171 } 171 }
172 172
173 /* Start sampling ID pin, when plug is removed from MUSB */ 173 /* Start sampling ID pin, when plug is removed from MUSB */
174 if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE 174 if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
175 || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { 175 || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) ||
176 (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
176 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 177 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
177 musb->a_wait_bcon = TIMER_DELAY; 178 musb->a_wait_bcon = TIMER_DELAY;
178 } 179 }
@@ -323,30 +324,8 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
323 return -EIO; 324 return -EIO;
324} 325}
325 326
326int __init musb_platform_init(struct musb *musb, void *board_data) 327static void musb_platform_reg_init(struct musb *musb)
327{ 328{
328
329 /*
330 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
331 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
332 * be low for DEVICE mode and high for HOST mode. We set it high
333 * here because we are in host mode
334 */
335
336 if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
337 printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n",
338 musb->config->gpio_vrsel);
339 return -ENODEV;
340 }
341 gpio_direction_output(musb->config->gpio_vrsel, 0);
342
343 usb_nop_xceiv_register();
344 musb->xceiv = otg_get_transceiver();
345 if (!musb->xceiv) {
346 gpio_free(musb->config->gpio_vrsel);
347 return -ENODEV;
348 }
349
350 if (ANOMALY_05000346) { 329 if (ANOMALY_05000346) {
351 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); 330 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
352 SSYNC(); 331 SSYNC();
@@ -358,7 +337,8 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
358 } 337 }
359 338
360 /* Configure PLL oscillator register */ 339 /* Configure PLL oscillator register */
361 bfin_write_USB_PLLOSC_CTRL(0x30a8); 340 bfin_write_USB_PLLOSC_CTRL(0x3080 |
341 ((480/musb->config->clkin) << 1));
362 SSYNC(); 342 SSYNC();
363 343
364 bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1); 344 bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
@@ -380,6 +360,33 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
380 EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA | 360 EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
381 EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); 361 EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
382 SSYNC(); 362 SSYNC();
363}
364
365int __init musb_platform_init(struct musb *musb, void *board_data)
366{
367
368 /*
369 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
370 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
371 * be low for DEVICE mode and high for HOST mode. We set it high
372 * here because we are in host mode
373 */
374
375 if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
 376 printk(KERN_ERR "Failed to request USB_VRSEL GPIO_%d\n",
377 musb->config->gpio_vrsel);
378 return -ENODEV;
379 }
380 gpio_direction_output(musb->config->gpio_vrsel, 0);
381
382 usb_nop_xceiv_register();
383 musb->xceiv = otg_get_transceiver();
384 if (!musb->xceiv) {
385 gpio_free(musb->config->gpio_vrsel);
386 return -ENODEV;
387 }
388
389 musb_platform_reg_init(musb);
383 390
384 if (is_host_enabled(musb)) { 391 if (is_host_enabled(musb)) {
385 musb->board_set_vbus = bfin_set_vbus; 392 musb->board_set_vbus = bfin_set_vbus;
@@ -394,6 +401,27 @@ int __init musb_platform_init(struct musb *musb, void *board_data)
394 return 0; 401 return 0;
395} 402}
396 403
404#ifdef CONFIG_PM
405void musb_platform_save_context(struct musb *musb,
406 struct musb_context_registers *musb_context)
407{
408 if (is_host_active(musb))
409 /*
410 * During hibernate gpio_vrsel will change from high to low
411 * low which will generate wakeup event resume the system
412 * immediately. Set it to 0 before hibernate to avoid this
413 * wakeup event.
414 */
415 gpio_set_value(musb->config->gpio_vrsel, 0);
416}
417
418void musb_platform_restore_context(struct musb *musb,
419 struct musb_context_registers *musb_context)
420{
421 musb_platform_reg_init(musb);
422}
423#endif
424
397int musb_platform_exit(struct musb *musb) 425int musb_platform_exit(struct musb *musb)
398{ 426{
399 gpio_free(musb->config->gpio_vrsel); 427 gpio_free(musb->config->gpio_vrsel);
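
The PLLOSC hunk above replaces the hard-coded 0x30a8 with 0x3080 | ((480 / clkin) << 1). A quick standalone check that the new expression reproduces the old constant when the input clock is 24 MHz (an assumed, typical value):

#include <assert.h>

int main(void)
{
	unsigned int clkin = 24;	/* MHz; assumed input clock */
	unsigned int reg = 0x3080 | ((480 / clkin) << 1);

	/* 480/24 = 20, 20 << 1 = 0x28, 0x3080 | 0x28 = 0x30a8 */
	assert(reg == 0x30a8);
	return 0;
}
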
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c9f9024c5515..e6669fc3b804 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -552,7 +552,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
552 if (int_usb & MUSB_INTR_SESSREQ) { 552 if (int_usb & MUSB_INTR_SESSREQ) {
553 void __iomem *mbase = musb->mregs; 553 void __iomem *mbase = musb->mregs;
554 554
555 if (devctl & MUSB_DEVCTL_BDEVICE) { 555 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
556 && (devctl & MUSB_DEVCTL_BDEVICE)) {
556 DBG(3, "SessReq while on B state\n"); 557 DBG(3, "SessReq while on B state\n");
557 return IRQ_HANDLED; 558 return IRQ_HANDLED;
558 } 559 }
@@ -1052,6 +1053,11 @@ static void musb_shutdown(struct platform_device *pdev)
1052 clk_put(musb->clock); 1053 clk_put(musb->clock);
1053 spin_unlock_irqrestore(&musb->lock, flags); 1054 spin_unlock_irqrestore(&musb->lock, flags);
1054 1055
1056 if (!is_otg_enabled(musb) && is_host_enabled(musb))
1057 usb_remove_hcd(musb_to_hcd(musb));
1058 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1059 musb_platform_exit(musb);
1060
1055 /* FIXME power down */ 1061 /* FIXME power down */
1056} 1062}
1057 1063
@@ -2244,13 +2250,6 @@ static int __exit musb_remove(struct platform_device *pdev)
2244 */ 2250 */
2245 musb_exit_debugfs(musb); 2251 musb_exit_debugfs(musb);
2246 musb_shutdown(pdev); 2252 musb_shutdown(pdev);
2247#ifdef CONFIG_USB_MUSB_HDRC_HCD
2248 if (musb->board_mode == MUSB_HOST)
2249 usb_remove_hcd(musb_to_hcd(musb));
2250#endif
2251 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2252 musb_platform_exit(musb);
2253 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2254 2253
2255 musb_free(musb); 2254 musb_free(musb);
2256 iounmap(ctrl_base); 2255 iounmap(ctrl_base);
@@ -2411,9 +2410,6 @@ static int musb_suspend(struct device *dev)
2411 unsigned long flags; 2410 unsigned long flags;
2412 struct musb *musb = dev_to_musb(&pdev->dev); 2411 struct musb *musb = dev_to_musb(&pdev->dev);
2413 2412
2414 if (!musb->clock)
2415 return 0;
2416
2417 spin_lock_irqsave(&musb->lock, flags); 2413 spin_lock_irqsave(&musb->lock, flags);
2418 2414
2419 if (is_peripheral_active(musb)) { 2415 if (is_peripheral_active(musb)) {
@@ -2428,10 +2424,12 @@ static int musb_suspend(struct device *dev)
2428 2424
2429 musb_save_context(musb); 2425 musb_save_context(musb);
2430 2426
2431 if (musb->set_clock) 2427 if (musb->clock) {
2432 musb->set_clock(musb->clock, 0); 2428 if (musb->set_clock)
2433 else 2429 musb->set_clock(musb->clock, 0);
2434 clk_disable(musb->clock); 2430 else
2431 clk_disable(musb->clock);
2432 }
2435 spin_unlock_irqrestore(&musb->lock, flags); 2433 spin_unlock_irqrestore(&musb->lock, flags);
2436 return 0; 2434 return 0;
2437} 2435}
@@ -2441,13 +2439,12 @@ static int musb_resume_noirq(struct device *dev)
2441 struct platform_device *pdev = to_platform_device(dev); 2439 struct platform_device *pdev = to_platform_device(dev);
2442 struct musb *musb = dev_to_musb(&pdev->dev); 2440 struct musb *musb = dev_to_musb(&pdev->dev);
2443 2441
2444 if (!musb->clock) 2442 if (musb->clock) {
2445 return 0; 2443 if (musb->set_clock)
2446 2444 musb->set_clock(musb->clock, 1);
2447 if (musb->set_clock) 2445 else
2448 musb->set_clock(musb->clock, 1); 2446 clk_enable(musb->clock);
2449 else 2447 }
2450 clk_enable(musb->clock);
2451 2448
2452 musb_restore_context(musb); 2449 musb_restore_context(musb);
2453 2450
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 69797e5b46a7..febaabcc2b35 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -487,7 +487,7 @@ struct musb_context_registers {
487}; 487};
488 488
489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ 489#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
490 defined(CONFIG_ARCH_OMAP4) 490 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN)
491extern void musb_platform_save_context(struct musb *musb, 491extern void musb_platform_save_context(struct musb *musb,
492 struct musb_context_registers *musb_context); 492 struct musb_context_registers *musb_context);
493extern void musb_platform_restore_context(struct musb *musb, 493extern void musb_platform_restore_context(struct musb *musb,
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 5d815049cbaa..36cfd060dbe5 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -644,10 +644,8 @@ static void rxstate(struct musb *musb, struct musb_request *req)
644 */ 644 */
645 645
646 csr |= MUSB_RXCSR_DMAENAB; 646 csr |= MUSB_RXCSR_DMAENAB;
647 if (!musb_ep->hb_mult &&
648 musb_ep->hw_ep->rx_double_buffered)
649 csr |= MUSB_RXCSR_AUTOCLEAR;
650#ifdef USE_MODE1 647#ifdef USE_MODE1
648 csr |= MUSB_RXCSR_AUTOCLEAR;
651 /* csr |= MUSB_RXCSR_DMAMODE; */ 649 /* csr |= MUSB_RXCSR_DMAMODE; */
652 650
653 /* this special sequence (enabling and then 651 /* this special sequence (enabling and then
@@ -656,6 +654,10 @@ static void rxstate(struct musb *musb, struct musb_request *req)
656 */ 654 */
657 musb_writew(epio, MUSB_RXCSR, 655 musb_writew(epio, MUSB_RXCSR,
658 csr | MUSB_RXCSR_DMAMODE); 656 csr | MUSB_RXCSR_DMAMODE);
657#else
658 if (!musb_ep->hb_mult &&
659 musb_ep->hw_ep->rx_double_buffered)
660 csr |= MUSB_RXCSR_AUTOCLEAR;
659#endif 661#endif
660 musb_writew(epio, MUSB_RXCSR, csr); 662 musb_writew(epio, MUSB_RXCSR, csr);
661 663
@@ -807,7 +809,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
807 809
808#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) 810#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
809 /* Autoclear doesn't clear RxPktRdy for short packets */ 811 /* Autoclear doesn't clear RxPktRdy for short packets */
810 if ((dma->desired_mode == 0) 812 if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
811 || (dma->actual_len 813 || (dma->actual_len
812 & (musb_ep->packet_sz - 1))) { 814 & (musb_ep->packet_sz - 1))) {
813 /* ack the read! */ 815 /* ack the read! */
@@ -818,8 +820,16 @@ void musb_g_rx(struct musb *musb, u8 epnum)
818 /* incomplete, and not short? wait for next IN packet */ 820 /* incomplete, and not short? wait for next IN packet */
819 if ((request->actual < request->length) 821 if ((request->actual < request->length)
820 && (musb_ep->dma->actual_len 822 && (musb_ep->dma->actual_len
821 == musb_ep->packet_sz)) 823 == musb_ep->packet_sz)) {
 824 /* In the double-buffered case, continue to unload the
 825 * FIFO if there is an Rx packet in it.
 826 */
827 csr = musb_readw(epio, MUSB_RXCSR);
828 if ((csr & MUSB_RXCSR_RXPKTRDY) &&
829 hw_ep->rx_double_buffered)
830 goto exit;
822 return; 831 return;
832 }
823#endif 833#endif
824 musb_g_giveback(musb_ep, request, 0); 834 musb_g_giveback(musb_ep, request, 0);
825 835
@@ -827,7 +837,7 @@ void musb_g_rx(struct musb *musb, u8 epnum)
827 if (!request) 837 if (!request)
828 return; 838 return;
829 } 839 }
830 840exit:
831 /* Analyze request */ 841 /* Analyze request */
832 rxstate(musb, to_musb_request(request)); 842 rxstate(musb, to_musb_request(request));
833} 843}
@@ -916,13 +926,9 @@ static int musb_gadget_enable(struct usb_ep *ep,
916 * likewise high bandwidth periodic tx 926 * likewise high bandwidth periodic tx
917 */ 927 */
918 /* Set TXMAXP with the FIFO size of the endpoint 928 /* Set TXMAXP with the FIFO size of the endpoint
919 * to disable double buffering mode. Currently, It seems that double 929 * to disable double buffering mode.
920 * buffering has problem if musb RTL revision number < 2.0.
921 */ 930 */
922 if (musb->hwvers < MUSB_HWVERS_2000) 931 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
923 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
924 else
925 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
926 932
927 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 933 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
928 if (musb_readw(regs, MUSB_TXCSR) 934 if (musb_readw(regs, MUSB_TXCSR)
@@ -958,10 +964,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
958 /* Set RXMAXP with the FIFO size of the endpoint 964 /* Set RXMAXP with the FIFO size of the endpoint
959 * to disable double buffering mode. 965 * to disable double buffering mode.
960 */ 966 */
961 if (musb->hwvers < MUSB_HWVERS_2000) 967 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
962 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
963 else
964 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
965 968
966 /* force shared fifo to OUT-only mode */ 969 /* force shared fifo to OUT-only mode */
967 if (hw_ep->is_shared_fifo) { 970 if (hw_ep->is_shared_fifo) {
@@ -1166,8 +1169,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1166 : DMA_FROM_DEVICE); 1169 : DMA_FROM_DEVICE);
1167 request->mapped = 0; 1170 request->mapped = 0;
1168 } 1171 }
1169 } else if (!req->buf) {
1170 return -ENODATA;
1171 } else 1172 } else
1172 request->mapped = 0; 1173 request->mapped = 0;
1173 1174
@@ -1695,8 +1696,10 @@ int __init musb_gadget_setup(struct musb *musb)
1695 musb_platform_try_idle(musb, 0); 1696 musb_platform_try_idle(musb, 0);
1696 1697
1697 status = device_register(&musb->g.dev); 1698 status = device_register(&musb->g.dev);
1698 if (status != 0) 1699 if (status != 0) {
1700 put_device(&musb->g.dev);
1699 the_gadget = NULL; 1701 the_gadget = NULL;
1702 }
1700 return status; 1703 return status;
1701} 1704}
1702 1705
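
The musb_gadget_enable() hunks above now always program MAXP as packet_sz | (hb_mult << 11). A standalone sketch of that packing; the field widths below are inferred from the shift in the hunk, not taken from a datasheet:

#include <assert.h>
#include <stdint.h>

static uint16_t maxp_pack(uint16_t packet_sz, uint16_t hb_mult)
{
	return (uint16_t)(packet_sz | (hb_mult << 11));
}

static uint16_t maxp_payload(uint16_t reg)
{
	return reg & 0x07ff;		/* bits 0..10: payload size */
}

static uint16_t maxp_mult(uint16_t reg)
{
	return (reg >> 11) & 0x3;	/* assumed 2-bit multiplier field */
}

int main(void)
{
	uint16_t reg = maxp_pack(1024, 2);	/* e.g. high-bandwidth isoc */

	assert(maxp_payload(reg) == 1024);
	assert(maxp_mult(reg) == 2);
	return 0;
}
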
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 244267527a60..5a727c5b8676 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -633,8 +633,9 @@ static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
633 return 0; 633 return 0;
634} 634}
635 635
636static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum) 636static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
637{ 637{
638 return 0;
638} 639}
639 640
640#endif /* CONFIG_BLACKFIN */ 641#endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 6f771af5cbdb..563114d613d6 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -158,6 +158,8 @@ static int dma_channel_program(struct dma_channel *channel,
158 dma_addr_t dma_addr, u32 len) 158 dma_addr_t dma_addr, u32 len)
159{ 159{
160 struct musb_dma_channel *musb_channel = channel->private_data; 160 struct musb_dma_channel *musb_channel = channel->private_data;
161 struct musb_dma_controller *controller = musb_channel->controller;
162 struct musb *musb = controller->private_data;
161 163
162 DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", 164 DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
163 musb_channel->epnum, 165 musb_channel->epnum,
@@ -167,6 +169,18 @@ static int dma_channel_program(struct dma_channel *channel,
167 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || 169 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
168 channel->status == MUSB_DMA_STATUS_BUSY); 170 channel->status == MUSB_DMA_STATUS_BUSY);
169 171
172 /*
173 * The DMA engine in RTL1.8 and above cannot handle
174 * DMA addresses that are not aligned to a 4 byte boundary.
175 * It ends up masking the last two bits of the address
176 * programmed in DMA_ADDR.
177 *
178 * Fail such DMA transfers, so that the backup PIO mode
 179 * can carry out the transfer.
180 */
181 if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
182 return false;
183
170 channel->actual_len = 0; 184 channel->actual_len = 0;
171 musb_channel->start_addr = dma_addr; 185 musb_channel->start_addr = dma_addr;
172 musb_channel->len = len; 186 musb_channel->len = len;
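
The musbhsdma hunk above rejects transfers whose DMA address is not 4-byte aligned, so the PIO fallback can handle them instead. A standalone sketch of the equivalent test (for power-of-two alignments, addr % 4 and addr & 3 are the same check):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool dma_addr_ok(uintptr_t addr)
{
	return (addr & 3) == 0;		/* equivalent to addr % 4 == 0 */
}

int main(void)
{
	assert(dma_addr_ok(0x1000));
	assert(!dma_addr_ok(0x1002));
	assert(!dma_addr_ok(0x1003));
	return 0;
}
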
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 89a9a5847803..76f8b3556672 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -794,6 +794,8 @@ static struct usb_device_id id_table_combined [] = {
794 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, 794 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
795 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, 795 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
796 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, 796 { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
797 { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
798 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
797 { }, /* Optional parameter entry */ 799 { }, /* Optional parameter entry */
798 { } /* Terminating entry */ 800 { } /* Terminating entry */
799}; 801};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 7dfe02f1fb6a..263f62551197 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1100,3 +1100,10 @@
1100#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18 1100#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18
1101#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C 1101#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C
1102#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D 1102#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D
1103
1104/*
1105 * Milkymist One JTAG/Serial
1106 */
1107#define QIHARDWARE_VID 0x20B7
1108#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
1109
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2297fb1bcf65..ef2977d3a613 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -518,7 +518,7 @@ static const struct usb_device_id option_ids[] = {
518 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) }, 518 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
519 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, 519 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
520 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, 520 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
521 { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, 521 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, 522 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
523 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, 523 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
524 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, 524 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 2054b1e25a65..d1268191acbd 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -331,10 +331,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
331 331
332 iu->iu_id = IU_ID_COMMAND; 332 iu->iu_id = IU_ID_COMMAND;
333 iu->tag = cpu_to_be16(stream_id); 333 iu->tag = cpu_to_be16(stream_id);
334 if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER)) 334 iu->prio_attr = UAS_SIMPLE_TAG;
335 iu->prio_attr = UAS_ORDERED_TAG;
336 else
337 iu->prio_attr = UAS_SIMPLE_TAG;
338 iu->len = len; 335 iu->len = len;
339 int_to_scsilun(sdev->lun, &iu->lun); 336 int_to_scsilun(sdev->lun, &iu->lun);
340 memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); 337 memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c
index 436e4f7110cb..e45e673b8770 100644
--- a/drivers/uwb/allocator.c
+++ b/drivers/uwb/allocator.c
@@ -326,7 +326,8 @@ int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *availab
326 int bit_index; 326 int bit_index;
327 327
328 ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); 328 ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
329 329 if (!ai)
330 return UWB_RSV_ALLOC_NOT_FOUND;
330 ai->min_mas = rsv->min_mas; 331 ai->min_mas = rsv->min_mas;
331 ai->max_mas = rsv->max_mas; 332 ai->max_mas = rsv->max_mas;
332 ai->max_interval = rsv->max_interval; 333 ai->max_interval = rsv->max_interval;
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 3ec24609151e..734c650a47c4 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -502,8 +502,10 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
502 struct device_attribute *attr, const char *buf, size_t count) 502 struct device_attribute *attr, const char *buf, size_t count)
503{ 503{
504 struct adp8860_bl *data = dev_get_drvdata(dev); 504 struct adp8860_bl *data = dev_get_drvdata(dev);
505 int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
506 if (ret)
507 return ret;
505 508
506 strict_strtoul(buf, 10, &data->cached_daylight_max);
507 return adp8860_store(dev, buf, count, ADP8860_BLMX1); 509 return adp8860_store(dev, buf, count, ADP8860_BLMX1);
508} 510}
509static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show, 511static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show,
@@ -614,7 +616,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
614 if (val == 0) { 616 if (val == 0) {
615 /* Enable automatic ambient light sensing */ 617 /* Enable automatic ambient light sensing */
616 adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); 618 adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
617 } else if ((val > 0) && (val < 6)) { 619 } else if ((val > 0) && (val <= 3)) {
618 /* Disable automatic ambient light sensing */ 620 /* Disable automatic ambient light sensing */
619 adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); 621 adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
620 622
@@ -622,7 +624,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
622 mutex_lock(&data->lock); 624 mutex_lock(&data->lock);
623 adp8860_read(data->client, ADP8860_CFGR, &reg_val); 625 adp8860_read(data->client, ADP8860_CFGR, &reg_val);
624 reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT); 626 reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
625 reg_val |= val << CFGR_BLV_SHIFT; 627 reg_val |= (val - 1) << CFGR_BLV_SHIFT;
626 adp8860_write(data->client, ADP8860_CFGR, reg_val); 628 adp8860_write(data->client, ADP8860_CFGR, reg_val);
627 mutex_unlock(&data->lock); 629 mutex_unlock(&data->lock);
628 } 630 }
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 9093ef0fa869..c67801e57aaf 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -78,7 +78,7 @@ static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
78 const u16 slpin = 0x10; 78 const u16 slpin = 0x10;
79 const u16 disoff = 0x28; 79 const u16 disoff = 0x28;
80 80
81 if (power) { 81 if (power <= FB_BLANK_NORMAL) {
82 if (priv->lcd_on) 82 if (priv->lcd_on)
83 return 0; 83 return 0;
84 84
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index abc43a0eb97d..5d3cf33953ac 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -129,7 +129,7 @@ static int lms283gf05_power_set(struct lcd_device *ld, int power)
129 struct spi_device *spi = st->spi; 129 struct spi_device *spi = st->spi;
130 struct lms283gf05_pdata *pdata = spi->dev.platform_data; 130 struct lms283gf05_pdata *pdata = spi->dev.platform_data;
131 131
132 if (power) { 132 if (power <= FB_BLANK_NORMAL) {
133 if (pdata) 133 if (pdata)
134 lms283gf05_reset(pdata->reset_gpio, 134 lms283gf05_reset(pdata->reset_gpio,
135 pdata->reset_inverted); 135 pdata->reset_inverted);
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 9fb533f6373e..1485f7345f49 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -335,6 +335,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
335 }, 335 },
336 .driver_data = (void *)&nvidia_chipset_data, 336 .driver_data = (void *)&nvidia_chipset_data,
337 }, 337 },
338 {
339 .callback = mbp_dmi_match,
340 .ident = "MacBookAir 3,1",
341 .matches = {
342 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
343 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
344 },
345 .driver_data = (void *)&nvidia_chipset_data,
346 },
347 {
348 .callback = mbp_dmi_match,
349 .ident = "MacBookAir 3,2",
350 .matches = {
351 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
352 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
353 },
354 .driver_data = (void *)&nvidia_chipset_data,
355 },
338 { } 356 { }
339}; 357};
340 358
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 550443518891..21866ec69656 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -25,6 +25,7 @@ struct pwm_bl_data {
25 struct pwm_device *pwm; 25 struct pwm_device *pwm;
26 struct device *dev; 26 struct device *dev;
27 unsigned int period; 27 unsigned int period;
28 unsigned int lth_brightness;
28 int (*notify)(struct device *, 29 int (*notify)(struct device *,
29 int brightness); 30 int brightness);
30}; 31};
@@ -48,7 +49,9 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
48 pwm_config(pb->pwm, 0, pb->period); 49 pwm_config(pb->pwm, 0, pb->period);
49 pwm_disable(pb->pwm); 50 pwm_disable(pb->pwm);
50 } else { 51 } else {
51 pwm_config(pb->pwm, brightness * pb->period / max, pb->period); 52 brightness = pb->lth_brightness +
53 (brightness * (pb->period - pb->lth_brightness) / max);
54 pwm_config(pb->pwm, brightness, pb->period);
52 pwm_enable(pb->pwm); 55 pwm_enable(pb->pwm);
53 } 56 }
54 return 0; 57 return 0;
@@ -92,6 +95,8 @@ static int pwm_backlight_probe(struct platform_device *pdev)
92 95
93 pb->period = data->pwm_period_ns; 96 pb->period = data->pwm_period_ns;
94 pb->notify = data->notify; 97 pb->notify = data->notify;
98 pb->lth_brightness = data->lth_brightness *
99 (data->pwm_period_ns / data->max_brightness);
95 pb->dev = &pdev->dev; 100 pb->dev = &pdev->dev;
96 101
97 pb->pwm = pwm_request(data->pwm_id, "backlight"); 102 pb->pwm = pwm_request(data->pwm_id, "backlight");
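
The pwm_bl hunks above add a low-threshold (lth) duty cycle: the threshold is scaled into nanoseconds at probe time, then nonzero brightness is mapped linearly onto [lth, period] rather than [0, period] (brightness 0 still takes the pwm_disable() branch). A standalone worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int period = 1000000;	/* pwm_period_ns: 1 ms */
	unsigned int max = 100;		/* max_brightness */
	unsigned int lth_raw = 10;	/* platform lth_brightness */
	unsigned int lth = lth_raw * (period / max);	/* -> 100000 ns */
	unsigned int b;

	for (b = 25; b <= max; b += 25) {
		unsigned int duty = lth + b * (period - lth) / max;

		printf("brightness %3u -> duty %7u ns\n", b, duty);
	}
	/* b = max yields the full period; small b never drops below lth */
	return 0;
}
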
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index a3128c9cb7ad..5927db0da999 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -729,10 +729,10 @@ static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev,
729 729
730 return strlen(buf); 730 return strlen(buf);
731} 731}
732static DEVICE_ATTR(gamma_table, 0644, 732static DEVICE_ATTR(gamma_table, 0444,
733 s6e63m0_sysfs_show_gamma_table, NULL); 733 s6e63m0_sysfs_show_gamma_table, NULL);
734 734
735static int __init s6e63m0_probe(struct spi_device *spi) 735static int __devinit s6e63m0_probe(struct spi_device *spi)
736{ 736{
737 int ret = 0; 737 int ret = 0;
738 struct s6e63m0 *lcd = NULL; 738 struct s6e63m0 *lcd = NULL;
@@ -829,6 +829,9 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
829 struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); 829 struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
830 830
831 s6e63m0_power(lcd, FB_BLANK_POWERDOWN); 831 s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
832 device_remove_file(&spi->dev, &dev_attr_gamma_table);
833 device_remove_file(&spi->dev, &dev_attr_gamma_mode);
834 backlight_device_unregister(lcd->bd);
832 lcd_device_unregister(lcd->ld); 835 lcd_device_unregister(lcd->ld);
833 kfree(lcd); 836 kfree(lcd);
834 837
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 97612f548a8e..321a0c8346e5 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -1299,9 +1299,6 @@ static void restore_cpu_virqs(unsigned int cpu)
1299 evtchn_to_irq[evtchn] = irq; 1299 evtchn_to_irq[evtchn] = irq;
1300 irq_info[irq] = mk_virq_info(evtchn, virq); 1300 irq_info[irq] = mk_virq_info(evtchn, virq);
1301 bind_evtchn_to_cpu(evtchn, cpu); 1301 bind_evtchn_to_cpu(evtchn, cpu);
1302
1303 /* Ready for use. */
1304 unmask_evtchn(evtchn);
1305 } 1302 }
1306} 1303}
1307 1304
@@ -1327,10 +1324,6 @@ static void restore_cpu_ipis(unsigned int cpu)
1327 evtchn_to_irq[evtchn] = irq; 1324 evtchn_to_irq[evtchn] = irq;
1328 irq_info[irq] = mk_ipi_info(evtchn, ipi); 1325 irq_info[irq] = mk_ipi_info(evtchn, ipi);
1329 bind_evtchn_to_cpu(evtchn, cpu); 1326 bind_evtchn_to_cpu(evtchn, cpu);
1330
1331 /* Ready for use. */
1332 unmask_evtchn(evtchn);
1333
1334 } 1327 }
1335} 1328}
1336 1329
@@ -1390,6 +1383,7 @@ void xen_poll_irq(int irq)
1390void xen_irq_resume(void) 1383void xen_irq_resume(void)
1391{ 1384{
1392 unsigned int cpu, irq, evtchn; 1385 unsigned int cpu, irq, evtchn;
1386 struct irq_desc *desc;
1393 1387
1394 init_evtchn_cpu_bindings(); 1388 init_evtchn_cpu_bindings();
1395 1389
@@ -1408,6 +1402,23 @@ void xen_irq_resume(void)
1408 restore_cpu_virqs(cpu); 1402 restore_cpu_virqs(cpu);
1409 restore_cpu_ipis(cpu); 1403 restore_cpu_ipis(cpu);
1410 } 1404 }
1405
1406 /*
1407 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
1408 * are not handled by the IRQ core.
1409 */
1410 for_each_irq_desc(irq, desc) {
1411 if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
1412 continue;
1413 if (desc->status & IRQ_DISABLED)
1414 continue;
1415
1416 evtchn = evtchn_from_irq(irq);
1417 if (evtchn == -1)
1418 continue;
1419
1420 unmask_evtchn(evtchn);
1421 }
1411} 1422}
1412 1423
1413static struct irq_chip xen_dynamic_chip __read_mostly = { 1424static struct irq_chip xen_dynamic_chip __read_mostly = {
diff --git a/fs/bio.c b/fs/bio.c
index 8abb2dfb2e7c..4bd454fa844e 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -370,6 +370,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
370{ 370{
371 struct bio *bio; 371 struct bio *bio;
372 372
373 if (nr_iovecs > UIO_MAXIOV)
374 return NULL;
375
373 bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec), 376 bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
374 gfp_mask); 377 gfp_mask);
375 if (unlikely(!bio)) 378 if (unlikely(!bio))
@@ -697,8 +700,12 @@ static void bio_free_map_data(struct bio_map_data *bmd)
697static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, 700static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
698 gfp_t gfp_mask) 701 gfp_t gfp_mask)
699{ 702{
700 struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask); 703 struct bio_map_data *bmd;
701 704
705 if (iov_count > UIO_MAXIOV)
706 return NULL;
707
708 bmd = kmalloc(sizeof(*bmd), gfp_mask);
702 if (!bmd) 709 if (!bmd)
703 return NULL; 710 return NULL;
704 711
@@ -827,6 +834,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
827 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; 834 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
828 start = uaddr >> PAGE_SHIFT; 835 start = uaddr >> PAGE_SHIFT;
829 836
837 /*
838 * Overflow, abort
839 */
840 if (end < start)
841 return ERR_PTR(-EINVAL);
842
830 nr_pages += end - start; 843 nr_pages += end - start;
831 len += iov[i].iov_len; 844 len += iov[i].iov_len;
832 } 845 }
@@ -955,6 +968,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
955 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 968 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
956 unsigned long start = uaddr >> PAGE_SHIFT; 969 unsigned long start = uaddr >> PAGE_SHIFT;
957 970
971 /*
972 * Overflow, abort
973 */
974 if (end < start)
975 return ERR_PTR(-EINVAL);
976
958 nr_pages += end - start; 977 nr_pages += end - start;
959 /* 978 /*
960 * buffer must be aligned to at least hardsector size for now 979 * buffer must be aligned to at least hardsector size for now
@@ -982,7 +1001,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
982 unsigned long start = uaddr >> PAGE_SHIFT; 1001 unsigned long start = uaddr >> PAGE_SHIFT;
983 const int local_nr_pages = end - start; 1002 const int local_nr_pages = end - start;
984 const int page_limit = cur_page + local_nr_pages; 1003 const int page_limit = cur_page + local_nr_pages;
985 1004
986 ret = get_user_pages_fast(uaddr, local_nr_pages, 1005 ret = get_user_pages_fast(uaddr, local_nr_pages,
987 write_to_vm, &pages[cur_page]); 1006 write_to_vm, &pages[cur_page]);
988 if (ret < local_nr_pages) { 1007 if (ret < local_nr_pages) {
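
The fs/bio.c hunks above guard the page-span arithmetic against wraparound: with a hostile uaddr/len pair, uaddr + len can overflow and leave end < start, so the code now bails out before trusting end - start. A standalone sketch of the check:

#include <assert.h>
#include <limits.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

static int page_span(unsigned long uaddr, unsigned long len,
		     unsigned long *nr_pages)
{
	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = uaddr >> PAGE_SHIFT;

	if (end < start)	/* overflow, abort */
		return -1;
	*nr_pages = end - start;
	return 0;
}

int main(void)
{
	unsigned long n;

	assert(page_span(0x1000, 2 * PAGE_SIZE, &n) == 0 && n == 2);
	/* uaddr + len wraps past ULONG_MAX: must be rejected */
	assert(page_span(ULONG_MAX - PAGE_SIZE, 2 * PAGE_SIZE, &n) == -1);
	return 0;
}
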
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 39869c3c3efb..ef3a55bf86b6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2177,7 +2177,6 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
2177 2177
2178 setattr_copy(inode, attrs); 2178 setattr_copy(inode, attrs);
2179 mark_inode_dirty(inode); 2179 mark_inode_dirty(inode);
2180 return 0;
2181 2180
2182cifs_setattr_exit: 2181cifs_setattr_exit:
2183 kfree(full_path); 2182 kfree(full_path);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 2fa22f20cfc5..0c98672d0122 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -38,10 +38,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
38 struct cifs_sb_info *cifs_sb; 38 struct cifs_sb_info *cifs_sb;
39#ifdef CONFIG_CIFS_POSIX 39#ifdef CONFIG_CIFS_POSIX
40 struct cifsFileInfo *pSMBFile = filep->private_data; 40 struct cifsFileInfo *pSMBFile = filep->private_data;
41 struct cifsTconInfo *tcon = tlink_tcon(pSMBFile->tlink); 41 struct cifsTconInfo *tcon;
42 __u64 ExtAttrBits = 0; 42 __u64 ExtAttrBits = 0;
43 __u64 ExtAttrMask = 0; 43 __u64 ExtAttrMask = 0;
44 __u64 caps = le64_to_cpu(tcon->fsUnixInfo.Capability); 44 __u64 caps;
45#endif /* CONFIG_CIFS_POSIX */ 45#endif /* CONFIG_CIFS_POSIX */
46 46
47 xid = GetXid(); 47 xid = GetXid();
@@ -62,6 +62,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
62 break; 62 break;
63#ifdef CONFIG_CIFS_POSIX 63#ifdef CONFIG_CIFS_POSIX
64 case FS_IOC_GETFLAGS: 64 case FS_IOC_GETFLAGS:
65 if (pSMBFile == NULL)
66 break;
67 tcon = tlink_tcon(pSMBFile->tlink);
68 caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
65 if (CIFS_UNIX_EXTATTR_CAP & caps) { 69 if (CIFS_UNIX_EXTATTR_CAP & caps) {
66 rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid, 70 rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid,
67 &ExtAttrBits, &ExtAttrMask); 71 &ExtAttrBits, &ExtAttrMask);
@@ -73,6 +77,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
73 break; 77 break;
74 78
75 case FS_IOC_SETFLAGS: 79 case FS_IOC_SETFLAGS:
80 if (pSMBFile == NULL)
81 break;
82 tcon = tlink_tcon(pSMBFile->tlink);
83 caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
76 if (CIFS_UNIX_EXTATTR_CAP & caps) { 84 if (CIFS_UNIX_EXTATTR_CAP & caps) {
77 if (get_user(ExtAttrBits, (int __user *)arg)) { 85 if (get_user(ExtAttrBits, (int __user *)arg)) {
78 rc = -EFAULT; 86 rc = -EFAULT;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d6cfac1f0a40..a5fe68189eed 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -932,8 +932,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
932 if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { 932 if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
933 *user = current_user(); 933 *user = current_user();
934 if (user_shm_lock(size, *user)) { 934 if (user_shm_lock(size, *user)) {
935 WARN_ONCE(1, 935 printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n");
936 "Using mlock ulimits for SHM_HUGETLB deprecated\n");
937 } else { 936 } else {
938 *user = NULL; 937 *user = NULL;
939 return ERR_PTR(-EPERM); 938 return ERR_PTR(-EPERM);
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 748cfb92dcc6..2f7d05c89922 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -111,12 +111,14 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
111 read_lock(&tasklist_lock); 111 read_lock(&tasklist_lock);
112 switch (which) { 112 switch (which) {
113 case IOPRIO_WHO_PROCESS: 113 case IOPRIO_WHO_PROCESS:
114 rcu_read_lock();
114 if (!who) 115 if (!who)
115 p = current; 116 p = current;
116 else 117 else
117 p = find_task_by_vpid(who); 118 p = find_task_by_vpid(who);
118 if (p) 119 if (p)
119 ret = set_task_ioprio(p, ioprio); 120 ret = set_task_ioprio(p, ioprio);
121 rcu_read_unlock();
120 break; 122 break;
121 case IOPRIO_WHO_PGRP: 123 case IOPRIO_WHO_PGRP:
122 if (!who) 124 if (!who)
@@ -139,7 +141,12 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
139 break; 141 break;
140 142
141 do_each_thread(g, p) { 143 do_each_thread(g, p) {
142 if (__task_cred(p)->uid != who) 144 int match;
145
146 rcu_read_lock();
147 match = __task_cred(p)->uid == who;
148 rcu_read_unlock();
149 if (!match)
143 continue; 150 continue;
144 ret = set_task_ioprio(p, ioprio); 151 ret = set_task_ioprio(p, ioprio);
145 if (ret) 152 if (ret)
@@ -200,12 +207,14 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
200 read_lock(&tasklist_lock); 207 read_lock(&tasklist_lock);
201 switch (which) { 208 switch (which) {
202 case IOPRIO_WHO_PROCESS: 209 case IOPRIO_WHO_PROCESS:
210 rcu_read_lock();
203 if (!who) 211 if (!who)
204 p = current; 212 p = current;
205 else 213 else
206 p = find_task_by_vpid(who); 214 p = find_task_by_vpid(who);
207 if (p) 215 if (p)
208 ret = get_task_ioprio(p); 216 ret = get_task_ioprio(p);
217 rcu_read_unlock();
209 break; 218 break;
210 case IOPRIO_WHO_PGRP: 219 case IOPRIO_WHO_PGRP:
211 if (!who) 220 if (!who)
@@ -232,7 +241,12 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
232 break; 241 break;
233 242
234 do_each_thread(g, p) { 243 do_each_thread(g, p) {
235 if (__task_cred(p)->uid != user->uid) 244 int match;
245
246 rcu_read_lock();
247 match = __task_cred(p)->uid == user->uid;
248 rcu_read_unlock();
249 if (!match)
236 continue; 250 continue;
237 tmpio = get_task_ioprio(p); 251 tmpio = get_task_ioprio(p);
238 if (tmpio < 0) 252 if (tmpio < 0)
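
The ioprio hunks apply two standard RCU rules: find_task_by_vpid() must run
under rcu_read_lock(), and so must any dereference of __task_cred(). Condensed
from the hunks themselves, the pattern is:

	rcu_read_lock();
	p = find_task_by_vpid(who);		/* pid-to-task lookup needs RCU */
	if (p)
		ret = set_task_ioprio(p, ioprio);
	rcu_read_unlock();

	rcu_read_lock();
	match = __task_cred(p)->uid == who;	/* cred pointer is RCU-protected */
	rcu_read_unlock();
	/* act on 'match' only after leaving the RCU section */
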
diff --git a/fs/locks.c b/fs/locks.c
index 65765cb6afed..0e62dd35d088 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1504,9 +1504,8 @@ static int do_fcntl_delete_lease(struct file *filp)
1504 1504
1505static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) 1505static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1506{ 1506{
1507 struct file_lock *fl; 1507 struct file_lock *fl, *ret;
1508 struct fasync_struct *new; 1508 struct fasync_struct *new;
1509 struct inode *inode = filp->f_path.dentry->d_inode;
1510 int error; 1509 int error;
1511 1510
1512 fl = lease_alloc(filp, arg); 1511 fl = lease_alloc(filp, arg);
@@ -1518,13 +1517,16 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1518 locks_free_lock(fl); 1517 locks_free_lock(fl);
1519 return -ENOMEM; 1518 return -ENOMEM;
1520 } 1519 }
1520 ret = fl;
1521 lock_flocks(); 1521 lock_flocks();
1522 error = __vfs_setlease(filp, arg, &fl); 1522 error = __vfs_setlease(filp, arg, &ret);
1523 if (error) { 1523 if (error) {
1524 unlock_flocks(); 1524 unlock_flocks();
1525 locks_free_lock(fl); 1525 locks_free_lock(fl);
1526 goto out_free_fasync; 1526 goto out_free_fasync;
1527 } 1527 }
1528 if (ret != fl)
1529 locks_free_lock(fl);
1528 1530
1529 /* 1531 /*
1530 * fasync_insert_entry() returns the old entry if any. 1532 * fasync_insert_entry() returns the old entry if any.
@@ -1532,17 +1534,10 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1532 * inserted it into the fasync list. Clear new so that 1534 * inserted it into the fasync list. Clear new so that
1533 * we don't release it here. 1535 * we don't release it here.
1534 */ 1536 */
1535 if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new)) 1537 if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1536 new = NULL; 1538 new = NULL;
1537 1539
1538 if (error < 0) { 1540 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1539 /* remove lease just inserted by setlease */
1540 fl->fl_type = F_UNLCK | F_INPROGRESS;
1541 fl->fl_break_time = jiffies - 10;
1542 time_out_leases(inode);
1543 } else {
1544 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1545 }
1546 unlock_flocks(); 1541 unlock_flocks();
1547 1542
1548out_free_fasync: 1543out_free_fasync:
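
One contract in the rewritten do_fcntl_add_lease() deserves spelling out:
__vfs_setlease() may hand back, through its lock argument, a different
file_lock than the caller passed in (for instance when an existing lease is
reused), and in that case the caller still owns its own allocation and must
free it. The essential shape, using the patch's identifiers:

	ret = fl;				/* remember our allocation */
	error = __vfs_setlease(filp, arg, &ret);
	if (error) {
		locks_free_lock(fl);		/* nothing was consumed */
		goto out;
	}
	if (ret != fl)				/* an existing lease was reused */
		locks_free_lock(fl);		/* our copy is now redundant */
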
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f1e5ec6b5105..ad2bfa68d534 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -673,16 +673,17 @@ static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
673 spin_unlock(&clp->cl_lock); 673 spin_unlock(&clp->cl_lock);
674} 674}
675 675
676static void nfsd4_register_conn(struct nfsd4_conn *conn) 676static int nfsd4_register_conn(struct nfsd4_conn *conn)
677{ 677{
678 conn->cn_xpt_user.callback = nfsd4_conn_lost; 678 conn->cn_xpt_user.callback = nfsd4_conn_lost;
679 register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); 679 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
680} 680}
681 681
682static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) 682static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
683{ 683{
684 struct nfsd4_conn *conn; 684 struct nfsd4_conn *conn;
685 u32 flags = NFS4_CDFC4_FORE; 685 u32 flags = NFS4_CDFC4_FORE;
686 int ret;
686 687
687 if (ses->se_flags & SESSION4_BACK_CHAN) 688 if (ses->se_flags & SESSION4_BACK_CHAN)
688 flags |= NFS4_CDFC4_BACK; 689 flags |= NFS4_CDFC4_BACK;
@@ -690,7 +691,10 @@ static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
690 if (!conn) 691 if (!conn)
691 return nfserr_jukebox; 692 return nfserr_jukebox;
692 nfsd4_hash_conn(conn, ses); 693 nfsd4_hash_conn(conn, ses);
693 nfsd4_register_conn(conn); 694 ret = nfsd4_register_conn(conn);
695 if (ret)
696 /* oops; xprt is already down: */
697 nfsd4_conn_lost(&conn->cn_xpt_user);
694 return nfs_ok; 698 return nfs_ok;
695} 699}
696 700
@@ -1644,6 +1648,7 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi
1644{ 1648{
1645 struct nfs4_client *clp = ses->se_client; 1649 struct nfs4_client *clp = ses->se_client;
1646 struct nfsd4_conn *c; 1650 struct nfsd4_conn *c;
1651 int ret;
1647 1652
1648 spin_lock(&clp->cl_lock); 1653 spin_lock(&clp->cl_lock);
1649 c = __nfsd4_find_conn(new->cn_xprt, ses); 1654 c = __nfsd4_find_conn(new->cn_xprt, ses);
@@ -1654,7 +1659,10 @@ static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_sessi
1654 } 1659 }
1655 __nfsd4_hash_conn(new, ses); 1660 __nfsd4_hash_conn(new, ses);
1656 spin_unlock(&clp->cl_lock); 1661 spin_unlock(&clp->cl_lock);
1657 nfsd4_register_conn(new); 1662 ret = nfsd4_register_conn(new);
1663 if (ret)
1664 /* oops; xprt is already down: */
1665 nfsd4_conn_lost(&new->cn_xpt_user);
1658 return; 1666 return;
1659} 1667}
1660 1668
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index ddb1f41376e5..911e61f348fc 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -418,7 +418,7 @@ out_no_root:
418static struct dentry *openprom_mount(struct file_system_type *fs_type, 418static struct dentry *openprom_mount(struct file_system_type *fs_type,
419 int flags, const char *dev_name, void *data) 419 int flags, const char *dev_name, void *data)
420{ 420{
421 return mount_single(fs_type, flags, data, openprom_fill_super) 421 return mount_single(fs_type, flags, data, openprom_fill_super);
422} 422}
423 423
424static struct file_system_type openprom_fs_type = { 424static struct file_system_type openprom_fs_type = {
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c9af48fffcd7..7d287afccde5 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1111,11 +1111,12 @@ xfs_vm_writepage(
1111 uptodate = 0; 1111 uptodate = 0;
1112 1112
1113 /* 1113 /*
1114 * A hole may still be marked uptodate because discard_buffer 1114 * set_page_dirty dirties all buffers in a page, independent
1115 * leaves the flag set. 1115 * of their state. The dirty state however is entirely
1116 * meaningless for holes (!mapped && uptodate), so skip
1117 * buffers covering holes here.
1116 */ 1118 */
1117 if (!buffer_mapped(bh) && buffer_uptodate(bh)) { 1119 if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1118 ASSERT(!buffer_dirty(bh));
1119 imap_valid = 0; 1120 imap_valid = 0;
1120 continue; 1121 continue;
1121 } 1122 }
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 63fd2c07cb57..aa1d353def29 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1781,7 +1781,6 @@ xfs_buf_delwri_split(
1781 INIT_LIST_HEAD(list); 1781 INIT_LIST_HEAD(list);
1782 spin_lock(dwlk); 1782 spin_lock(dwlk);
1783 list_for_each_entry_safe(bp, n, dwq, b_list) { 1783 list_for_each_entry_safe(bp, n, dwq, b_list) {
1784 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1785 ASSERT(bp->b_flags & XBF_DELWRI); 1784 ASSERT(bp->b_flags & XBF_DELWRI);
1786 1785
1787 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { 1786 if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1795,6 +1794,7 @@ xfs_buf_delwri_split(
1795 _XBF_RUN_QUEUES); 1794 _XBF_RUN_QUEUES);
1796 bp->b_flags |= XBF_WRITE; 1795 bp->b_flags |= XBF_WRITE;
1797 list_move_tail(&bp->b_list, list); 1796 list_move_tail(&bp->b_list, list);
1797 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1798 } else 1798 } else
1799 skipped++; 1799 skipped++;
1800 } 1800 }
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 2ea238f6d38e..ad442d9e392e 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -416,7 +416,7 @@ xfs_attrlist_by_handle(
416 if (IS_ERR(dentry)) 416 if (IS_ERR(dentry))
417 return PTR_ERR(dentry); 417 return PTR_ERR(dentry);
418 418
419 kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); 419 kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
420 if (!kbuf) 420 if (!kbuf)
421 goto out_dput; 421 goto out_dput;
422 422
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 96107efc0c61..94d5fd6a2973 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -762,7 +762,8 @@ xfs_setup_inode(
762 inode->i_state = I_NEW; 762 inode->i_state = I_NEW;
763 763
764 inode_sb_list_add(inode); 764 inode_sb_list_add(inode);
765 insert_inode_hash(inode); 765 /* make the inode look hashed for the writeback code */
766 hlist_add_fake(&inode->i_hash);
766 767
767 inode->i_mode = ip->i_d.di_mode; 768 inode->i_mode = ip->i_d.di_mode;
768 inode->i_nlink = ip->i_d.di_nlink; 769 inode->i_nlink = ip->i_d.di_nlink;
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9f3a78fe6ae4..064f964d4f3c 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -353,9 +353,6 @@ xfs_parseargs(
353 mp->m_qflags &= ~XFS_OQUOTA_ENFD; 353 mp->m_qflags &= ~XFS_OQUOTA_ENFD;
354 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { 354 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
355 mp->m_flags |= XFS_MOUNT_DELAYLOG; 355 mp->m_flags |= XFS_MOUNT_DELAYLOG;
356 cmn_err(CE_WARN,
357 "Enabling EXPERIMENTAL delayed logging feature "
358 "- use at your own risk.\n");
359 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { 356 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
360 mp->m_flags &= ~XFS_MOUNT_DELAYLOG; 357 mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
361 } else if (!strcmp(this_char, "ihashsize")) { 358 } else if (!strcmp(this_char, "ihashsize")) {
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 37d33254981d..afb0d7cfad1c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -853,6 +853,7 @@ restart:
853 if (trylock) { 853 if (trylock) {
854 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { 854 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
855 skipped++; 855 skipped++;
856 xfs_perag_put(pag);
856 continue; 857 continue;
857 } 858 }
858 first_index = pag->pag_ici_reclaim_cursor; 859 first_index = pag->pag_ici_reclaim_cursor;
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 9b715dce5699..9124425b7f2f 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -744,9 +744,15 @@ xfs_filestream_new_ag(
744 * If the file's parent directory is known, take its iolock in exclusive 744 * If the file's parent directory is known, take its iolock in exclusive
745 * mode to prevent two sibling files from racing each other to migrate 745 * mode to prevent two sibling files from racing each other to migrate
746 * themselves and their parent to different AGs. 746 * themselves and their parent to different AGs.
747 *
748 * Note that we lock the parent directory iolock inside the child
749 * iolock here. That's fine as we never hold both parent and child
750 * iolock in any other place. This is different from the ilock,
751 * which requires locking of the child after the parent for namespace
752 * operations.
747 */ 753 */
748 if (pip) 754 if (pip)
749 xfs_ilock(pip, XFS_IOLOCK_EXCL); 755 xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
750 756
751 /* 757 /*
752 * A new AG needs to be found for the file. If the file's parent 758 * A new AG needs to be found for the file. If the file's parent
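
The new comment pins down an ordering rule: for the iolock, the parent
directory is taken inside the child, the reverse of the ilock ordering used by
namespace operations, and XFS_IOLOCK_PARENT marks the parent acquisition as a
distinct nesting level. Schematically (the child's iolock is taken earlier,
elsewhere in the I/O path; this sketch only illustrates the order):

	xfs_ilock(ip, XFS_IOLOCK_EXCL);		/* child iolock, held first */
	/* ... decide a new AG is needed ... */
	xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);	/* parent, nested */
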
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b1498ab5a399..19e9dfa1c254 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -275,6 +275,7 @@ xfs_free_perag(
275 pag = radix_tree_delete(&mp->m_perag_tree, agno); 275 pag = radix_tree_delete(&mp->m_perag_tree, agno);
276 spin_unlock(&mp->m_perag_lock); 276 spin_unlock(&mp->m_perag_lock);
277 ASSERT(pag); 277 ASSERT(pag);
278 ASSERT(atomic_read(&pag->pag_ref) == 0);
278 call_rcu(&pag->rcu_head, __xfs_free_perag); 279 call_rcu(&pag->rcu_head, __xfs_free_perag);
279 } 280 }
280} 281}
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index e0e64b113bd6..9bb6eda4cd21 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -346,8 +346,17 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
346#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta) 346#define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
347#define xfs_trans_apply_dquot_deltas(tp) 347#define xfs_trans_apply_dquot_deltas(tp)
348#define xfs_trans_unreserve_and_mod_dquots(tp) 348#define xfs_trans_unreserve_and_mod_dquots(tp)
349#define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags) (0) 349static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
350#define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl) (0) 350 struct xfs_inode *ip, long nblks, long ninos, uint flags)
351{
352 return 0;
353}
354static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
355 struct xfs_mount *mp, struct xfs_dquot *udqp,
 356 struct xfs_dquot *gdqp, long nblks, long ninos, uint flags)
357{
358 return 0;
359}
351#define xfs_qm_vop_create_dqattach(tp, ip, u, g) 360#define xfs_qm_vop_create_dqattach(tp, ip, u, g)
352#define xfs_qm_vop_rename_dqattach(it) (0) 361#define xfs_qm_vop_rename_dqattach(it) (0)
353#define xfs_qm_vop_chown(tp, ip, old, new) (NULL) 362#define xfs_qm_vop_chown(tp, ip, old, new) (NULL)
@@ -357,11 +366,14 @@ xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
357#define xfs_qm_dqdetach(ip) 366#define xfs_qm_dqdetach(ip)
358#define xfs_qm_dqrele(d) 367#define xfs_qm_dqrele(d)
359#define xfs_qm_statvfs(ip, s) 368#define xfs_qm_statvfs(ip, s)
360#define xfs_qm_sync(mp, fl) (0) 369static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
370{
371 return 0;
372}
361#define xfs_qm_newmount(mp, a, b) (0) 373#define xfs_qm_newmount(mp, a, b) (0)
362#define xfs_qm_mount_quotas(mp) 374#define xfs_qm_mount_quotas(mp)
363#define xfs_qm_unmount(mp) 375#define xfs_qm_unmount(mp)
364#define xfs_qm_unmount_quotas(mp) (0) 376#define xfs_qm_unmount_quotas(mp)
365#endif /* CONFIG_XFS_QUOTA */ 377#endif /* CONFIG_XFS_QUOTA */
366 378
367#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \ 379#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
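
Turning the (0)-valued macro stubs into static inlines is the usual
CONFIG-stub idiom: the inline keeps the no-op behaviour but type-checks every
argument and silences set-but-unused warnings at call sites. A generic
before/after sketch with hypothetical names:

	#ifdef CONFIG_FOO
	int foo_reserve(struct foo_trans *tp, struct foo_inode *ip, long nblks);
	#else
	/* old stub: arguments neither evaluated nor type-checked
	 * #define foo_reserve(tp, ip, nblks)	(0)
	 */
	static inline int foo_reserve(struct foo_trans *tp, struct foo_inode *ip,
				      long nblks)
	{
		return 0;		/* same no-op, but compiler-checked */
	}
	#endif
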
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 5afa5b52063e..beafc156a535 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -432,6 +432,10 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
432 * together with the @destroy function, 432 * together with the @destroy function,
433 * enables driver-specific objects derived from a ttm_buffer_object. 433 * enables driver-specific objects derived from a ttm_buffer_object.
434 * On successful return, the object kref and list_kref are set to 1. 434 * On successful return, the object kref and list_kref are set to 1.
435 * If a failure occurs, the function will call the @destroy function, or
436 * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
437 * illegal and will likely cause memory corruption.
438 *
435 * Returns 439 * Returns
436 * -ENOMEM: Out of memory. 440 * -ENOMEM: Out of memory.
437 * -EINVAL: Invalid placement flags. 441 * -EINVAL: Invalid placement flags.
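
The paragraph added to this constructor's documentation pins down an ownership
rule that is easy to violate: on failure the object has already been torn down
through @destroy (or kfree()), so the caller may use only the error code
afterwards. In generic, hypothetical form:

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return -ENOMEM;
	ret = init_object(bo, obj_destroy);	/* hypothetical constructor with
						 * the destroy-on-failure rule */
	if (ret)
		return ret;	/* bo already freed via obj_destroy();
				 * a kfree(bo) here would double-free */
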
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d01b4ddbdc56..8e0c848326b6 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -206,14 +206,84 @@ struct ttm_tt {
206struct ttm_mem_type_manager; 206struct ttm_mem_type_manager;
207 207
208struct ttm_mem_type_manager_func { 208struct ttm_mem_type_manager_func {
209 /**
210 * struct ttm_mem_type_manager member init
211 *
212 * @man: Pointer to a memory type manager.
213 * @p_size: Implementation dependent, but typically the size of the
214 * range to be managed in pages.
215 *
216 * Called to initialize a private range manager. The function is
217 * expected to initialize the man::priv member.
218 * Returns 0 on success, negative error code on failure.
219 */
209 int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size); 220 int (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
221
222 /**
223 * struct ttm_mem_type_manager member takedown
224 *
225 * @man: Pointer to a memory type manager.
226 *
227 * Called to undo the setup done in init. All allocated resources
228 * should be freed.
229 */
210 int (*takedown)(struct ttm_mem_type_manager *man); 230 int (*takedown)(struct ttm_mem_type_manager *man);
231
232 /**
233 * struct ttm_mem_type_manager member get_node
234 *
235 * @man: Pointer to a memory type manager.
236 * @bo: Pointer to the buffer object we're allocating space for.
237 * @placement: Placement details.
238 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
239 *
240 * This function should allocate space in the memory type managed
241 * by @man. Placement details if
242 * applicable are given by @placement. If successful,
243 * @mem::mm_node should be set to a non-null value, and
244 * @mem::start should be set to a value identifying the beginning
245 * of the range allocated, and the function should return zero.
 246 * If the memory region cannot accommodate the buffer object, @mem::mm_node
 247 * should be set to NULL, and the function should return 0.
 248 * If a system error occurred, preventing the request from being fulfilled,
 249 * the function should return a negative error code.
250 *
251 * Note that @mem::mm_node will only be dereferenced by
252 * struct ttm_mem_type_manager functions and optionally by the driver,
253 * which has knowledge of the underlying type.
254 *
255 * This function may not be called from within atomic context, so
256 * an implementation can and must use either a mutex or a spinlock to
257 * protect any data structures managing the space.
258 */
211 int (*get_node)(struct ttm_mem_type_manager *man, 259 int (*get_node)(struct ttm_mem_type_manager *man,
212 struct ttm_buffer_object *bo, 260 struct ttm_buffer_object *bo,
213 struct ttm_placement *placement, 261 struct ttm_placement *placement,
214 struct ttm_mem_reg *mem); 262 struct ttm_mem_reg *mem);
263
264 /**
265 * struct ttm_mem_type_manager member put_node
266 *
267 * @man: Pointer to a memory type manager.
268 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
269 *
270 * This function frees memory type resources previously allocated
271 * and that are identified by @mem::mm_node and @mem::start. May not
272 * be called from within atomic context.
273 */
215 void (*put_node)(struct ttm_mem_type_manager *man, 274 void (*put_node)(struct ttm_mem_type_manager *man,
216 struct ttm_mem_reg *mem); 275 struct ttm_mem_reg *mem);
276
277 /**
278 * struct ttm_mem_type_manager member debug
279 *
280 * @man: Pointer to a memory type manager.
281 * @prefix: Prefix to be used in printout to identify the caller.
282 *
283 * This function is called to print out the state of the memory
284 * type manager to aid debugging of out-of-memory conditions.
285 * It may not be called from within atomic context.
286 */
217 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); 287 void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
218}; 288};
219 289
@@ -231,14 +301,13 @@ struct ttm_mem_type_manager {
231 uint64_t size; 301 uint64_t size;
232 uint32_t available_caching; 302 uint32_t available_caching;
233 uint32_t default_caching; 303 uint32_t default_caching;
304 const struct ttm_mem_type_manager_func *func;
305 void *priv;
234 306
235 /* 307 /*
236 * Protected by the bdev->lru_lock. 308 * Protected by the global->lru_lock.
237 * TODO: Consider one lru_lock per ttm_mem_type_manager.
238 * Plays ill with list removal, though.
239 */ 309 */
240 const struct ttm_mem_type_manager_func *func; 310
241 void *priv;
242 struct list_head lru; 311 struct list_head lru;
243}; 312};
244 313
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
new file mode 100644
index 000000000000..96c038e43d66
--- /dev/null
+++ b/include/linux/atomic.h
@@ -0,0 +1,37 @@
1#ifndef _LINUX_ATOMIC_H
2#define _LINUX_ATOMIC_H
3#include <asm/atomic.h>
4
5/**
6 * atomic_inc_not_zero_hint - increment if not null
7 * @v: pointer of type atomic_t
8 * @hint: probable value of the atomic before the increment
9 *
 10 * This version of atomic_inc_not_zero() takes a hint of the probable
 11 * value of the atomic. This lets the processor avoid reading the memory
 12 * before doing the atomic read/modify/write cycle, lowering the
 13 * number of bus transactions on some arches.
14 *
15 * Returns: 0 if increment was not done, 1 otherwise.
16 */
17#ifndef atomic_inc_not_zero_hint
18static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
19{
20 int val, c = hint;
21
22 /* sanity test, should be removed by compiler if hint is a constant */
23 if (!hint)
24 return atomic_inc_not_zero(v);
25
26 do {
27 val = atomic_cmpxchg(v, c, c + 1);
28 if (val == c)
29 return 1;
30 c = val;
31 } while (c);
32
33 return 0;
34}
35#endif
36
37#endif /* _LINUX_ATOMIC_H */
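
A usage sketch for the new helper: a typical caller looks an object up under
RCU and tries to take a reference; if nearly every live object sits at a known
refcount (say 1), passing that as the hint lets the first cmpxchg usually
succeed without a prior read. Names below are illustrative:

	struct obj *o;

	rcu_read_lock();
	o = lookup_obj(key);				/* hypothetical RCU lookup */
	if (o && !atomic_inc_not_zero_hint(&o->refcnt, 1))
		o = NULL;				/* raced with the final put */
	rcu_read_unlock();
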
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ba679992d39b..35dcdb3589bc 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -66,10 +66,6 @@
66#define bio_offset(bio) bio_iovec((bio))->bv_offset 66#define bio_offset(bio) bio_iovec((bio))->bv_offset
67#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) 67#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
68#define bio_sectors(bio) ((bio)->bi_size >> 9) 68#define bio_sectors(bio) ((bio)->bi_size >> 9)
69#define bio_empty_barrier(bio) \
70 ((bio->bi_rw & REQ_HARDBARRIER) && \
71 !bio_has_data(bio) && \
72 !(bio->bi_rw & REQ_DISCARD))
73 69
74static inline unsigned int bio_cur_bytes(struct bio *bio) 70static inline unsigned int bio_cur_bytes(struct bio *bio)
75{ 71{
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0437ab6bb54c..46ad5197537a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -122,7 +122,6 @@ enum rq_flag_bits {
122 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ 122 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
123 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ 123 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
124 124
125 __REQ_HARDBARRIER, /* may not be passed by drive either */
126 __REQ_SYNC, /* request is sync (sync write or read) */ 125 __REQ_SYNC, /* request is sync (sync write or read) */
127 __REQ_META, /* metadata io request */ 126 __REQ_META, /* metadata io request */
128 __REQ_DISCARD, /* request to discard sectors */ 127 __REQ_DISCARD, /* request to discard sectors */
@@ -159,7 +158,6 @@ enum rq_flag_bits {
159#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) 158#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
160#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) 159#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
161#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) 160#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
162#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
163#define REQ_SYNC (1 << __REQ_SYNC) 161#define REQ_SYNC (1 << __REQ_SYNC)
164#define REQ_META (1 << __REQ_META) 162#define REQ_META (1 << __REQ_META)
165#define REQ_DISCARD (1 << __REQ_DISCARD) 163#define REQ_DISCARD (1 << __REQ_DISCARD)
@@ -168,8 +166,8 @@ enum rq_flag_bits {
168#define REQ_FAILFAST_MASK \ 166#define REQ_FAILFAST_MASK \
169 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 167 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
170#define REQ_COMMON_MASK \ 168#define REQ_COMMON_MASK \
171 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ 169 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
172 REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) 170 REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
173#define REQ_CLONE_MASK REQ_COMMON_MASK 171#define REQ_CLONE_MASK REQ_COMMON_MASK
174 172
175#define REQ_UNPLUG (1 << __REQ_UNPLUG) 173#define REQ_UNPLUG (1 << __REQ_UNPLUG)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5027a599077d..aae86fd10c4f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -552,8 +552,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
552 * it already be started by driver. 552 * it already be started by driver.
553 */ 553 */
554#define RQ_NOMERGE_FLAGS \ 554#define RQ_NOMERGE_FLAGS \
555 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \ 555 (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
556 REQ_FLUSH | REQ_FUA)
557#define rq_mergeable(rq) \ 556#define rq_mergeable(rq) \
558 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ 557 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
559 (((rq)->cmd_flags & REQ_DISCARD) || \ 558 (((rq)->cmd_flags & REQ_DISCARD) || \
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 9b2a0158f399..ef44c7a0638c 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
53 53
54 54
55extern const char *drbd_buildtag(void); 55extern const char *drbd_buildtag(void);
56#define REL_VERSION "8.3.9rc2" 56#define REL_VERSION "8.3.9"
57#define API_VERSION 88 57#define API_VERSION 88
58#define PRO_VERSION_MIN 86 58#define PRO_VERSION_MIN 86
59#define PRO_VERSION_MAX 95 59#define PRO_VERSION_MAX 95
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index e9138198e823..b676c585574e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,6 +5,7 @@
5#include <linux/kernel.h> 5#include <linux/kernel.h>
6#include <linux/mm.h> 6#include <linux/mm.h>
7#include <linux/uaccess.h> 7#include <linux/uaccess.h>
8#include <linux/hardirq.h>
8 9
9#include <asm/cacheflush.h> 10#include <asm/cacheflush.h>
10 11
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index 3c5d6b6e765c..cec17cf6cac2 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller 2 * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
3 * 3 *
4 * Copyright 2009 Analog Devices Inc. 4 * Copyright 2009-2010 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
@@ -77,13 +77,26 @@
77 /* Configuration Register1 */ 77 /* Configuration Register1 */
78#define ADP5588_AUTO_INC (1 << 7) 78#define ADP5588_AUTO_INC (1 << 7)
79#define ADP5588_GPIEM_CFG (1 << 6) 79#define ADP5588_GPIEM_CFG (1 << 6)
80#define ADP5588_OVR_FLOW_M (1 << 5)
80#define ADP5588_INT_CFG (1 << 4) 81#define ADP5588_INT_CFG (1 << 4)
82#define ADP5588_OVR_FLOW_IEN (1 << 3)
83#define ADP5588_K_LCK_IM (1 << 2)
81#define ADP5588_GPI_IEN (1 << 1) 84#define ADP5588_GPI_IEN (1 << 1)
85#define ADP5588_KE_IEN (1 << 0)
82 86
83/* Interrupt Status Register */ 87/* Interrupt Status Register */
88#define ADP5588_CMP2_INT (1 << 5)
89#define ADP5588_CMP1_INT (1 << 4)
90#define ADP5588_OVR_FLOW_INT (1 << 3)
91#define ADP5588_K_LCK_INT (1 << 2)
84#define ADP5588_GPI_INT (1 << 1) 92#define ADP5588_GPI_INT (1 << 1)
85#define ADP5588_KE_INT (1 << 0) 93#define ADP5588_KE_INT (1 << 0)
86 94
95/* Key Lock and Event Counter Register */
96#define ADP5588_K_LCK_EN (1 << 6)
97#define ADP5588_LCK21 0x30
98#define ADP5588_KEC 0xF
99
87#define ADP5588_MAXGPIO 18 100#define ADP5588_MAXGPIO 18
88#define ADP5588_BANK(offs) ((offs) >> 3) 101#define ADP5588_BANK(offs) ((offs) >> 3)
89#define ADP5588_BIT(offs) (1u << ((offs) & 0x7)) 102#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index c2f3a72712ce..635e1faec412 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -339,6 +339,31 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
339 } 339 }
340} 340}
341 341
342/**
343 * vlan_get_protocol - get protocol EtherType.
344 * @skb: skbuff to query
345 *
346 * Returns the EtherType of the packet, regardless of whether it is
347 * vlan encapsulated (normal or hardware accelerated) or not.
348 */
349static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
350{
351 __be16 protocol = 0;
352
353 if (vlan_tx_tag_present(skb) ||
354 skb->protocol != cpu_to_be16(ETH_P_8021Q))
355 protocol = skb->protocol;
356 else {
357 __be16 proto, *protop;
358 protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
359 h_vlan_encapsulated_proto),
360 sizeof(proto), &proto);
361 if (likely(protop))
362 protocol = *protop;
363 }
364
365 return protocol;
366}
342#endif /* __KERNEL__ */ 367#endif /* __KERNEL__ */
343 368
344/* VLAN IOCTLs are found in sockios.h */ 369/* VLAN IOCTLs are found in sockios.h */
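
A caller-side sketch for vlan_get_protocol(): a transmit path that must pick a
checksum routine wants the encapsulated EtherType whether the 802.1Q tag sits
in the frame or is carried out of band as accelerated tag metadata. The helper
names here are hypothetical:

	__be16 proto = vlan_get_protocol(skb);

	if (proto == cpu_to_be16(ETH_P_IP))
		setup_ipv4_csum(skb);
	else if (proto == cpu_to_be16(ETH_P_IPV6))
		setup_ipv6_csum(skb);
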
diff --git a/include/linux/input.h b/include/linux/input.h
index 51af441f3a21..6ef44465db8d 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1406,6 +1406,8 @@ static inline void input_set_drvdata(struct input_dev *dev, void *data)
1406int __must_check input_register_device(struct input_dev *); 1406int __must_check input_register_device(struct input_dev *);
1407void input_unregister_device(struct input_dev *); 1407void input_unregister_device(struct input_dev *);
1408 1408
1409void input_reset_device(struct input_dev *);
1410
1409int __must_check input_register_handler(struct input_handler *); 1411int __must_check input_register_handler(struct input_handler *);
1410void input_unregister_handler(struct input_handler *); 1412void input_unregister_handler(struct input_handler *);
1411 1413
@@ -1421,7 +1423,7 @@ void input_release_device(struct input_handle *);
1421int input_open_device(struct input_handle *); 1423int input_open_device(struct input_handle *);
1422void input_close_device(struct input_handle *); 1424void input_close_device(struct input_handle *);
1423 1425
1424int input_flush_device(struct input_handle* handle, struct file* file); 1426int input_flush_device(struct input_handle *handle, struct file *file);
1425 1427
1426void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value); 1428void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
1427void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value); 1429void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 3e70b21884a9..b2eee896dcbc 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -76,7 +76,6 @@ int put_io_context(struct io_context *ioc);
76void exit_io_context(struct task_struct *task); 76void exit_io_context(struct task_struct *task);
77struct io_context *get_io_context(gfp_t gfp_flags, int node); 77struct io_context *get_io_context(gfp_t gfp_flags, int node);
78struct io_context *alloc_io_context(gfp_t gfp_flags, int node); 78struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
79void copy_io_context(struct io_context **pdst, struct io_context **psrc);
80#else 79#else
81static inline void exit_io_context(struct task_struct *task) 80static inline void exit_io_context(struct task_struct *task)
82{ 81{
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 450092c1e35f..fc3da9e4da19 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -60,7 +60,7 @@ extern const char linux_proc_banner[];
60#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) 60#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
61#define roundup(x, y) ( \ 61#define roundup(x, y) ( \
62{ \ 62{ \
63 typeof(y) __y = y; \ 63 const typeof(y) __y = y; \
64 (((x) + (__y - 1)) / __y) * __y; \ 64 (((x) + (__y - 1)) / __y) * __y; \
65} \ 65} \
66) 66)
@@ -293,6 +293,7 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
293 unsigned int interval_msec); 293 unsigned int interval_msec);
294 294
295extern int printk_delay_msec; 295extern int printk_delay_msec;
296extern int dmesg_restrict;
296 297
297/* 298/*
298 * Print a one-time message (analogous to WARN_ONCE() et al): 299 * Print a one-time message (analogous to WARN_ONCE() et al):
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h
new file mode 100644
index 000000000000..38368d785f08
--- /dev/null
+++ b/include/linux/leds-lp5521.h
@@ -0,0 +1,47 @@
1/*
2 * LP5521 LED chip driver.
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef __LINUX_LP5521_H
24#define __LINUX_LP5521_H
25
26/* See Documentation/leds/leds-lp5521.txt */
27
28struct lp5521_led_config {
29 u8 chan_nr;
30 u8 led_current; /* mA x10, 0 if led is not connected */
31 u8 max_current;
32};
33
34#define LP5521_CLOCK_AUTO 0
35#define LP5521_CLOCK_INT 1
36#define LP5521_CLOCK_EXT 2
37
38struct lp5521_platform_data {
39 struct lp5521_led_config *led_config;
40 u8 num_channels;
41 u8 clock_mode;
42 int (*setup_resources)(void);
43 void (*release_resources)(void);
44 void (*enable)(bool state);
45};
46
47#endif /* __LINUX_LP5521_H */
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h
new file mode 100644
index 000000000000..796747637b80
--- /dev/null
+++ b/include/linux/leds-lp5523.h
@@ -0,0 +1,47 @@
1/*
2 * LP5523 LED Driver
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef __LINUX_LP5523_H
24#define __LINUX_LP5523_H
25
26/* See Documentation/leds/leds-lp5523.txt */
27
28struct lp5523_led_config {
29 u8 chan_nr;
30 u8 led_current; /* mA x10, 0 if led is not connected */
31 u8 max_current;
32};
33
34#define LP5523_CLOCK_AUTO 0
35#define LP5523_CLOCK_INT 1
36#define LP5523_CLOCK_EXT 2
37
38struct lp5523_platform_data {
39 struct lp5523_led_config *led_config;
40 u8 num_channels;
41 u8 clock_mode;
42 int (*setup_resources)(void);
43 void (*release_resources)(void);
44 void (*enable)(bool state);
45};
46
47#endif /* __LINUX_LP5523_H */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index ba6986a11663..0f19df9e37b0 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -15,6 +15,7 @@
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <linux/rwsem.h> 17#include <linux/rwsem.h>
18#include <linux/timer.h>
18 19
19struct device; 20struct device;
20/* 21/*
@@ -45,10 +46,14 @@ struct led_classdev {
45 /* Get LED brightness level */ 46 /* Get LED brightness level */
46 enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); 47 enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
47 48
48 /* Activate hardware accelerated blink, delays are in 49 /*
49 * miliseconds and if none is provided then a sensible default 50 * Activate hardware accelerated blink, delays are in milliseconds
50 * should be chosen. The call can adjust the timings if it can't 51 * and if both are zero then a sensible default should be chosen.
 51 * match the values specified exactly. */ 52 * The call should adjust the timings in that case, and also when
 53 * it can't match the values specified exactly.
54 * Deactivate blinking again when the brightness is set to a fixed
55 * value via the brightness_set() callback.
56 */
52 int (*blink_set)(struct led_classdev *led_cdev, 57 int (*blink_set)(struct led_classdev *led_cdev,
53 unsigned long *delay_on, 58 unsigned long *delay_on,
54 unsigned long *delay_off); 59 unsigned long *delay_off);
@@ -57,6 +62,10 @@ struct led_classdev {
57 struct list_head node; /* LED Device list */ 62 struct list_head node; /* LED Device list */
58 const char *default_trigger; /* Trigger to use */ 63 const char *default_trigger; /* Trigger to use */
59 64
65 unsigned long blink_delay_on, blink_delay_off;
66 struct timer_list blink_timer;
67 int blink_brightness;
68
60#ifdef CONFIG_LEDS_TRIGGERS 69#ifdef CONFIG_LEDS_TRIGGERS
61 /* Protects the trigger data below */ 70 /* Protects the trigger data below */
62 struct rw_semaphore trigger_lock; 71 struct rw_semaphore trigger_lock;
@@ -73,6 +82,36 @@ extern void led_classdev_unregister(struct led_classdev *led_cdev);
73extern void led_classdev_suspend(struct led_classdev *led_cdev); 82extern void led_classdev_suspend(struct led_classdev *led_cdev);
74extern void led_classdev_resume(struct led_classdev *led_cdev); 83extern void led_classdev_resume(struct led_classdev *led_cdev);
75 84
85/**
86 * led_blink_set - set blinking with software fallback
87 * @led_cdev: the LED to start blinking
88 * @delay_on: the time it should be on (in ms)
 89 * @delay_off: the time it should be off (in ms)
90 *
91 * This function makes the LED blink, attempting to use the
92 * hardware acceleration if possible, but falling back to
93 * software blinking if there is no hardware blinking or if
94 * the LED refuses the passed values.
95 *
96 * Note that if software blinking is active, simply calling
97 * led_cdev->brightness_set() will not stop the blinking,
 98 * use led_brightness_set() instead.
99 */
100extern void led_blink_set(struct led_classdev *led_cdev,
101 unsigned long *delay_on,
102 unsigned long *delay_off);
103/**
104 * led_brightness_set - set LED brightness
105 * @led_cdev: the LED to set
106 * @brightness: the brightness to set it to
107 *
108 * Set an LED's brightness, and, if necessary, cancel the
109 * software blink timer that implements blinking when the
110 * hardware doesn't.
111 */
112extern void led_brightness_set(struct led_classdev *led_cdev,
113 enum led_brightness brightness);
114
76/* 115/*
77 * LED Triggers 116 * LED Triggers
78 */ 117 */
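
Putting the two new entry points together, a trigger or driver requests a
blink and later stops it by setting a fixed brightness; the delays are in/out
parameters, so after the call they hold whatever timing took effect:

	unsigned long delay_on = 500, delay_off = 500;	/* ms, 1 Hz request */

	led_blink_set(led_cdev, &delay_on, &delay_off);	/* hw blink, or soft timer */
	/* delay_on/delay_off now reflect the timings actually in use */

	led_brightness_set(led_cdev, LED_OFF);	/* also cancels the soft timer */
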
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 89341c32631a..03317c8d4077 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -215,7 +215,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
215 int ret; 215 int ret;
216 216
217 if (!cond || 217 if (!cond ||
218 (ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1)) 218 ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
219 ret = okfn(skb); 219 ret = okfn(skb);
220 return ret; 220 return ret;
221} 221}
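
The NF_HOOK_COND change is a pure operator-precedence fix: == binds tighter
than =, so the old expression assigned the comparison result (0 or 1) to ret
and threw the hook verdict away. A stand-alone demonstration in plain C:

	#include <stdio.h>

	static int verdict(void) { return 2; }	/* stand-in for nf_hook_thresh() */

	int main(void)
	{
		int ret;

		ret = verdict() == 1;		/* old form: ret is 0, verdict lost */
		printf("old: %d\n", ret);

		if ((ret = verdict()) == 1)	/* fixed form: ret is 2 */
			;
		printf("new: %d\n", ret);
		return 0;
	}
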
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 057bf22a8323..40150f345982 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -747,6 +747,16 @@ struct perf_event {
747 u64 tstamp_running; 747 u64 tstamp_running;
748 u64 tstamp_stopped; 748 u64 tstamp_stopped;
749 749
750 /*
751 * timestamp shadows the actual context timing but it can
752 * be safely used in NMI interrupt context. It reflects the
753 * context time as it was when the event was last scheduled in.
754 *
755 * ctx_time already accounts for ctx->timestamp. Therefore to
756 * compute ctx_time for a sample, simply add perf_clock().
757 */
758 u64 shadow_ctx_time;
759
750 struct perf_event_attr attr; 760 struct perf_event_attr attr;
751 struct hw_perf_event hw; 761 struct hw_perf_event hw;
752 762
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 01b3d759f1fc..e031e1a486d9 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -8,6 +8,7 @@ struct platform_pwm_backlight_data {
8 int pwm_id; 8 int pwm_id;
9 unsigned int max_brightness; 9 unsigned int max_brightness;
10 unsigned int dft_brightness; 10 unsigned int dft_brightness;
11 unsigned int lth_brightness;
11 unsigned int pwm_period_ns; 12 unsigned int pwm_period_ns;
12 int (*init)(struct device *dev); 13 int (*init)(struct device *dev);
13 int (*notify)(struct device *dev, int brightness); 14 int (*notify)(struct device *dev, int brightness);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index a39cbed9ee17..ab2baa5c4884 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -34,19 +34,13 @@
34 * needed for RCU lookups (because root->height is unreliable). The only 34 * needed for RCU lookups (because root->height is unreliable). The only
35 * time callers need worry about this is when doing a lookup_slot under 35 * time callers need worry about this is when doing a lookup_slot under
36 * RCU. 36 * RCU.
37 *
38 * Indirect pointer in fact is also used to tag the last pointer of a node
39 * when it is shrunk, before we rcu free the node. See shrink code for
40 * details.
37 */ 41 */
38#define RADIX_TREE_INDIRECT_PTR 1 42#define RADIX_TREE_INDIRECT_PTR 1
39#define RADIX_TREE_RETRY ((void *)-1UL)
40
41static inline void *radix_tree_ptr_to_indirect(void *ptr)
42{
43 return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
44}
45 43
46static inline void *radix_tree_indirect_to_ptr(void *ptr)
47{
48 return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
49}
50#define radix_tree_indirect_to_ptr(ptr) \ 44#define radix_tree_indirect_to_ptr(ptr) \
51 radix_tree_indirect_to_ptr((void __force *)(ptr)) 45 radix_tree_indirect_to_ptr((void __force *)(ptr))
52 46
@@ -140,16 +134,29 @@ do { \
140 * removed. 134 * removed.
141 * 135 *
142 * For use with radix_tree_lookup_slot(). Caller must hold tree at least read 136 * For use with radix_tree_lookup_slot(). Caller must hold tree at least read
143 * locked across slot lookup and dereference. More likely, will be used with 137 * locked across slot lookup and dereference. Not required if write lock is
144 * radix_tree_replace_slot(), as well, so caller will hold tree write locked. 138 * held (ie. items cannot be concurrently inserted).
139 *
140 * radix_tree_deref_retry must be used to confirm validity of the pointer if
141 * only the read lock is held.
145 */ 142 */
146static inline void *radix_tree_deref_slot(void **pslot) 143static inline void *radix_tree_deref_slot(void **pslot)
147{ 144{
148 void *ret = rcu_dereference(*pslot); 145 return rcu_dereference(*pslot);
149 if (unlikely(radix_tree_is_indirect_ptr(ret)))
150 ret = RADIX_TREE_RETRY;
151 return ret;
152} 146}
147
148/**
149 * radix_tree_deref_retry - check radix_tree_deref_slot
150 * @arg: pointer returned by radix_tree_deref_slot
 151 * Returns: 0 if retry is not required, non-zero if it is
152 *
153 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
154 */
155static inline int radix_tree_deref_retry(void *arg)
156{
157 return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
158}
159
153/** 160/**
154 * radix_tree_replace_slot - replace item in a slot 161 * radix_tree_replace_slot - replace item in a slot
155 * @pslot: pointer to slot, returned by radix_tree_lookup_slot 162 * @pslot: pointer to slot, returned by radix_tree_lookup_slot
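
The retry protocol the new helper implements, then, is a loop of the following
shape; this mirrors how the page-cache lookups use the pair (the tree and
index here are illustrative):

	void **slot;
	void *item;

	rcu_read_lock();
repeat:
	slot = radix_tree_lookup_slot(&tree, index);
	item = slot ? radix_tree_deref_slot(slot) : NULL;
	if (radix_tree_deref_retry(item))
		goto repeat;		/* caught a node being shrunk; look again */
	rcu_read_unlock();
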
diff --git a/include/linux/resource.h b/include/linux/resource.h
index 88d36f9145ba..d01c96c1966e 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -2,6 +2,7 @@
2#define _LINUX_RESOURCE_H 2#define _LINUX_RESOURCE_H
3 3
4#include <linux/time.h> 4#include <linux/time.h>
5#include <linux/types.h>
5 6
6/* 7/*
7 * Resource control/accounting header file for linux 8 * Resource control/accounting header file for linux
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index bbdb680ffbe9..aea0d438e3c7 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -82,18 +82,28 @@ struct svc_xprt {
82 struct net *xpt_net; 82 struct net *xpt_net;
83}; 83};
84 84
85static inline void register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) 85static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
86{ 86{
87 spin_lock(&xpt->xpt_lock); 87 spin_lock(&xpt->xpt_lock);
88 list_add(&u->list, &xpt->xpt_users); 88 list_del_init(&u->list);
89 spin_unlock(&xpt->xpt_lock); 89 spin_unlock(&xpt->xpt_lock);
90} 90}
91 91
92static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) 92static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
93{ 93{
94 spin_lock(&xpt->xpt_lock); 94 spin_lock(&xpt->xpt_lock);
95 list_del_init(&u->list); 95 if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) {
96 /*
97 * The connection is about to be deleted soon (or,
98 * worse, may already be deleted--in which case we've
99 * already notified the xpt_users).
100 */
101 spin_unlock(&xpt->xpt_lock);
102 return -ENOTCONN;
103 }
104 list_add(&u->list, &xpt->xpt_users);
96 spin_unlock(&xpt->xpt_lock); 105 spin_unlock(&xpt->xpt_lock);
106 return 0;
97} 107}
98 108
99int svc_reg_xprt_class(struct svc_xprt_class *); 109int svc_reg_xprt_class(struct svc_xprt_class *);
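
register_xpt_user() can now fail when it races with transport close, and the
caller is expected to deliver its own notification in that case, exactly as
the nfsd4 hunks earlier in this patch do. In sketch form, with a hypothetical
callback:

	u->callback = my_conn_lost;		/* hypothetical svc_xpt_user setup */
	if (register_xpt_user(xprt, u))
		my_conn_lost(u);		/* xprt already closing; no callback
						 * will ever fire through xpt_users */
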
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2a754748dd5f..c7ea9bc8897c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -50,7 +50,7 @@
50#define N_V253 19 /* Codec control over voice modem */ 50#define N_V253 19 /* Codec control over voice modem */
51#define N_CAIF 20 /* CAIF protocol for talking to modems */ 51#define N_CAIF 20 /* CAIF protocol for talking to modems */
52#define N_GSM0710 21 /* GSM 0710 Mux */ 52#define N_GSM0710 21 /* GSM 0710 Mux */
53#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */ 53#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
54 54
55/* 55/*
56 * This character is the same as _POSIX_VDISABLE: it cannot be used as 56 * This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 35fe6ab222bb..24300d8a1bc1 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -797,7 +797,7 @@ struct usbdrv_wrap {
797 * @disconnect: Called when the interface is no longer accessible, usually 797 * @disconnect: Called when the interface is no longer accessible, usually
798 * because its device has been (or is being) disconnected or the 798 * because its device has been (or is being) disconnected or the
799 * driver module is being unloaded. 799 * driver module is being unloaded.
800 * @ioctl: Used for drivers that want to talk to userspace through 800 * @unlocked_ioctl: Used for drivers that want to talk to userspace through
801 * the "usbfs" filesystem. This lets devices provide ways to 801 * the "usbfs" filesystem. This lets devices provide ways to
802 * expose information to user space regardless of where they 802 * expose information to user space regardless of where they
803 * do (or don't) show up otherwise in the filesystem. 803 * do (or don't) show up otherwise in the filesystem.
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index ee2dd1d506ed..2387f9fc8138 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -89,6 +89,8 @@ struct musb_hdrc_config {
89 /* A GPIO controlling VRSEL in Blackfin */ 89 /* A GPIO controlling VRSEL in Blackfin */
90 unsigned int gpio_vrsel; 90 unsigned int gpio_vrsel;
91 unsigned int gpio_vrsel_active; 91 unsigned int gpio_vrsel_active;
92 /* musb CLKIN in Blackfin in MHZ */
93 unsigned char clkin;
92#endif 94#endif
93 95
94}; 96};
diff --git a/include/net/dn.h b/include/net/dn.h
index e5469f7b67a3..a514a3cf4573 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -225,7 +225,7 @@ extern int decnet_di_count;
225extern int decnet_dr_count; 225extern int decnet_dr_count;
226extern int decnet_no_fc_max_cwnd; 226extern int decnet_no_fc_max_cwnd;
227 227
228extern int sysctl_decnet_mem[3]; 228extern long sysctl_decnet_mem[3];
229extern int sysctl_decnet_wmem[3]; 229extern int sysctl_decnet_wmem[3];
230extern int sysctl_decnet_rmem[3]; 230extern int sysctl_decnet_rmem[3];
231 231
diff --git a/include/net/sock.h b/include/net/sock.h
index c7a736228ca2..a6338d039857 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -762,7 +762,7 @@ struct proto {
762 762
763 /* Memory pressure */ 763 /* Memory pressure */
764 void (*enter_memory_pressure)(struct sock *sk); 764 void (*enter_memory_pressure)(struct sock *sk);
765 atomic_t *memory_allocated; /* Current allocated memory. */ 765 atomic_long_t *memory_allocated; /* Current allocated memory. */
766 struct percpu_counter *sockets_allocated; /* Current number of sockets. */ 766 struct percpu_counter *sockets_allocated; /* Current number of sockets. */
767 /* 767 /*
768 * Pressure flag: try to collapse. 768 * Pressure flag: try to collapse.
@@ -771,7 +771,7 @@ struct proto {
771 * is strict, actions are advisory and have some latency. 771 * is strict, actions are advisory and have some latency.
772 */ 772 */
773 int *memory_pressure; 773 int *memory_pressure;
774 int *sysctl_mem; 774 long *sysctl_mem;
775 int *sysctl_wmem; 775 int *sysctl_wmem;
776 int *sysctl_rmem; 776 int *sysctl_rmem;
777 int max_header; 777 int max_header;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4fee0424af7e..e36c874c7fb1 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -224,7 +224,7 @@ extern int sysctl_tcp_fack;
224extern int sysctl_tcp_reordering; 224extern int sysctl_tcp_reordering;
225extern int sysctl_tcp_ecn; 225extern int sysctl_tcp_ecn;
226extern int sysctl_tcp_dsack; 226extern int sysctl_tcp_dsack;
227extern int sysctl_tcp_mem[3]; 227extern long sysctl_tcp_mem[3];
228extern int sysctl_tcp_wmem[3]; 228extern int sysctl_tcp_wmem[3];
229extern int sysctl_tcp_rmem[3]; 229extern int sysctl_tcp_rmem[3];
230extern int sysctl_tcp_app_win; 230extern int sysctl_tcp_app_win;
@@ -247,7 +247,7 @@ extern int sysctl_tcp_cookie_size;
247extern int sysctl_tcp_thin_linear_timeouts; 247extern int sysctl_tcp_thin_linear_timeouts;
248extern int sysctl_tcp_thin_dupack; 248extern int sysctl_tcp_thin_dupack;
249 249
250extern atomic_t tcp_memory_allocated; 250extern atomic_long_t tcp_memory_allocated;
251extern struct percpu_counter tcp_sockets_allocated; 251extern struct percpu_counter tcp_sockets_allocated;
252extern int tcp_memory_pressure; 252extern int tcp_memory_pressure;
253 253
@@ -280,7 +280,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
280 } 280 }
281 281
282 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 282 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
283 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) 283 atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
284 return true; 284 return true;
285 return false; 285 return false;
286} 286}
diff --git a/include/net/udp.h b/include/net/udp.h
index 200b82848c9a..bb967dd59bf7 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -105,10 +105,10 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table,
105 105
106extern struct proto udp_prot; 106extern struct proto udp_prot;
107 107
108extern atomic_t udp_memory_allocated; 108extern atomic_long_t udp_memory_allocated;
109 109
110/* sysctl variables for udp */ 110/* sysctl variables for udp */
111extern int sysctl_udp_mem[3]; 111extern long sysctl_udp_mem[3];
112extern int sysctl_udp_rmem_min; 112extern int sysctl_udp_rmem_min;
113extern int sysctl_udp_wmem_min; 113extern int sysctl_udp_wmem_min;
114 114
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 877fb306d415..17110a4a4fc2 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -194,14 +194,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
194 194
195 account_global_scheduler_latency(tsk, &lat); 195 account_global_scheduler_latency(tsk, &lat);
196 196
197 /* 197 for (i = 0; i < tsk->latency_record_count; i++) {
198 * short term hack; if we're > 32 we stop; future we recycle:
199 */
200 tsk->latency_record_count++;
201 if (tsk->latency_record_count >= LT_SAVECOUNT)
202 goto out_unlock;
203
204 for (i = 0; i < LT_SAVECOUNT; i++) {
205 struct latency_record *mylat; 198 struct latency_record *mylat;
206 int same = 1; 199 int same = 1;
207 200
@@ -227,8 +220,14 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
227 } 220 }
228 } 221 }
229 222
223 /*
224 * short term hack; if we're > 32 we stop; future we recycle:
225 */
226 if (tsk->latency_record_count >= LT_SAVECOUNT)
227 goto out_unlock;
228
230 /* Allocated a new one: */ 229 /* Allocated a new one: */
231 i = tsk->latency_record_count; 230 i = tsk->latency_record_count++;
232 memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); 231 memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
233 232
234out_unlock: 233out_unlock:
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 517d827f4982..cb6c0d2af68f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -674,6 +674,8 @@ event_sched_in(struct perf_event *event,
674 674
675 event->tstamp_running += ctx->time - event->tstamp_stopped; 675 event->tstamp_running += ctx->time - event->tstamp_stopped;
676 676
677 event->shadow_ctx_time = ctx->time - ctx->timestamp;
678
677 if (!is_software_event(event)) 679 if (!is_software_event(event))
678 cpuctx->active_oncpu++; 680 cpuctx->active_oncpu++;
679 ctx->nr_active++; 681 ctx->nr_active++;
@@ -3396,7 +3398,8 @@ static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3396} 3398}
3397 3399
3398static void perf_output_read_one(struct perf_output_handle *handle, 3400static void perf_output_read_one(struct perf_output_handle *handle,
3399 struct perf_event *event) 3401 struct perf_event *event,
3402 u64 enabled, u64 running)
3400{ 3403{
3401 u64 read_format = event->attr.read_format; 3404 u64 read_format = event->attr.read_format;
3402 u64 values[4]; 3405 u64 values[4];
@@ -3404,11 +3407,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
3404 3407
3405 values[n++] = perf_event_count(event); 3408 values[n++] = perf_event_count(event);
3406 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 3409 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3407 values[n++] = event->total_time_enabled + 3410 values[n++] = enabled +
3408 atomic64_read(&event->child_total_time_enabled); 3411 atomic64_read(&event->child_total_time_enabled);
3409 } 3412 }
3410 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 3413 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3411 values[n++] = event->total_time_running + 3414 values[n++] = running +
3412 atomic64_read(&event->child_total_time_running); 3415 atomic64_read(&event->child_total_time_running);
3413 } 3416 }
3414 if (read_format & PERF_FORMAT_ID) 3417 if (read_format & PERF_FORMAT_ID)
@@ -3421,7 +3424,8 @@ static void perf_output_read_one(struct perf_output_handle *handle,
3421 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. 3424 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3422 */ 3425 */
3423static void perf_output_read_group(struct perf_output_handle *handle, 3426static void perf_output_read_group(struct perf_output_handle *handle,
3424 struct perf_event *event) 3427 struct perf_event *event,
3428 u64 enabled, u64 running)
3425{ 3429{
3426 struct perf_event *leader = event->group_leader, *sub; 3430 struct perf_event *leader = event->group_leader, *sub;
3427 u64 read_format = event->attr.read_format; 3431 u64 read_format = event->attr.read_format;
@@ -3431,10 +3435,10 @@ static void perf_output_read_group(struct perf_output_handle *handle,
3431 values[n++] = 1 + leader->nr_siblings; 3435 values[n++] = 1 + leader->nr_siblings;
3432 3436
3433 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 3437 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3434 values[n++] = leader->total_time_enabled; 3438 values[n++] = enabled;
3435 3439
3436 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 3440 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3437 values[n++] = leader->total_time_running; 3441 values[n++] = running;
3438 3442
3439 if (leader != event) 3443 if (leader != event)
3440 leader->pmu->read(leader); 3444 leader->pmu->read(leader);
@@ -3459,13 +3463,35 @@ static void perf_output_read_group(struct perf_output_handle *handle,
3459 } 3463 }
3460} 3464}
3461 3465
3466#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3467 PERF_FORMAT_TOTAL_TIME_RUNNING)
3468
3462static void perf_output_read(struct perf_output_handle *handle, 3469static void perf_output_read(struct perf_output_handle *handle,
3463 struct perf_event *event) 3470 struct perf_event *event)
3464{ 3471{
3472 u64 enabled = 0, running = 0, now, ctx_time;
3473 u64 read_format = event->attr.read_format;
3474
3475 /*
3476 * compute total_time_enabled, total_time_running
3477 * based on snapshot values taken when the event
3478 * was last scheduled in.
3479 *
3480 * we cannot simply call update_context_time()
3481 * because of locking issues, as we are called in
3482 * NMI context
3483 */
3484 if (read_format & PERF_FORMAT_TOTAL_TIMES) {
3485 now = perf_clock();
3486 ctx_time = event->shadow_ctx_time + now;
3487 enabled = ctx_time - event->tstamp_enabled;
3488 running = ctx_time - event->tstamp_running;
3489 }
3490
3465 if (event->attr.read_format & PERF_FORMAT_GROUP) 3491 if (event->attr.read_format & PERF_FORMAT_GROUP)
3466 perf_output_read_group(handle, event); 3492 perf_output_read_group(handle, event, enabled, running);
3467 else 3493 else
3468 perf_output_read_one(handle, event); 3494 perf_output_read_one(handle, event, enabled, running);
3469} 3495}
3470 3496
3471void perf_output_sample(struct perf_output_handle *handle, 3497void perf_output_sample(struct perf_output_handle *handle,
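
The shadow_ctx_time snapshot added in event_sched_in() is what lets perf_output_read() compute the enabled/running times without update_context_time(): in NMI context the required locks cannot be taken, but ctx->time and perf_clock() advance in lockstep, so the current context time can be reconstructed from the clock alone. A compact sketch of the arithmetic, with standalone types rather than the kernel structures:

    #include <stdio.h>

    typedef unsigned long long u64;

    /* At schedule-in the kernel snapshots
     *     shadow_ctx_time = ctx->time - ctx->timestamp
     * so that later, in NMI context,
     *     ctx_time = shadow_ctx_time + perf_clock()
     * reconstructs ctx->time (modular u64 arithmetic makes the
     * negative intermediate harmless). */
    struct evt {
        u64 shadow_ctx_time;
        u64 tstamp_enabled;   /* ctx->time when the event was enabled   */
        u64 tstamp_running;   /* ctx->time when it last started running */
    };

    static void read_times(const struct evt *e, u64 now,
                           u64 *enabled, u64 *running)
    {
        u64 ctx_time = e->shadow_ctx_time + now;

        *enabled = ctx_time - e->tstamp_enabled;
        *running = ctx_time - e->tstamp_running;
    }

    int main(void)
    {
        /* ctx->time was 1000 at a timestamp of 4000; the clock now
         * reads 4600, so the reconstructed ctx_time is 1600. */
        struct evt e = { 1000 - 4000, 200, 700 };
        u64 enabled, running;

        read_times(&e, 4600, &enabled, &running);
        printf("enabled=%llu running=%llu\n", enabled, running); /* 1400 900 */
        return 0;
    }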
diff --git a/kernel/printk.c b/kernel/printk.c
index b2ebaee8c377..38e7d5868d60 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -261,6 +261,12 @@ static inline void boot_delay_msec(void)
261} 261}
262#endif 262#endif
263 263
264#ifdef CONFIG_SECURITY_DMESG_RESTRICT
265int dmesg_restrict = 1;
266#else
267int dmesg_restrict;
268#endif
269
264int do_syslog(int type, char __user *buf, int len, bool from_file) 270int do_syslog(int type, char __user *buf, int len, bool from_file)
265{ 271{
266 unsigned i, j, limit, count; 272 unsigned i, j, limit, count;
diff --git a/kernel/range.c b/kernel/range.c
index 471b66acabb5..37fa9b99ad58 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -119,7 +119,7 @@ static int cmp_range(const void *x1, const void *x2)
119 119
120int clean_sort_range(struct range *range, int az) 120int clean_sort_range(struct range *range, int az)
121{ 121{
122 int i, j, k = az - 1, nr_range = 0; 122 int i, j, k = az - 1, nr_range = az;
123 123
124 for (i = 0; i < k; i++) { 124 for (i = 0; i < k; i++) {
125 if (range[i].end) 125 if (range[i].end)
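
The clean_sort_range() change is a corner-case fix: nr_range is only assigned when the counting scan finds an empty slot, so a fully populated array used to make the function return 0 instead of az. A small illustration of the counting step in standalone form:

    #include <stdio.h>

    struct range { unsigned long start, end; };

    /* Sketch of the counting step: index of the first empty slot,
     * or az when every slot is in use. */
    static int count_used(struct range *range, int az)
    {
        int i, nr_range = az;   /* was 0: wrong when no slot is empty */

        for (i = 0; i < az; i++) {
            if (!range[i].end) {
                nr_range = i;
                break;
            }
        }
        return nr_range;
    }

    int main(void)
    {
        struct range r[2] = { { 0, 4 }, { 4, 8 } };   /* both slots used */
        printf("%d\n", count_used(r, 2));             /* prints 2, not 0 */
        return 0;
    }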
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c33a1edb799f..b65bf634035e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -704,6 +704,15 @@ static struct ctl_table kern_table[] = {
704 }, 704 },
705#endif 705#endif
706 { 706 {
707 .procname = "dmesg_restrict",
708 .data = &dmesg_restrict,
709 .maxlen = sizeof(int),
710 .mode = 0644,
711 .proc_handler = proc_dointvec_minmax,
712 .extra1 = &zero,
713 .extra2 = &one,
714 },
715 {
707 .procname = "ngroups_max", 716 .procname = "ngroups_max",
708 .data = &ngroups_max, 717 .data = &ngroups_max,
709 .maxlen = sizeof (int), 718 .maxlen = sizeof (int),
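
Together with the printk.c hunk above, this wires up a kernel.dmesg_restrict sysctl clamped to 0/1 by proc_dointvec_minmax, defaulting to 1 when CONFIG_SECURITY_DMESG_RESTRICT is set. The enforcement site is not part of these hunks, so the gate below is only a hypothetical illustration of how such a knob is typically consulted, not the real do_syslog() logic:

    #include <stdio.h>

    static int dmesg_restrict = 1;    /* as if CONFIG_SECURITY_DMESG_RESTRICT=y */

    /* Hypothetical check; the kernel would test capable(CAP_SYS_ADMIN)
     * and return -EPERM on failure. */
    static int syslog_permitted(int has_cap_sys_admin)
    {
        if (dmesg_restrict && !has_cap_sys_admin)
            return 0;
        return 1;
    }

    int main(void)
    {
        printf("unprivileged read allowed: %d\n", syslog_permitted(0)); /* 0 */
        return 0;
    }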
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bc251ed66724..7b8ec0281548 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -168,7 +168,6 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
168static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), 168static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
169 BLK_TC_ACT(BLK_TC_WRITE) }; 169 BLK_TC_ACT(BLK_TC_WRITE) };
170 170
171#define BLK_TC_HARDBARRIER BLK_TC_BARRIER
172#define BLK_TC_RAHEAD BLK_TC_AHEAD 171#define BLK_TC_RAHEAD BLK_TC_AHEAD
173 172
174/* The ilog2() calls fall out because they're constant */ 173/* The ilog2() calls fall out because they're constant */
@@ -196,7 +195,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
196 return; 195 return;
197 196
198 what |= ddir_act[rw & WRITE]; 197 what |= ddir_act[rw & WRITE];
199 what |= MASK_TC_BIT(rw, HARDBARRIER);
200 what |= MASK_TC_BIT(rw, SYNC); 198 what |= MASK_TC_BIT(rw, SYNC);
201 what |= MASK_TC_BIT(rw, RAHEAD); 199 what |= MASK_TC_BIT(rw, RAHEAD);
202 what |= MASK_TC_BIT(rw, META); 200 what |= MASK_TC_BIT(rw, META);
@@ -1807,8 +1805,6 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1807 1805
1808 if (rw & REQ_RAHEAD) 1806 if (rw & REQ_RAHEAD)
1809 rwbs[i++] = 'A'; 1807 rwbs[i++] = 'A';
1810 if (rw & REQ_HARDBARRIER)
1811 rwbs[i++] = 'B';
1812 if (rw & REQ_SYNC) 1808 if (rw & REQ_SYNC)
1813 rwbs[i++] = 'S'; 1809 rwbs[i++] = 'S';
1814 if (rw & REQ_META) 1810 if (rw & REQ_META)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f412ab4c24f..5086bb962b4d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -82,6 +82,16 @@ struct radix_tree_preload {
82}; 82};
83static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; 83static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
84 84
85static inline void *ptr_to_indirect(void *ptr)
86{
87 return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
88}
89
90static inline void *indirect_to_ptr(void *ptr)
91{
92 return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
93}
94
85static inline gfp_t root_gfp_mask(struct radix_tree_root *root) 95static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
86{ 96{
87 return root->gfp_mask & __GFP_BITS_MASK; 97 return root->gfp_mask & __GFP_BITS_MASK;
@@ -265,7 +275,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
265 return -ENOMEM; 275 return -ENOMEM;
266 276
267 /* Increase the height. */ 277 /* Increase the height. */
268 node->slots[0] = radix_tree_indirect_to_ptr(root->rnode); 278 node->slots[0] = indirect_to_ptr(root->rnode);
269 279
270 /* Propagate the aggregated tag info into the new root */ 280 /* Propagate the aggregated tag info into the new root */
271 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { 281 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -276,7 +286,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
276 newheight = root->height+1; 286 newheight = root->height+1;
277 node->height = newheight; 287 node->height = newheight;
278 node->count = 1; 288 node->count = 1;
279 node = radix_tree_ptr_to_indirect(node); 289 node = ptr_to_indirect(node);
280 rcu_assign_pointer(root->rnode, node); 290 rcu_assign_pointer(root->rnode, node);
281 root->height = newheight; 291 root->height = newheight;
282 } while (height > root->height); 292 } while (height > root->height);
@@ -309,7 +319,7 @@ int radix_tree_insert(struct radix_tree_root *root,
309 return error; 319 return error;
310 } 320 }
311 321
312 slot = radix_tree_indirect_to_ptr(root->rnode); 322 slot = indirect_to_ptr(root->rnode);
313 323
314 height = root->height; 324 height = root->height;
315 shift = (height-1) * RADIX_TREE_MAP_SHIFT; 325 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
@@ -325,8 +335,7 @@ int radix_tree_insert(struct radix_tree_root *root,
325 rcu_assign_pointer(node->slots[offset], slot); 335 rcu_assign_pointer(node->slots[offset], slot);
326 node->count++; 336 node->count++;
327 } else 337 } else
328 rcu_assign_pointer(root->rnode, 338 rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
329 radix_tree_ptr_to_indirect(slot));
330 } 339 }
331 340
332 /* Go a level down */ 341 /* Go a level down */
@@ -374,7 +383,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
374 return NULL; 383 return NULL;
375 return is_slot ? (void *)&root->rnode : node; 384 return is_slot ? (void *)&root->rnode : node;
376 } 385 }
377 node = radix_tree_indirect_to_ptr(node); 386 node = indirect_to_ptr(node);
378 387
379 height = node->height; 388 height = node->height;
380 if (index > radix_tree_maxindex(height)) 389 if (index > radix_tree_maxindex(height))
@@ -393,7 +402,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
393 height--; 402 height--;
394 } while (height > 0); 403 } while (height > 0);
395 404
396 return is_slot ? (void *)slot:node; 405 return is_slot ? (void *)slot : indirect_to_ptr(node);
397} 406}
398 407
399/** 408/**
@@ -455,7 +464,7 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
455 height = root->height; 464 height = root->height;
456 BUG_ON(index > radix_tree_maxindex(height)); 465 BUG_ON(index > radix_tree_maxindex(height));
457 466
458 slot = radix_tree_indirect_to_ptr(root->rnode); 467 slot = indirect_to_ptr(root->rnode);
459 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 468 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
460 469
461 while (height > 0) { 470 while (height > 0) {
@@ -509,7 +518,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
509 518
510 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 519 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
511 pathp->node = NULL; 520 pathp->node = NULL;
512 slot = radix_tree_indirect_to_ptr(root->rnode); 521 slot = indirect_to_ptr(root->rnode);
513 522
514 while (height > 0) { 523 while (height > 0) {
515 int offset; 524 int offset;
@@ -579,7 +588,7 @@ int radix_tree_tag_get(struct radix_tree_root *root,
579 588
580 if (!radix_tree_is_indirect_ptr(node)) 589 if (!radix_tree_is_indirect_ptr(node))
581 return (index == 0); 590 return (index == 0);
582 node = radix_tree_indirect_to_ptr(node); 591 node = indirect_to_ptr(node);
583 592
584 height = node->height; 593 height = node->height;
585 if (index > radix_tree_maxindex(height)) 594 if (index > radix_tree_maxindex(height))
@@ -666,7 +675,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
666 } 675 }
667 676
668 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 677 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
669 slot = radix_tree_indirect_to_ptr(root->rnode); 678 slot = indirect_to_ptr(root->rnode);
670 679
671 /* 680 /*
672 * we fill the path from (root->height - 2) to 0, leaving the index at 681 * we fill the path from (root->height - 2) to 0, leaving the index at
@@ -897,7 +906,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
897 results[0] = node; 906 results[0] = node;
898 return 1; 907 return 1;
899 } 908 }
900 node = radix_tree_indirect_to_ptr(node); 909 node = indirect_to_ptr(node);
901 910
902 max_index = radix_tree_maxindex(node->height); 911 max_index = radix_tree_maxindex(node->height);
903 912
@@ -916,7 +925,8 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
916 slot = *(((void ***)results)[ret + i]); 925 slot = *(((void ***)results)[ret + i]);
917 if (!slot) 926 if (!slot)
918 continue; 927 continue;
919 results[ret + nr_found] = rcu_dereference_raw(slot); 928 results[ret + nr_found] =
929 indirect_to_ptr(rcu_dereference_raw(slot));
920 nr_found++; 930 nr_found++;
921 } 931 }
922 ret += nr_found; 932 ret += nr_found;
@@ -965,7 +975,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
965 results[0] = (void **)&root->rnode; 975 results[0] = (void **)&root->rnode;
966 return 1; 976 return 1;
967 } 977 }
968 node = radix_tree_indirect_to_ptr(node); 978 node = indirect_to_ptr(node);
969 979
970 max_index = radix_tree_maxindex(node->height); 980 max_index = radix_tree_maxindex(node->height);
971 981
@@ -1090,7 +1100,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
1090 results[0] = node; 1100 results[0] = node;
1091 return 1; 1101 return 1;
1092 } 1102 }
1093 node = radix_tree_indirect_to_ptr(node); 1103 node = indirect_to_ptr(node);
1094 1104
1095 max_index = radix_tree_maxindex(node->height); 1105 max_index = radix_tree_maxindex(node->height);
1096 1106
@@ -1109,7 +1119,8 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
1109 slot = *(((void ***)results)[ret + i]); 1119 slot = *(((void ***)results)[ret + i]);
1110 if (!slot) 1120 if (!slot)
1111 continue; 1121 continue;
1112 results[ret + nr_found] = rcu_dereference_raw(slot); 1122 results[ret + nr_found] =
1123 indirect_to_ptr(rcu_dereference_raw(slot));
1113 nr_found++; 1124 nr_found++;
1114 } 1125 }
1115 ret += nr_found; 1126 ret += nr_found;
@@ -1159,7 +1170,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
1159 results[0] = (void **)&root->rnode; 1170 results[0] = (void **)&root->rnode;
1160 return 1; 1171 return 1;
1161 } 1172 }
1162 node = radix_tree_indirect_to_ptr(node); 1173 node = indirect_to_ptr(node);
1163 1174
1164 max_index = radix_tree_maxindex(node->height); 1175 max_index = radix_tree_maxindex(node->height);
1165 1176
@@ -1195,7 +1206,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
1195 void *newptr; 1206 void *newptr;
1196 1207
1197 BUG_ON(!radix_tree_is_indirect_ptr(to_free)); 1208 BUG_ON(!radix_tree_is_indirect_ptr(to_free));
1198 to_free = radix_tree_indirect_to_ptr(to_free); 1209 to_free = indirect_to_ptr(to_free);
1199 1210
1200 /* 1211 /*
1201 * The candidate node has more than one child, or its child 1212 * The candidate node has more than one child, or its child
@@ -1208,16 +1219,39 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
1208 1219
1209 /* 1220 /*
1210 * We don't need rcu_assign_pointer(), since we are simply 1221 * We don't need rcu_assign_pointer(), since we are simply
1211 * moving the node from one part of the tree to another. If 1222 * moving the node from one part of the tree to another: if it
1212 * it was safe to dereference the old pointer to it 1223 * was safe to dereference the old pointer to it
1213 * (to_free->slots[0]), it will be safe to dereference the new 1224 * (to_free->slots[0]), it will be safe to dereference the new
1214 * one (root->rnode). 1225 * one (root->rnode) as far as dependent read barriers go.
1215 */ 1226 */
1216 newptr = to_free->slots[0]; 1227 newptr = to_free->slots[0];
1217 if (root->height > 1) 1228 if (root->height > 1)
1218 newptr = radix_tree_ptr_to_indirect(newptr); 1229 newptr = ptr_to_indirect(newptr);
1219 root->rnode = newptr; 1230 root->rnode = newptr;
1220 root->height--; 1231 root->height--;
1232
1233 /*
1234 * We have a dilemma here. The node's slot[0] must not be
1235 * NULLed in case there are concurrent lookups expecting to
1236 * find the item. However if this was a bottom-level node,
1237 * then it may be subject to the slot pointer being visible
1238 * to callers dereferencing it. If the item corresponding to
1239 * slot[0] is subsequently deleted, these callers would expect
1240 * their slot to become empty sooner or later.
1241 *
1242 * For example, lockless pagecache will look up a slot, deref
1243 * the page pointer, and if the page has a 0 refcount it means it
1244 * was concurrently deleted from pagecache so try the deref
1245 * again. Fortunately there is already a requirement for logic
1246 * to retry the entire slot lookup -- the indirect pointer
1247 * problem (replacing a direct root node with an indirect pointer
1248 * also results in a stale slot). So tag the slot as indirect
1249 * to force callers to retry.
1250 */
1251 if (root->height == 0)
1252 *((unsigned long *)&to_free->slots[0]) |=
1253 RADIX_TREE_INDIRECT_PTR;
1254
1221 radix_tree_node_free(to_free); 1255 radix_tree_node_free(to_free);
1222 } 1256 }
1223} 1257}
@@ -1254,7 +1288,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1254 root->rnode = NULL; 1288 root->rnode = NULL;
1255 goto out; 1289 goto out;
1256 } 1290 }
1257 slot = radix_tree_indirect_to_ptr(slot); 1291 slot = indirect_to_ptr(slot);
1258 1292
1259 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 1293 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
1260 pathp->node = NULL; 1294 pathp->node = NULL;
@@ -1296,8 +1330,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1296 radix_tree_node_free(to_free); 1330 radix_tree_node_free(to_free);
1297 1331
1298 if (pathp->node->count) { 1332 if (pathp->node->count) {
1299 if (pathp->node == 1333 if (pathp->node == indirect_to_ptr(root->rnode))
1300 radix_tree_indirect_to_ptr(root->rnode))
1301 radix_tree_shrink(root); 1334 radix_tree_shrink(root);
1302 goto out; 1335 goto out;
1303 } 1336 }
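
The new ptr_to_indirect()/indirect_to_ptr() helpers and the slot retagging in radix_tree_shrink() all rest on one trick: radix-tree nodes are word-aligned, so bit 0 of a pointer is free to mark it "indirect", and a lookup that sees the bit set on a slot value knows the tree was reorganized underneath it and must retry. A standalone sketch of the tagging, with hypothetical names rather than the kernel helpers:

    #include <stdio.h>

    /* Bit 0 is free because nodes are word-aligned. */
    #define INDIRECT_BIT 1UL

    static void *to_indirect(void *p)
    {
        return (void *)((unsigned long)p | INDIRECT_BIT);
    }

    static void *from_indirect(void *p)
    {
        return (void *)((unsigned long)p & ~INDIRECT_BIT);
    }

    /* What radix_tree_deref_retry() amounts to: a tagged value means
     * the slot is stale and the whole lookup must be redone. */
    static int deref_retry(void *p)
    {
        return (unsigned long)p & INDIRECT_BIT;
    }

    int main(void)
    {
        static int item;
        void *slot = to_indirect(&item);   /* shrink tagged the old slot */

        printf("retry=%d untagged=%p\n",
               deref_retry(slot), from_indirect(slot));
        return 0;
    }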
diff --git a/mm/filemap.c b/mm/filemap.c
index 61ba5e405791..ea89840fc65f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -644,7 +644,9 @@ repeat:
644 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); 644 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
645 if (pagep) { 645 if (pagep) {
646 page = radix_tree_deref_slot(pagep); 646 page = radix_tree_deref_slot(pagep);
647 if (unlikely(!page || page == RADIX_TREE_RETRY)) 647 if (unlikely(!page))
648 goto out;
649 if (radix_tree_deref_retry(page))
648 goto repeat; 650 goto repeat;
649 651
650 if (!page_cache_get_speculative(page)) 652 if (!page_cache_get_speculative(page))
@@ -660,6 +662,7 @@ repeat:
660 goto repeat; 662 goto repeat;
661 } 663 }
662 } 664 }
665out:
663 rcu_read_unlock(); 666 rcu_read_unlock();
664 667
665 return page; 668 return page;
@@ -777,12 +780,11 @@ repeat:
777 page = radix_tree_deref_slot((void **)pages[i]); 780 page = radix_tree_deref_slot((void **)pages[i]);
778 if (unlikely(!page)) 781 if (unlikely(!page))
779 continue; 782 continue;
780 /* 783 if (radix_tree_deref_retry(page)) {
781 * this can only trigger if nr_found == 1, making livelock 784 if (ret)
782 * a non issue. 785 start = pages[ret-1]->index;
783 */
784 if (unlikely(page == RADIX_TREE_RETRY))
785 goto restart; 786 goto restart;
787 }
786 788
787 if (!page_cache_get_speculative(page)) 789 if (!page_cache_get_speculative(page))
788 goto repeat; 790 goto repeat;
@@ -830,11 +832,7 @@ repeat:
830 page = radix_tree_deref_slot((void **)pages[i]); 832 page = radix_tree_deref_slot((void **)pages[i]);
831 if (unlikely(!page)) 833 if (unlikely(!page))
832 continue; 834 continue;
833 /* 835 if (radix_tree_deref_retry(page))
834 * this can only trigger if nr_found == 1, making livelock
835 * a non issue.
836 */
837 if (unlikely(page == RADIX_TREE_RETRY))
838 goto restart; 836 goto restart;
839 837
840 if (page->mapping == NULL || page->index != index) 838 if (page->mapping == NULL || page->index != index)
@@ -887,11 +885,7 @@ repeat:
887 page = radix_tree_deref_slot((void **)pages[i]); 885 page = radix_tree_deref_slot((void **)pages[i]);
888 if (unlikely(!page)) 886 if (unlikely(!page))
889 continue; 887 continue;
890 /* 888 if (radix_tree_deref_retry(page))
891 * this can only trigger if nr_found == 1, making livelock
892 * a non issue.
893 */
894 if (unlikely(page == RADIX_TREE_RETRY))
895 goto restart; 889 goto restart;
896 890
897 if (!page_cache_get_speculative(page)) 891 if (!page_cache_get_speculative(page))
@@ -1029,6 +1023,9 @@ find_page:
1029 goto page_not_up_to_date; 1023 goto page_not_up_to_date;
1030 if (!trylock_page(page)) 1024 if (!trylock_page(page))
1031 goto page_not_up_to_date; 1025 goto page_not_up_to_date;
1026 /* Did it get truncated before we got the lock? */
1027 if (!page->mapping)
1028 goto page_not_up_to_date_locked;
1032 if (!mapping->a_ops->is_partially_uptodate(page, 1029 if (!mapping->a_ops->is_partially_uptodate(page,
1033 desc, offset)) 1030 desc, offset))
1034 goto page_not_up_to_date_locked; 1031 goto page_not_up_to_date_locked;
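
On the mm/filemap.c side the RADIX_TREE_RETRY comparisons become radix_tree_deref_retry() checks, and find_get_page() now distinguishes a NULL slot (page truly gone, return) from a retry marker (tree shrank underneath, repeat). A sketch of that retry structure, with hypothetical stand-ins for the lookup and refcount helpers:

    #include <stdio.h>

    struct page { int refcount; };

    /* Stand-ins so the retry shape can be shown in isolation. */
    static struct page the_page = { 1 };

    static struct page *deref_slot(void) { return &the_page; }
    static int deref_retry(struct page *p) { return (unsigned long)p & 1UL; }
    static int page_cache_get_speculative(struct page *p)
    {
        return p->refcount > 0;
    }

    static struct page *find_page(void)
    {
        struct page *page;

    repeat:
        page = deref_slot();                 /* RCU-protected read    */
        if (!page)
            return NULL;                     /* truly absent: give up */
        if (deref_retry(page))
            goto repeat;                     /* tree changed: retry   */
        if (!page_cache_get_speculative(page))
            goto repeat;                     /* lost a race with free */
        return page;
    }

    int main(void)
    {
        printf("page at %p\n", (void *)find_page());
        return 0;
    }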
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9a99cfaf0a19..2efa8ea07ff7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4208,15 +4208,17 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
4208 4208
4209 memset(mem, 0, size); 4209 memset(mem, 0, size);
4210 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); 4210 mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4211 if (!mem->stat) { 4211 if (!mem->stat)
4212 if (size < PAGE_SIZE) 4212 goto out_free;
4213 kfree(mem);
4214 else
4215 vfree(mem);
4216 mem = NULL;
4217 }
4218 spin_lock_init(&mem->pcp_counter_lock); 4213 spin_lock_init(&mem->pcp_counter_lock);
4219 return mem; 4214 return mem;
4215
4216out_free:
4217 if (size < PAGE_SIZE)
4218 kfree(mem);
4219 else
4220 vfree(mem);
4221 return NULL;
4220} 4222}
4221 4223
4222/* 4224/*
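
The mem_cgroup_alloc() rewrite is more than style: the old error path set mem = NULL and then fell through to spin_lock_init(&mem->pcp_counter_lock), dereferencing the NULL pointer. Jumping to out_free skips all further use of mem and frees it with the allocator matching its size. A userspace sketch of the shape, with malloc/free standing in for kmalloc/vfree and alloc_percpu():

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    struct mem_cgroup { void *stat; };

    static struct mem_cgroup *mem_cgroup_alloc_sketch(size_t size)
    {
        struct mem_cgroup *mem = malloc(size);

        if (!mem)
            return NULL;
        memset(mem, 0, size);
        mem->stat = malloc(64);      /* stand-in for alloc_percpu() */
        if (!mem->stat)
            goto out_free;           /* old code set mem = NULL here
                                      * and then used mem anyway    */
        return mem;

    out_free:
        free(mem);                   /* kernel picks kfree or vfree
                                      * based on size vs PAGE_SIZE  */
        return NULL;
    }

    int main(void)
    {
        struct mem_cgroup *m = mem_cgroup_alloc_sketch(sizeof(*m));

        printf("alloc %s\n", m ? "ok" : "failed");
        if (m) {
            free(m->stat);
            free(m);
        }
        return 0;
    }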
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2d1bf7cf8851..4c5133873097 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -211,6 +211,7 @@ success:
211 mmu_notifier_invalidate_range_end(mm, start, end); 211 mmu_notifier_invalidate_range_end(mm, start, end);
212 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); 212 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
213 vm_stat_account(mm, newflags, vma->vm_file, nrpages); 213 vm_stat_account(mm, newflags, vma->vm_file, nrpages);
214 perf_event_mmap(vma);
214 return 0; 215 return 0;
215 216
216fail: 217fail:
@@ -299,7 +300,6 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
299 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); 300 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
300 if (error) 301 if (error)
301 goto out; 302 goto out;
302 perf_event_mmap(vma);
303 nstart = tmp; 303 nstart = tmp;
304 304
305 if (nstart < prev->vm_end) 305 if (nstart < prev->vm_end)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b8a6fdc21312..d31d7ce52c0e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -913,7 +913,7 @@ keep_lumpy:
913 * back off and wait for congestion to clear because further reclaim 913 * back off and wait for congestion to clear because further reclaim
914 * will encounter the same problem 914 * will encounter the same problem
915 */ 915 */
916 if (nr_dirty == nr_congested) 916 if (nr_dirty == nr_congested && nr_dirty != 0)
917 zone_set_flag(zone, ZONE_CONGESTED); 917 zone_set_flag(zone, ZONE_CONGESTED);
918 918
919 free_page_list(&free_pages); 919 free_page_list(&free_pages);
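
The extra nr_dirty != 0 test matters because a scan that met no dirty pages at all trivially satisfies nr_dirty == nr_congested (0 == 0) and used to mark the zone congested. A one-screen illustration of the guard:

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr_dirty = 0, nr_congested = 0;

        /* "All dirty pages were congested" should be vacuously false
         * when there were no dirty pages in the first place. */
        if (nr_dirty == nr_congested && nr_dirty != 0)
            puts("zone congested");
        else
            puts("zone not congested");   /* correct for the 0/0 case */
        return 0;
    }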
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 26eaebf4aaa9..bb86d2932394 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,6 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
1392 ax25_cb *ax25; 1392 ax25_cb *ax25;
1393 int err = 0; 1393 int err = 0;
1394 1394
1395 memset(fsa, 0, sizeof(fsa));
1395 lock_sock(sk); 1396 lock_sock(sk);
1396 ax25 = ax25_sk(sk); 1397 ax25 = ax25_sk(sk);
1397 1398
@@ -1403,7 +1404,6 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
1403 1404
1404 fsa->fsa_ax25.sax25_family = AF_AX25; 1405 fsa->fsa_ax25.sax25_family = AF_AX25;
1405 fsa->fsa_ax25.sax25_call = ax25->dest_addr; 1406 fsa->fsa_ax25.sax25_call = ax25->dest_addr;
1406 fsa->fsa_ax25.sax25_ndigis = 0;
1407 1407
1408 if (ax25->digipeat != NULL) { 1408 if (ax25->digipeat != NULL) {
1409 ndigi = ax25->digipeat->ndigi; 1409 ndigi = ax25->digipeat->ndigi;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index bfef5bae0b3a..84093b0000b9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1175,6 +1175,12 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
1175 hci_send_cmd(hdev, 1175 hci_send_cmd(hdev,
1176 HCI_OP_READ_REMOTE_EXT_FEATURES, 1176 HCI_OP_READ_REMOTE_EXT_FEATURES,
1177 sizeof(cp), &cp); 1177 sizeof(cp), &cp);
1178 } else if (!ev->status && conn->out &&
1179 conn->sec_level == BT_SECURITY_HIGH) {
1180 struct hci_cp_auth_requested cp;
1181 cp.handle = ev->handle;
1182 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1183 sizeof(cp), &cp);
1178 } else { 1184 } else {
1179 conn->state = BT_CONNECTED; 1185 conn->state = BT_CONNECTED;
1180 hci_proto_connect_cfm(conn, ev->status); 1186 hci_proto_connect_cfm(conn, ev->status);
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 98fdfa1fbddd..86a91543172a 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
1config BT_HIDP 1config BT_HIDP
2 tristate "HIDP protocol support" 2 tristate "HIDP protocol support"
3 depends on BT && BT_L2CAP && INPUT 3 depends on BT && BT_L2CAP && INPUT && HID_SUPPORT
4 select HID 4 select HID
5 help 5 help
6 HIDP (Human Interface Device Protocol) is a transport layer 6 HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index daa7a988d9a6..cd8f6ea03841 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2421,11 +2421,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
2421 break; 2421 break;
2422 2422
2423 case 2: 2423 case 2:
2424 *val = __le16_to_cpu(*((__le16 *) opt->val)); 2424 *val = get_unaligned_le16(opt->val);
2425 break; 2425 break;
2426 2426
2427 case 4: 2427 case 4:
2428 *val = __le32_to_cpu(*((__le32 *) opt->val)); 2428 *val = get_unaligned_le32(opt->val);
2429 break; 2429 break;
2430 2430
2431 default: 2431 default:
@@ -2452,11 +2452,11 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2452 break; 2452 break;
2453 2453
2454 case 2: 2454 case 2:
2455 *((__le16 *) opt->val) = cpu_to_le16(val); 2455 put_unaligned_le16(val, opt->val);
2456 break; 2456 break;
2457 2457
2458 case 4: 2458 case 4:
2459 *((__le32 *) opt->val) = cpu_to_le32(val); 2459 put_unaligned_le32(val, opt->val);
2460 break; 2460 break;
2461 2461
2462 default: 2462 default:
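
The l2cap.c hunks replace casts through __le16/__le32 pointers with get_unaligned_le16/32(): option payloads inside L2CAP configuration packets carry no alignment guarantee, and a plain dereference of a misaligned pointer can fault on architectures without hardware unaligned access. A portable sketch of the accessor (a memcpy-based stand-in, not the kernel implementation):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* memcpy lets the compiler emit a safe access even when p is
     * misaligned, which *(uint16_t *)p does not guarantee. */
    static uint16_t get_unaligned_le16(const void *p)
    {
        uint8_t b[2];

        memcpy(b, p, 2);
        return (uint16_t)(b[0] | (b[1] << 8));
    }

    int main(void)
    {
        uint8_t opt[5] = { 0x00, 0x34, 0x12, 0x00, 0x00 };

        /* opt + 1 is odd-aligned, as option payloads often are */
        printf("0x%04x\n", (unsigned)get_unaligned_le16(opt + 1)); /* 0x1234 */
        return 0;
    }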
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 39a5d87e33b4..fa642aa652bd 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -79,7 +79,10 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr);
79 79
80static void rfcomm_process_connect(struct rfcomm_session *s); 80static void rfcomm_process_connect(struct rfcomm_session *s);
81 81
82static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err); 82static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
83 bdaddr_t *dst,
84 u8 sec_level,
85 int *err);
83static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); 86static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst);
84static void rfcomm_session_del(struct rfcomm_session *s); 87static void rfcomm_session_del(struct rfcomm_session *s);
85 88
@@ -401,7 +404,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
401 404
402 s = rfcomm_session_get(src, dst); 405 s = rfcomm_session_get(src, dst);
403 if (!s) { 406 if (!s) {
404 s = rfcomm_session_create(src, dst, &err); 407 s = rfcomm_session_create(src, dst, d->sec_level, &err);
405 if (!s) 408 if (!s)
406 return err; 409 return err;
407 } 410 }
@@ -679,7 +682,10 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err)
679 rfcomm_session_put(s); 682 rfcomm_session_put(s);
680} 683}
681 684
682static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err) 685static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
686 bdaddr_t *dst,
687 u8 sec_level,
688 int *err)
683{ 689{
684 struct rfcomm_session *s = NULL; 690 struct rfcomm_session *s = NULL;
685 struct sockaddr_l2 addr; 691 struct sockaddr_l2 addr;
@@ -704,6 +710,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst
704 sk = sock->sk; 710 sk = sock->sk;
705 lock_sock(sk); 711 lock_sock(sk);
706 l2cap_pi(sk)->imtu = l2cap_mtu; 712 l2cap_pi(sk)->imtu = l2cap_mtu;
713 l2cap_pi(sk)->sec_level = sec_level;
707 if (l2cap_ertm) 714 if (l2cap_ertm)
708 l2cap_pi(sk)->mode = L2CAP_MODE_ERTM; 715 l2cap_pi(sk)->mode = L2CAP_MODE_ERTM;
709 release_sock(sk); 716 release_sock(sk);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 08ffe9e4be20..6faa8256e10c 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@ struct bcm_sock {
125 struct list_head tx_ops; 125 struct list_head tx_ops;
126 unsigned long dropped_usr_msgs; 126 unsigned long dropped_usr_msgs;
127 struct proc_dir_entry *bcm_proc_read; 127 struct proc_dir_entry *bcm_proc_read;
128 char procname [9]; /* pointer printed in ASCII with \0 */ 128 char procname [20]; /* pointer printed in ASCII with \0 */
129}; 129};
130 130
131static inline struct bcm_sock *bcm_sk(const struct sock *sk) 131static inline struct bcm_sock *bcm_sk(const struct sock *sk)
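
The procname buffer holds a pointer rendered in ASCII: on 64-bit kernels "%p" can produce up to 16 hex digits, so char[9] only ever fit 32-bit pointers and the print could overrun. A quick demonstration of the sizing (userspace snprintf for safety):

    #include <stdio.h>

    int main(void)
    {
        /* 16 hex digits plus an optional "0x" prefix plus '\0' fits
         * comfortably in 20 bytes; 9 bytes only covered 32-bit. */
        char procname[20];
        int n;

        n = snprintf(procname, sizeof(procname), "%p", (void *)&procname);
        printf("%s (%d chars)\n", procname, n);
        return 0;
    }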
diff --git a/net/core/dst.c b/net/core/dst.c
index 8abe628b79f1..b99c7c7ffce2 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -370,6 +370,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
370 370
371static struct notifier_block dst_dev_notifier = { 371static struct notifier_block dst_dev_notifier = {
372 .notifier_call = dst_dev_event, 372 .notifier_call = dst_dev_event,
373 .priority = -10, /* must be called after other network notifiers */
373}; 374};
374 375
375void __init dst_init(void) 376void __init dst_init(void)
diff --git a/net/core/filter.c b/net/core/filter.c
index 7beaec36b541..23e9b2a6b4c8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter);
112 */ 112 */
113unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) 113unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
114{ 114{
115 struct sock_filter *fentry; /* We walk down these */
116 void *ptr; 115 void *ptr;
117 u32 A = 0; /* Accumulator */ 116 u32 A = 0; /* Accumulator */
118 u32 X = 0; /* Index Register */ 117 u32 X = 0; /* Index Register */
119 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ 118 u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
119 unsigned long memvalid = 0;
120 u32 tmp; 120 u32 tmp;
121 int k; 121 int k;
122 int pc; 122 int pc;
123 123
124 BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG);
124 /* 125 /*
125 * Process array of filter instructions. 126 * Process array of filter instructions.
126 */ 127 */
127 for (pc = 0; pc < flen; pc++) { 128 for (pc = 0; pc < flen; pc++) {
128 fentry = &filter[pc]; 129 const struct sock_filter *fentry = &filter[pc];
130 u32 f_k = fentry->k;
129 131
130 switch (fentry->code) { 132 switch (fentry->code) {
131 case BPF_S_ALU_ADD_X: 133 case BPF_S_ALU_ADD_X:
132 A += X; 134 A += X;
133 continue; 135 continue;
134 case BPF_S_ALU_ADD_K: 136 case BPF_S_ALU_ADD_K:
135 A += fentry->k; 137 A += f_k;
136 continue; 138 continue;
137 case BPF_S_ALU_SUB_X: 139 case BPF_S_ALU_SUB_X:
138 A -= X; 140 A -= X;
139 continue; 141 continue;
140 case BPF_S_ALU_SUB_K: 142 case BPF_S_ALU_SUB_K:
141 A -= fentry->k; 143 A -= f_k;
142 continue; 144 continue;
143 case BPF_S_ALU_MUL_X: 145 case BPF_S_ALU_MUL_X:
144 A *= X; 146 A *= X;
145 continue; 147 continue;
146 case BPF_S_ALU_MUL_K: 148 case BPF_S_ALU_MUL_K:
147 A *= fentry->k; 149 A *= f_k;
148 continue; 150 continue;
149 case BPF_S_ALU_DIV_X: 151 case BPF_S_ALU_DIV_X:
150 if (X == 0) 152 if (X == 0)
@@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
152 A /= X; 154 A /= X;
153 continue; 155 continue;
154 case BPF_S_ALU_DIV_K: 156 case BPF_S_ALU_DIV_K:
155 A /= fentry->k; 157 A /= f_k;
156 continue; 158 continue;
157 case BPF_S_ALU_AND_X: 159 case BPF_S_ALU_AND_X:
158 A &= X; 160 A &= X;
159 continue; 161 continue;
160 case BPF_S_ALU_AND_K: 162 case BPF_S_ALU_AND_K:
161 A &= fentry->k; 163 A &= f_k;
162 continue; 164 continue;
163 case BPF_S_ALU_OR_X: 165 case BPF_S_ALU_OR_X:
164 A |= X; 166 A |= X;
165 continue; 167 continue;
166 case BPF_S_ALU_OR_K: 168 case BPF_S_ALU_OR_K:
167 A |= fentry->k; 169 A |= f_k;
168 continue; 170 continue;
169 case BPF_S_ALU_LSH_X: 171 case BPF_S_ALU_LSH_X:
170 A <<= X; 172 A <<= X;
171 continue; 173 continue;
172 case BPF_S_ALU_LSH_K: 174 case BPF_S_ALU_LSH_K:
173 A <<= fentry->k; 175 A <<= f_k;
174 continue; 176 continue;
175 case BPF_S_ALU_RSH_X: 177 case BPF_S_ALU_RSH_X:
176 A >>= X; 178 A >>= X;
177 continue; 179 continue;
178 case BPF_S_ALU_RSH_K: 180 case BPF_S_ALU_RSH_K:
179 A >>= fentry->k; 181 A >>= f_k;
180 continue; 182 continue;
181 case BPF_S_ALU_NEG: 183 case BPF_S_ALU_NEG:
182 A = -A; 184 A = -A;
183 continue; 185 continue;
184 case BPF_S_JMP_JA: 186 case BPF_S_JMP_JA:
185 pc += fentry->k; 187 pc += f_k;
186 continue; 188 continue;
187 case BPF_S_JMP_JGT_K: 189 case BPF_S_JMP_JGT_K:
188 pc += (A > fentry->k) ? fentry->jt : fentry->jf; 190 pc += (A > f_k) ? fentry->jt : fentry->jf;
189 continue; 191 continue;
190 case BPF_S_JMP_JGE_K: 192 case BPF_S_JMP_JGE_K:
191 pc += (A >= fentry->k) ? fentry->jt : fentry->jf; 193 pc += (A >= f_k) ? fentry->jt : fentry->jf;
192 continue; 194 continue;
193 case BPF_S_JMP_JEQ_K: 195 case BPF_S_JMP_JEQ_K:
194 pc += (A == fentry->k) ? fentry->jt : fentry->jf; 196 pc += (A == f_k) ? fentry->jt : fentry->jf;
195 continue; 197 continue;
196 case BPF_S_JMP_JSET_K: 198 case BPF_S_JMP_JSET_K:
197 pc += (A & fentry->k) ? fentry->jt : fentry->jf; 199 pc += (A & f_k) ? fentry->jt : fentry->jf;
198 continue; 200 continue;
199 case BPF_S_JMP_JGT_X: 201 case BPF_S_JMP_JGT_X:
200 pc += (A > X) ? fentry->jt : fentry->jf; 202 pc += (A > X) ? fentry->jt : fentry->jf;
@@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
209 pc += (A & X) ? fentry->jt : fentry->jf; 211 pc += (A & X) ? fentry->jt : fentry->jf;
210 continue; 212 continue;
211 case BPF_S_LD_W_ABS: 213 case BPF_S_LD_W_ABS:
212 k = fentry->k; 214 k = f_k;
213load_w: 215load_w:
214 ptr = load_pointer(skb, k, 4, &tmp); 216 ptr = load_pointer(skb, k, 4, &tmp);
215 if (ptr != NULL) { 217 if (ptr != NULL) {
@@ -218,7 +220,7 @@ load_w:
218 } 220 }
219 break; 221 break;
220 case BPF_S_LD_H_ABS: 222 case BPF_S_LD_H_ABS:
221 k = fentry->k; 223 k = f_k;
222load_h: 224load_h:
223 ptr = load_pointer(skb, k, 2, &tmp); 225 ptr = load_pointer(skb, k, 2, &tmp);
224 if (ptr != NULL) { 226 if (ptr != NULL) {
@@ -227,7 +229,7 @@ load_h:
227 } 229 }
228 break; 230 break;
229 case BPF_S_LD_B_ABS: 231 case BPF_S_LD_B_ABS:
230 k = fentry->k; 232 k = f_k;
231load_b: 233load_b:
232 ptr = load_pointer(skb, k, 1, &tmp); 234 ptr = load_pointer(skb, k, 1, &tmp);
233 if (ptr != NULL) { 235 if (ptr != NULL) {
@@ -242,32 +244,34 @@ load_b:
242 X = skb->len; 244 X = skb->len;
243 continue; 245 continue;
244 case BPF_S_LD_W_IND: 246 case BPF_S_LD_W_IND:
245 k = X + fentry->k; 247 k = X + f_k;
246 goto load_w; 248 goto load_w;
247 case BPF_S_LD_H_IND: 249 case BPF_S_LD_H_IND:
248 k = X + fentry->k; 250 k = X + f_k;
249 goto load_h; 251 goto load_h;
250 case BPF_S_LD_B_IND: 252 case BPF_S_LD_B_IND:
251 k = X + fentry->k; 253 k = X + f_k;
252 goto load_b; 254 goto load_b;
253 case BPF_S_LDX_B_MSH: 255 case BPF_S_LDX_B_MSH:
254 ptr = load_pointer(skb, fentry->k, 1, &tmp); 256 ptr = load_pointer(skb, f_k, 1, &tmp);
255 if (ptr != NULL) { 257 if (ptr != NULL) {
256 X = (*(u8 *)ptr & 0xf) << 2; 258 X = (*(u8 *)ptr & 0xf) << 2;
257 continue; 259 continue;
258 } 260 }
259 return 0; 261 return 0;
260 case BPF_S_LD_IMM: 262 case BPF_S_LD_IMM:
261 A = fentry->k; 263 A = f_k;
262 continue; 264 continue;
263 case BPF_S_LDX_IMM: 265 case BPF_S_LDX_IMM:
264 X = fentry->k; 266 X = f_k;
265 continue; 267 continue;
266 case BPF_S_LD_MEM: 268 case BPF_S_LD_MEM:
267 A = mem[fentry->k]; 269 A = (memvalid & (1UL << f_k)) ?
270 mem[f_k] : 0;
268 continue; 271 continue;
269 case BPF_S_LDX_MEM: 272 case BPF_S_LDX_MEM:
270 X = mem[fentry->k]; 273 X = (memvalid & (1UL << f_k)) ?
274 mem[f_k] : 0;
271 continue; 275 continue;
272 case BPF_S_MISC_TAX: 276 case BPF_S_MISC_TAX:
273 X = A; 277 X = A;
@@ -276,14 +280,16 @@ load_b:
276 A = X; 280 A = X;
277 continue; 281 continue;
278 case BPF_S_RET_K: 282 case BPF_S_RET_K:
279 return fentry->k; 283 return f_k;
280 case BPF_S_RET_A: 284 case BPF_S_RET_A:
281 return A; 285 return A;
282 case BPF_S_ST: 286 case BPF_S_ST:
283 mem[fentry->k] = A; 287 memvalid |= 1UL << f_k;
288 mem[f_k] = A;
284 continue; 289 continue;
285 case BPF_S_STX: 290 case BPF_S_STX:
286 mem[fentry->k] = X; 291 memvalid |= 1UL << f_k;
292 mem[f_k] = X;
287 continue; 293 continue;
288 default: 294 default:
289 WARN_ON(1); 295 WARN_ON(1);
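
The sk_run_filter() changes do two things: caching fentry->k in f_k lets the compiler keep the operand in a register across the switch, and the new memvalid bitmask closes an information leak where BPF_S_LD_MEM/BPF_S_LDX_MEM could read scratch words never written by BPF_S_ST/BPF_S_STX, i.e. uninitialized kernel stack; the BUILD_BUG_ON guarantees the mask has a bit for every scratch word. A tiny standalone model of the masking:

    #include <stdint.h>
    #include <stdio.h>

    #define BPF_MEMWORDS 16   /* must be <= bits in unsigned long */

    int main(void)
    {
        uint32_t mem[BPF_MEMWORDS];       /* deliberately uninitialized */
        unsigned long memvalid = 0;
        uint32_t A;

        /* ST 3 -- store into scratch word 3, marking it valid */
        memvalid |= 1UL << 3;
        mem[3] = 42;

        /* LD_MEM 3 -- initialized: the real value is returned */
        A = (memvalid & (1UL << 3)) ? mem[3] : 0;
        printf("word 3: %u\n", A);        /* 42 */

        /* LD_MEM 7 -- never stored: masked to 0, no stack leak
         * (mem[7] is not even read thanks to the ternary) */
        A = (memvalid & (1UL << 7)) ? mem[7] : 0;
        printf("word 7: %u\n", A);        /* 0 */
        return 0;
    }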
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8121268ddbdd..841c287ef40a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -347,16 +347,17 @@ static size_t rtnl_link_get_size(const struct net_device *dev)
347 if (!ops) 347 if (!ops)
348 return 0; 348 return 0;
349 349
350 size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ 350 size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
351 nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ 351 nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
352 352
353 if (ops->get_size) 353 if (ops->get_size)
354 /* IFLA_INFO_DATA + nested data */ 354 /* IFLA_INFO_DATA + nested data */
355 size += nlmsg_total_size(sizeof(struct nlattr)) + 355 size += nla_total_size(sizeof(struct nlattr)) +
356 ops->get_size(dev); 356 ops->get_size(dev);
357 357
358 if (ops->get_xstats_size) 358 if (ops->get_xstats_size)
359 size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */ 359 /* IFLA_INFO_XSTATS */
360 size += nla_total_size(ops->get_xstats_size(dev));
360 361
361 return size; 362 return size;
362} 363}
diff --git a/net/core/sock.c b/net/core/sock.c
index 3eed5424e659..fb6080111461 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1653,10 +1653,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
1653{ 1653{
1654 struct proto *prot = sk->sk_prot; 1654 struct proto *prot = sk->sk_prot;
1655 int amt = sk_mem_pages(size); 1655 int amt = sk_mem_pages(size);
1656 int allocated; 1656 long allocated;
1657 1657
1658 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; 1658 sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
1659 allocated = atomic_add_return(amt, prot->memory_allocated); 1659 allocated = atomic_long_add_return(amt, prot->memory_allocated);
1660 1660
1661 /* Under limit. */ 1661 /* Under limit. */
1662 if (allocated <= prot->sysctl_mem[0]) { 1662 if (allocated <= prot->sysctl_mem[0]) {
@@ -1714,7 +1714,7 @@ suppress_allocation:
1714 1714
1715 /* Alas. Undo changes. */ 1715 /* Alas. Undo changes. */
1716 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; 1716 sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
1717 atomic_sub(amt, prot->memory_allocated); 1717 atomic_long_sub(amt, prot->memory_allocated);
1718 return 0; 1718 return 0;
1719} 1719}
1720EXPORT_SYMBOL(__sk_mem_schedule); 1720EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1727,12 +1727,12 @@ void __sk_mem_reclaim(struct sock *sk)
1727{ 1727{
1728 struct proto *prot = sk->sk_prot; 1728 struct proto *prot = sk->sk_prot;
1729 1729
1730 atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 1730 atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
1731 prot->memory_allocated); 1731 prot->memory_allocated);
1732 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; 1732 sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
1733 1733
1734 if (prot->memory_pressure && *prot->memory_pressure && 1734 if (prot->memory_pressure && *prot->memory_pressure &&
1735 (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) 1735 (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
1736 *prot->memory_pressure = 0; 1736 *prot->memory_pressure = 0;
1737} 1737}
1738EXPORT_SYMBOL(__sk_mem_reclaim); 1738EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -2452,12 +2452,12 @@ static char proto_method_implemented(const void *method)
2452 2452
2453static void proto_seq_printf(struct seq_file *seq, struct proto *proto) 2453static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
2454{ 2454{
2455 seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s " 2455 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
2456 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", 2456 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
2457 proto->name, 2457 proto->name,
2458 proto->obj_size, 2458 proto->obj_size,
2459 sock_prot_inuse_get(seq_file_net(seq), proto), 2459 sock_prot_inuse_get(seq_file_net(seq), proto),
2460 proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1, 2460 proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
2461 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", 2461 proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
2462 proto->max_header, 2462 proto->max_header,
2463 proto->slab == NULL ? "no" : "yes", 2463 proto->slab == NULL ? "no" : "yes",
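
This group of hunks (net/core/sock.c together with the decnet, ipv4, and sctp changes below) widens protocol memory accounting from int/atomic_t to long/atomic_long_t, and switches the sysctl handlers and "%ld" formats to match: the counters track page-sized SK_MEM_QUANTUM units, and an int ceiling is within reach of large 64-bit machines. Roughly:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        /* With 4 KiB pages, an int page counter tops out around
         * 8 TiB of accounted memory, hence atomic_long_t. */
        long long max_bytes = (long long)INT_MAX * 4096;

        printf("int counter ceiling: %lld bytes (~%lld TiB)\n",
               max_bytes, max_bytes >> 40);
        return 0;
    }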
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 18b8a2cbdf77..9ecef9968c39 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -155,7 +155,7 @@ static const struct proto_ops dn_proto_ops;
155static DEFINE_RWLOCK(dn_hash_lock); 155static DEFINE_RWLOCK(dn_hash_lock);
156static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; 156static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
157static struct hlist_head dn_wild_sk; 157static struct hlist_head dn_wild_sk;
158static atomic_t decnet_memory_allocated; 158static atomic_long_t decnet_memory_allocated;
159 159
160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); 160static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); 161static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index be3eb8e23288..28f8b5e5f73b 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -38,7 +38,7 @@ int decnet_log_martians = 1;
38int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; 38int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;
39 39
40/* Reasonable defaults, I hope, based on tcp's defaults */ 40/* Reasonable defaults, I hope, based on tcp's defaults */
41int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; 41long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
42int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; 42int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
43int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; 43int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
44 44
@@ -324,7 +324,7 @@ static ctl_table dn_table[] = {
324 .data = &sysctl_decnet_mem, 324 .data = &sysctl_decnet_mem,
325 .maxlen = sizeof(sysctl_decnet_mem), 325 .maxlen = sizeof(sysctl_decnet_mem),
326 .mode = 0644, 326 .mode = 0644,
327 .proc_handler = proc_dointvec, 327 .proc_handler = proc_doulongvec_minmax
328 }, 328 },
329 { 329 {
330 .procname = "decnet_rmem", 330 .procname = "decnet_rmem",
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6f49d6c087da..0f0e0f0279b8 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2307,10 +2307,8 @@ void ip_mc_drop_socket(struct sock *sk)
2307 inet->mc_list = iml->next_rcu; 2307 inet->mc_list = iml->next_rcu;
2308 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); 2308 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
2309 (void) ip_mc_leave_src(sk, iml, in_dev); 2309 (void) ip_mc_leave_src(sk, iml, in_dev);
2310 if (in_dev != NULL) { 2310 if (in_dev != NULL)
2311 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); 2311 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2312 in_dev_put(in_dev);
2313 }
2314 /* decrease mem now to avoid the memleak warning */ 2312 /* decrease mem now to avoid the memleak warning */
2315 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 2313 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
2316 call_rcu(&iml->rcu, ip_mc_socklist_reclaim); 2314 call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4ae1f203f7cb..1b48eb1ed453 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -59,13 +59,13 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
59 local_bh_enable(); 59 local_bh_enable();
60 60
61 socket_seq_show(seq); 61 socket_seq_show(seq);
62 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", 62 seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
63 sock_prot_inuse_get(net, &tcp_prot), orphans, 63 sock_prot_inuse_get(net, &tcp_prot), orphans,
64 tcp_death_row.tw_count, sockets, 64 tcp_death_row.tw_count, sockets,
65 atomic_read(&tcp_memory_allocated)); 65 atomic_long_read(&tcp_memory_allocated));
66 seq_printf(seq, "UDP: inuse %d mem %d\n", 66 seq_printf(seq, "UDP: inuse %d mem %ld\n",
67 sock_prot_inuse_get(net, &udp_prot), 67 sock_prot_inuse_get(net, &udp_prot),
68 atomic_read(&udp_memory_allocated)); 68 atomic_long_read(&udp_memory_allocated));
69 seq_printf(seq, "UDPLITE: inuse %d\n", 69 seq_printf(seq, "UDPLITE: inuse %d\n",
70 sock_prot_inuse_get(net, &udplite_prot)); 70 sock_prot_inuse_get(net, &udplite_prot));
71 seq_printf(seq, "RAW: inuse %d\n", 71 seq_printf(seq, "RAW: inuse %d\n",
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d96c1da4b17c..e91911d7aae2 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -398,7 +398,7 @@ static struct ctl_table ipv4_table[] = {
398 .data = &sysctl_tcp_mem, 398 .data = &sysctl_tcp_mem,
399 .maxlen = sizeof(sysctl_tcp_mem), 399 .maxlen = sizeof(sysctl_tcp_mem),
400 .mode = 0644, 400 .mode = 0644,
401 .proc_handler = proc_dointvec 401 .proc_handler = proc_doulongvec_minmax
402 }, 402 },
403 { 403 {
404 .procname = "tcp_wmem", 404 .procname = "tcp_wmem",
@@ -602,8 +602,7 @@ static struct ctl_table ipv4_table[] = {
602 .data = &sysctl_udp_mem, 602 .data = &sysctl_udp_mem,
603 .maxlen = sizeof(sysctl_udp_mem), 603 .maxlen = sizeof(sysctl_udp_mem),
604 .mode = 0644, 604 .mode = 0644,
605 .proc_handler = proc_dointvec_minmax, 605 .proc_handler = proc_doulongvec_minmax,
606 .extra1 = &zero
607 }, 606 },
608 { 607 {
609 .procname = "udp_rmem_min", 608 .procname = "udp_rmem_min",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5f738c5c0dc4..2bb46d55f40c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -282,7 +282,7 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
282struct percpu_counter tcp_orphan_count; 282struct percpu_counter tcp_orphan_count;
283EXPORT_SYMBOL_GPL(tcp_orphan_count); 283EXPORT_SYMBOL_GPL(tcp_orphan_count);
284 284
285int sysctl_tcp_mem[3] __read_mostly; 285long sysctl_tcp_mem[3] __read_mostly;
286int sysctl_tcp_wmem[3] __read_mostly; 286int sysctl_tcp_wmem[3] __read_mostly;
287int sysctl_tcp_rmem[3] __read_mostly; 287int sysctl_tcp_rmem[3] __read_mostly;
288 288
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(sysctl_tcp_mem);
290EXPORT_SYMBOL(sysctl_tcp_rmem); 290EXPORT_SYMBOL(sysctl_tcp_rmem);
291EXPORT_SYMBOL(sysctl_tcp_wmem); 291EXPORT_SYMBOL(sysctl_tcp_wmem);
292 292
293atomic_t tcp_memory_allocated; /* Current allocated memory. */ 293atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
294EXPORT_SYMBOL(tcp_memory_allocated); 294EXPORT_SYMBOL(tcp_memory_allocated);
295 295
296/* 296/*
@@ -2244,7 +2244,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2244 /* Values greater than interface MTU won't take effect. However 2244 /* Values greater than interface MTU won't take effect. However
2245 * at the point when this call is done we typically don't yet 2245 * at the point when this call is done we typically don't yet
2246 * know which interface is going to be used */ 2246 * know which interface is going to be used */
2247 if (val < 8 || val > MAX_TCP_WINDOW) { 2247 if (val < 64 || val > MAX_TCP_WINDOW) {
2248 err = -EINVAL; 2248 err = -EINVAL;
2249 break; 2249 break;
2250 } 2250 }
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3357f69e353d..6d8ab1c4efc3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -259,8 +259,11 @@ static void tcp_fixup_sndbuf(struct sock *sk)
259 int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 + 259 int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
260 sizeof(struct sk_buff); 260 sizeof(struct sk_buff);
261 261
262 if (sk->sk_sndbuf < 3 * sndmem) 262 if (sk->sk_sndbuf < 3 * sndmem) {
263 sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]); 263 sk->sk_sndbuf = 3 * sndmem;
264 if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
265 sk->sk_sndbuf = sysctl_tcp_wmem[2];
266 }
264} 267}
265 268
266/* 2. Tuning advertised window (window_clamp, rcv_ssthresh) 269/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -396,7 +399,7 @@ static void tcp_clamp_window(struct sock *sk)
396 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && 399 if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
397 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && 400 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
398 !tcp_memory_pressure && 401 !tcp_memory_pressure &&
399 atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { 402 atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
400 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), 403 sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
401 sysctl_tcp_rmem[2]); 404 sysctl_tcp_rmem[2]);
402 } 405 }
@@ -4861,7 +4864,7 @@ static int tcp_should_expand_sndbuf(struct sock *sk)
4861 return 0; 4864 return 0;
4862 4865
4863 /* If we are under soft global TCP memory pressure, do not expand. */ 4866 /* If we are under soft global TCP memory pressure, do not expand. */
4864 if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) 4867 if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
4865 return 0; 4868 return 0;
4866 4869
4867 /* If we filled the congestion window, do not expand. */ 4870 /* If we filled the congestion window, do not expand. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8f8527d41682..69ccbc1dde9c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -415,6 +415,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
415 !icsk->icsk_backoff) 415 !icsk->icsk_backoff)
416 break; 416 break;
417 417
418 if (sock_owned_by_user(sk))
419 break;
420
418 icsk->icsk_backoff--; 421 icsk->icsk_backoff--;
419 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << 422 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
420 icsk->icsk_backoff; 423 icsk->icsk_backoff;
@@ -429,11 +432,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
429 if (remaining) { 432 if (remaining) {
430 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 433 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
431 remaining, TCP_RTO_MAX); 434 remaining, TCP_RTO_MAX);
432 } else if (sock_owned_by_user(sk)) {
433 /* RTO revert clocked out retransmission,
434 * but socket is locked. Will defer. */
435 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
436 HZ/20, TCP_RTO_MAX);
437 } else { 435 } else {
438 /* RTO revert clocked out retransmission. 436 /* RTO revert clocked out retransmission.
439 * Will retransmit now */ 437 * Will retransmit now */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 28cb2d733a3c..5e0a3a582a59 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -110,7 +110,7 @@
110struct udp_table udp_table __read_mostly; 110struct udp_table udp_table __read_mostly;
111EXPORT_SYMBOL(udp_table); 111EXPORT_SYMBOL(udp_table);
112 112
113int sysctl_udp_mem[3] __read_mostly; 113long sysctl_udp_mem[3] __read_mostly;
114EXPORT_SYMBOL(sysctl_udp_mem); 114EXPORT_SYMBOL(sysctl_udp_mem);
115 115
116int sysctl_udp_rmem_min __read_mostly; 116int sysctl_udp_rmem_min __read_mostly;
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(sysctl_udp_rmem_min);
119int sysctl_udp_wmem_min __read_mostly; 119int sysctl_udp_wmem_min __read_mostly;
120EXPORT_SYMBOL(sysctl_udp_wmem_min); 120EXPORT_SYMBOL(sysctl_udp_wmem_min);
121 121
122atomic_t udp_memory_allocated; 122atomic_long_t udp_memory_allocated;
123EXPORT_SYMBOL(udp_memory_allocated); 123EXPORT_SYMBOL(udp_memory_allocated);
124 124
125#define MAX_UDP_PORTS 65536 125#define MAX_UDP_PORTS 65536
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e048ec62d109..b41ce0f0d514 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2740,10 +2740,6 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2740 /* Flag it for later restoration when link comes up */ 2740 /* Flag it for later restoration when link comes up */
2741 ifa->flags |= IFA_F_TENTATIVE; 2741 ifa->flags |= IFA_F_TENTATIVE;
2742 ifa->state = INET6_IFADDR_STATE_DAD; 2742 ifa->state = INET6_IFADDR_STATE_DAD;
2743
2744 write_unlock_bh(&idev->lock);
2745
2746 in6_ifa_hold(ifa);
2747 } else { 2743 } else {
2748 list_del(&ifa->if_list); 2744 list_del(&ifa->if_list);
2749 2745
@@ -2758,19 +2754,15 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2758 ifa->state = INET6_IFADDR_STATE_DEAD; 2754 ifa->state = INET6_IFADDR_STATE_DEAD;
2759 spin_unlock_bh(&ifa->state_lock); 2755 spin_unlock_bh(&ifa->state_lock);
2760 2756
2761 if (state == INET6_IFADDR_STATE_DEAD) 2757 if (state == INET6_IFADDR_STATE_DEAD) {
2762 goto put_ifa; 2758 in6_ifa_put(ifa);
2759 } else {
2760 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2761 atomic_notifier_call_chain(&inet6addr_chain,
2762 NETDEV_DOWN, ifa);
2763 }
2764 write_lock_bh(&idev->lock);
2763 } 2765 }
2764
2765 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2766 if (ifa->state == INET6_IFADDR_STATE_DEAD)
2767 atomic_notifier_call_chain(&inet6addr_chain,
2768 NETDEV_DOWN, ifa);
2769
2770put_ifa:
2771 in6_ifa_put(ifa);
2772
2773 write_lock_bh(&idev->lock);
2774 } 2766 }
2775 2767
2776 list_splice(&keep_list, &idev->addr_list); 2768 list_splice(&keep_list, &idev->addr_list);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 3a3f129a44cb..79d43aa8fa8d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -286,7 +286,7 @@ found:
286 286
287 /* Check for overlap with preceding fragment. */ 287 /* Check for overlap with preceding fragment. */
288 if (prev && 288 if (prev &&
289 (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0) 289 (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
290 goto discard_fq; 290 goto discard_fq;
291 291
292 /* Look for overlap with succeeding segment. */ 292 /* Look for overlap with succeeding segment. */
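
The overlap-check rewrite avoids a mixed signed/unsigned trap: prev->len is unsigned, so the whole left-hand side is computed as unsigned and a "negative" difference wraps to a huge value, making the old "> 0" test fire for fragments that do not overlap at all. A worked example:

    #include <stdio.h>

    int main(void)
    {
        int offset = 100;             /* start of the new fragment   */
        int prev_offset = 0;
        unsigned int prev_len = 40;   /* skb->len is unsigned        */

        /* Old test: 40u - 100 wraps to a huge unsigned value, so
         * "> 0" is true and the fragment is wrongly discarded. */
        if ((prev_offset + prev_len) - offset > 0)
            puts("old check: bogus overlap");

        /* New test: plain comparison, no wraparound. */
        if (prev_offset + prev_len > (unsigned int)offset)
            puts("new check: overlap");
        else
            puts("new check: no overlap");
        return 0;
    }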
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fc328339be99..96455ffb76fb 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1945,8 +1945,12 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1945 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 1945 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1946 struct neighbour *neigh; 1946 struct neighbour *neigh;
1947 1947
1948 if (rt == NULL) 1948 if (rt == NULL) {
1949 if (net_ratelimit())
1950 pr_warning("IPv6: Maximum number of routes reached,"
1951 " consider increasing route/max_size.\n");
1949 return ERR_PTR(-ENOMEM); 1952 return ERR_PTR(-ENOMEM);
1953 }
1950 1954
1951 dev_hold(net->loopback_dev); 1955 dev_hold(net->loopback_dev);
1952 in6_dev_hold(idev); 1956 in6_dev_hold(idev);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index f9163b12c7f1..7aa85591dbe7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -391,6 +391,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
391 u32 hw_reconf_flags = 0; 391 u32 hw_reconf_flags = 0;
392 int i; 392 int i;
393 393
394 if (local->scan_sdata == sdata)
395 ieee80211_scan_cancel(local);
396
394 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 397 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
395 398
396 /* 399 /*
@@ -523,9 +526,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
523 synchronize_rcu(); 526 synchronize_rcu();
524 skb_queue_purge(&sdata->skb_queue); 527 skb_queue_purge(&sdata->skb_queue);
525 528
526 if (local->scan_sdata == sdata)
527 ieee80211_scan_cancel(local);
528
529 /* 529 /*
530 * Disable beaconing here for mesh only, AP and IBSS 530 * Disable beaconing here for mesh only, AP and IBSS
531 * are already taken care of. 531 * are already taken care of.
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3616f27b9d46..8298e676f5a0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1610,9 +1610,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 
 	err = -EINVAL;
 	vnet_hdr_len = sizeof(vnet_hdr);
-	if ((len -= vnet_hdr_len) < 0)
+	if (len < vnet_hdr_len)
 		goto out_free;
 
+	len -= vnet_hdr_len;
+
 	if (skb_is_gso(skb)) {
 		struct skb_shared_info *sinfo = skb_shinfo(skb);
 
@@ -1719,7 +1721,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
 	if (dev)
-		strlcpy(uaddr->sa_data, dev->name, 15);
+		strncpy(uaddr->sa_data, dev->name, 14);
 	else
 		memset(uaddr->sa_data, 0, 14);
 	rcu_read_unlock();
@@ -1742,6 +1744,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
 	sll->sll_family = AF_PACKET;
 	sll->sll_ifindex = po->ifindex;
 	sll->sll_protocol = po->num;
+	sll->sll_pkttype = 0;
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
 	if (dev) {
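
Two defensive patterns appear in the af_packet hunks above. First, `(len -= vnet_hdr_len) < 0` only works while `len` stays signed; testing before subtracting is correct regardless of type. Second, `sa_data` is a 14-byte field, so a copy routine handed a size of 15 (as strlcpy was, to make room for its terminating NUL) can write one byte past the field. A sketch of the first pattern, with hypothetical sizes:

    #include <stdio.h>

    int main(void)
    {
        size_t len = 10;              /* unsigned, like many buffer lengths */
        const size_t hdr_len = 12;

        /* Wrong for unsigned types: (len -= hdr_len) wraps around,
         * so a "< 0" test after the fact can never fire. */

        /* Robust: validate before mutating. */
        if (len < hdr_len) {
            printf("buffer too short for header\n");
            return 1;
        }
        len -= hdr_len;
        printf("payload length: %zu\n", len);
        return 0;
    }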
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1ef29c74d85e..e58f9476f29c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -92,7 +92,7 @@ static struct sctp_af *sctp_af_v6_specific;
 struct kmem_cache *sctp_chunk_cachep __read_mostly;
 struct kmem_cache *sctp_bucket_cachep __read_mostly;
 
-int sysctl_sctp_mem[3];
+long sysctl_sctp_mem[3];
 int sysctl_sctp_rmem[3];
 int sysctl_sctp_wmem[3];
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e34ca9cc1167..6bd554323a34 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -111,12 +111,12 @@ static void sctp_sock_migrate(struct sock *, struct sock *,
 static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
 
 extern struct kmem_cache *sctp_bucket_cachep;
-extern int sysctl_sctp_mem[3];
+extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
 extern int sysctl_sctp_wmem[3];
 
 static int sctp_memory_pressure;
-static atomic_t sctp_memory_allocated;
+static atomic_long_t sctp_memory_allocated;
 struct percpu_counter sctp_sockets_allocated;
 
 static void sctp_enter_memory_pressure(struct sock *sk)
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 832590bbe0c0..50cb57f0919e 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -54,7 +54,7 @@ static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
 
-extern int sysctl_sctp_mem[3];
+extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
 extern int sysctl_sctp_wmem[3];
 
@@ -203,7 +203,7 @@ static ctl_table sctp_table[] = {
 		.data		= &sysctl_sctp_mem,
 		.maxlen		= sizeof(sysctl_sctp_mem),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_doulongvec_minmax
 	},
 	{
 		.procname	= "sctp_rmem",
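
The sctp change only works because all three pieces move together: the array type (int to long), its extern declarations, and the proc handler. proc_dointvec walks the backing store in sizeof(int) steps, while proc_doulongvec_minmax walks sizeof(long) steps; mixing them silently corrupts values on 64-bit. A sketch of a matching table entry, with illustrative names rather than the real sctp table:

    static long sysctl_example_mem[3];

    static ctl_table example_table[] = {
        {
            .procname     = "example_mem",
            .data         = &sysctl_example_mem,
            .maxlen       = sizeof(sysctl_example_mem), /* 3 * sizeof(long) */
            .mode         = 0644,
            /* The handler must walk .data with the same element size
             * as the array: longs here, never proc_dointvec. */
            .proc_handler = proc_doulongvec_minmax,
        },
        { }
    };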
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 33217fc3d697..e9f0d5004483 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -396,6 +396,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
 	struct tipc_sock *tsock = tipc_sk(sock->sk);
 
+	memset(addr, 0, sizeof(*addr));
 	if (peer) {
 		if ((sock->state != SS_CONNECTED) &&
 			((peer != 2) || (sock->state != SS_DISCONNECTING)))
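
The memset added above closes an information leak: get_name() hands the sockaddr back to user space, and any field or padding the function does not explicitly set would otherwise carry stale kernel stack bytes. Zero the whole structure first, then fill in only what is known. The same habit applies to any partially-filled struct given to an untrusted reader; a hypothetical user-space illustration:

    #include <string.h>
    #include <sys/socket.h>

    /* Hypothetical helper that fills only part of an address before
     * it is copied out to a caller we can't trust to ignore padding. */
    static void fill_name(struct sockaddr_storage *addr)
    {
        memset(addr, 0, sizeof(*addr));   /* no stale bytes survive */
        addr->ss_family = AF_UNIX;
        /* ...set only the fields we actually track... */
    }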
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c506241f8637..4e78e3f26798 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -224,8 +224,8 @@ static int nl80211_prepare_netdev_dump(struct sk_buff *skb,
 	}
 
 	*rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
-	if (IS_ERR(dev)) {
-		err = PTR_ERR(dev);
+	if (IS_ERR(*rdev)) {
+		err = PTR_ERR(*rdev);
 		goto out_rtnl;
 	}
 
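
The nl80211 fix is a reminder that the ERR_PTR convention encodes the error inside the returned pointer itself, so the test must be applied to the pointer that was actually assigned (*rdev), not to a stale local (dev). A self-contained model of the convention, with simplified stand-ins for the kernel helpers:

    #include <stdio.h>

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-4095;
    }

    static void *lookup(int fail)
    {
        return fail ? ERR_PTR(-2) : (void *)0x1000; /* -2 ~ -ENOENT */
    }

    int main(void)
    {
        void *obj = lookup(1);

        if (IS_ERR(obj)) {            /* test what was assigned */
            printf("lookup failed: %ld\n", PTR_ERR(obj));
            return 1;
        }
        return 0;
    }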
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 3a8c4c419cd4..55187c8f6420 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 	while (len > 0) {
 		switch (*p & X25_FAC_CLASS_MASK) {
 		case X25_FAC_CLASS_A:
+			if (len < 2)
+				return 0;
 			switch (*p) {
 			case X25_FAC_REVERSE:
 				if((p[1] & 0x81) == 0x81) {
@@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 			len -= 2;
 			break;
 		case X25_FAC_CLASS_B:
+			if (len < 3)
+				return 0;
 			switch (*p) {
 			case X25_FAC_PACKET_SIZE:
 				facilities->pacsize_in = p[1];
@@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 			len -= 3;
 			break;
 		case X25_FAC_CLASS_C:
+			if (len < 4)
+				return 0;
 			printk(KERN_DEBUG "X.25: unknown facility %02X, "
 			       "values %02X, %02X, %02X\n",
 			       p[0], p[1], p[2], p[3]);
@@ -132,6 +138,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 			len -= 4;
 			break;
 		case X25_FAC_CLASS_D:
+			if (len < p[1] + 2)
+				return 0;
 			switch (*p) {
 			case X25_FAC_CALLING_AE:
 				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
@@ -149,9 +157,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 				break;
 			default:
 				printk(KERN_DEBUG "X.25: unknown facility %02X,"
-					"length %d, values %02X, %02X, "
-					"%02X, %02X\n",
-					p[0], p[1], p[2], p[3], p[4], p[5]);
+					"length %d\n", p[0], p[1]);
 				break;
 			}
 			len -= p[1] + 2;
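
All four added checks in x25_parse_facilities() enforce the same rule: know how many bytes a facility class consumes before dereferencing them, since the loop walks attacker-supplied packet data. Classes A through C are fixed-size; class D carries its own length byte, which itself must be validated before it is read. A condensed sketch of the pattern, with hypothetical class encodings rather than the real X.25 masks:

    #include <stddef.h>

    /* Hypothetical TLV-style walk: verify the bytes exist, then read. */
    static int parse(const unsigned char *p, size_t len)
    {
        while (len > 0) {
            size_t need;

            switch (p[0] & 0xC0) {
            case 0x00: need = 2; break;            /* class A: fixed */
            case 0x40: need = 3; break;            /* class B: fixed */
            case 0x80: need = 4; break;            /* class C: fixed */
            default:                               /* class D: self-sized */
                if (len < 2)
                    return 0;                      /* length byte missing */
                need = (size_t)p[1] + 2;
                break;
            }
            if (len < need)
                return 0;          /* truncated input: stop, don't overread */
            /* ...p[0] .. p[need - 1] are now safe to read... */
            p += need;
            len -= need;
        }
        return 1;
    }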
diff --git a/security/Kconfig b/security/Kconfig
index bd72ae623494..e80da955e687 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -39,6 +39,18 @@ config KEYS_DEBUG_PROC_KEYS
 
 	  If you are unsure as to whether this is required, answer N.
 
+config SECURITY_DMESG_RESTRICT
+	bool "Restrict unprivileged access to the kernel syslog"
+	default n
+	help
+	  This enforces restrictions on unprivileged users reading the kernel
+	  syslog via dmesg(8).
+
+	  If this option is not selected, no restrictions will be enforced
+	  unless the dmesg_restrict sysctl is explicitly set to (1).
+
+	  If you are unsure how to answer this question, answer N.
+
 config SECURITY
 	bool "Enable different security models"
 	depends on SYSFS
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index cf1de4462ccd..b7106f192b75 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -922,7 +922,7 @@ static int __init apparmor_init(void)
 	error = register_security(&apparmor_ops);
 	if (error) {
 		AA_ERROR("Unable to register AppArmor\n");
-		goto register_security_out;
+		goto set_init_cxt_out;
 	}
 
 	/* Report that AppArmor successfully initialized */
@@ -936,6 +936,9 @@ static int __init apparmor_init(void)
 
 	return error;
 
+set_init_cxt_out:
+	aa_free_task_context(current->real_cred->security);
+
 register_security_out:
 	aa_free_root_ns();
 
@@ -944,7 +947,6 @@ alloc_out:
 
 	apparmor_enabled = 0;
 	return error;
-
 }
 
 security_initcall(apparmor_init);
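
The AppArmor fix adds a new unwind label because the failure in register_security() occurs after the init context was installed; jumping to the old label leaked it. The underlying idiom: cleanup labels are listed in reverse order of setup and fall through, so each failure site jumps to the label that undoes exactly what has succeeded so far. A schematic with hypothetical helpers, not the real AppArmor functions:

    static int example_init(void)
    {
        int error;

        error = alloc_root();             /* step 1 */
        if (error)
            goto alloc_out;

        error = set_init_context();       /* step 2 */
        if (error)
            goto register_security_out;   /* undo step 1 only */

        error = register_hooks();         /* step 3 */
        if (error)
            goto set_init_cxt_out;        /* undo steps 2 and 1 */

        return 0;

    set_init_cxt_out:
        free_init_context();              /* falls through */
    register_security_out:
        free_root();
    alloc_out:
        return error;
    }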
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 52cc865f1464..4f0eadee78b8 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -306,7 +306,7 @@ static struct aa_namespace *alloc_namespace(const char *prefix,
 	return ns;
 
 fail_unconfined:
-	kzfree(ns->base.name);
+	kzfree(ns->base.hname);
 fail_ns:
 	kzfree(ns);
 	return NULL;
diff --git a/security/commoncap.c b/security/commoncap.c
index 5e632b4857e4..04b80f9912bf 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -895,6 +895,8 @@ int cap_syslog(int type, bool from_file)
 {
 	if (type != SYSLOG_ACTION_OPEN && from_file)
 		return 0;
+	if (dmesg_restrict && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
 	if ((type != SYSLOG_ACTION_READ_ALL &&
 	     type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
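
Note the placement of the new test in cap_syslog(): it runs before the existing read/size exemption, so once dmesg_restrict is set (the sysctl behind the SECURITY_DMESG_RESTRICT option above), an unprivileged caller is refused every action, including the reads that are normally exempt. A condensed user-space model of the decision, with illustrative names rather than the kernel's SYSLOG_ACTION constants:

    #include <stdbool.h>

    /* Returns true if the syslog action is permitted. */
    static bool syslog_allowed(bool read_only_action, bool has_admin,
                               bool dmesg_restrict)
    {
        if (dmesg_restrict && !has_admin)
            return false;                 /* everything requires admin */
        if (!read_only_action && !has_admin)
            return false;                 /* destructive ops require admin */
        return true;
    }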
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 122ec9dc4853..26aff6bf9e50 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -8,7 +8,11 @@ perf-trace - Read perf.data (created by perf record) and display trace output
 SYNOPSIS
 --------
 [verse]
-'perf trace' {record <script> | report <script> [args] }
+'perf trace' [<options>]
+'perf trace' [<options>] record <script> [<record-options>] <command>
+'perf trace' [<options>] report <script> [script-args]
+'perf trace' [<options>] <script> <required-script-args> [<record-options>] <command>
+'perf trace' [<options>] <top-script> [script-args]
 
 DESCRIPTION
 -----------
@@ -24,23 +28,53 @@ There are several variants of perf trace:
   available via 'perf trace -l').  The following variants allow you to
   record and run those scripts:
 
-  'perf trace record <script>' to record the events required for 'perf
-  trace report'.  <script> is the name displayed in the output of
-  'perf trace --list' i.e. the actual script name minus any language
-  extension.
+  'perf trace record <script> <command>' to record the events required
+  for 'perf trace report'.  <script> is the name displayed in the
+  output of 'perf trace --list' i.e. the actual script name minus any
+  language extension.  If <command> is not specified, the events are
+  recorded using the -a (system-wide) 'perf record' option.
 
-  'perf trace report <script>' to run and display the results of
-  <script>.  <script> is the name displayed in the output of 'perf
+  'perf trace report <script> [args]' to run and display the results
+  of <script>.  <script> is the name displayed in the output of 'perf
   trace --list' i.e. the actual script name minus any language
   extension.  The perf.data output from a previous run of 'perf trace
   record <script>' is used and should be present for this command to
-  succeed.
+  succeed.  [args] refers to the (mainly optional) args expected by
+  the script.
+
+  'perf trace <script> <required-script-args> <command>' to both
+  record the events required for <script> and to run the <script>
+  using 'live-mode' i.e. without writing anything to disk.  <script>
+  is the name displayed in the output of 'perf trace --list' i.e. the
+  actual script name minus any language extension.  If <command> is
+  not specified, the events are recorded using the -a (system-wide)
+  'perf record' option.  If <script> has any required args, they
+  should be specified before <command>.  This mode doesn't allow for
+  optional script args to be specified; if optional script args are
+  desired, they can be specified using separate 'perf trace record'
+  and 'perf trace report' commands, with the stdout of the record step
+  piped to the stdin of the report script, using the '-o -' and '-i -'
+  options of the corresponding commands.
+
+  'perf trace <top-script>' to both record the events required for
+  <top-script> and to run the <top-script> using 'live-mode'
+  i.e. without writing anything to disk.  <top-script> is the name
+  displayed in the output of 'perf trace --list' i.e. the actual
+  script name minus any language extension; a <top-script> is defined
+  as any script name ending with the string 'top'.
+
+  [<record-options>] can be passed to the record steps of 'perf trace
+  record' and 'live-mode' variants; this isn't possible however for
+  <top-script> 'live-mode' or 'perf trace report' variants.
 
   See the 'SEE ALSO' section for links to language-specific
   information on how to write and run your own trace scripts.
 
 OPTIONS
 -------
+<command>...::
+	Any command you can specify in a shell.
+
 -D::
 --dump-raw-trace=::
         Display verbose dump of the trace data.
@@ -64,6 +98,13 @@ OPTIONS
 	Generate perf-trace.[ext] starter script for given language,
 	using current perf.data.
 
+-a::
+        Force system-wide collection.  Scripts run without a <command>
+        normally use -a by default, while scripts run with a <command>
+        normally don't - this option allows the latter to be run in
+        system-wide mode.
+
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-trace-perl[1],
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4e75583ddd6d..93bd2ff001fb 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -790,7 +790,7 @@ static const char * const record_usage[] = {
 
 static bool force, append_file;
 
-static const struct option options[] = {
+const struct option record_options[] = {
 	OPT_CALLBACK('e', "event", NULL, "event",
 		     "event selector. use 'perf list' to list available events",
 		     parse_events),
@@ -839,16 +839,16 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
 {
 	int i, j, err = -ENOMEM;
 
-	argc = parse_options(argc, argv, options, record_usage,
+	argc = parse_options(argc, argv, record_options, record_usage,
 			    PARSE_OPT_STOP_AT_NON_OPTION);
 	if (!argc && target_pid == -1 && target_tid == -1 &&
 		!system_wide && !cpu_list)
-		usage_with_options(record_usage, options);
+		usage_with_options(record_usage, record_options);
 
 	if (force && append_file) {
 		fprintf(stderr, "Can't overwrite and append at the same time."
 				" You need to choose between -f and -A");
-		usage_with_options(record_usage, options);
+		usage_with_options(record_usage, record_options);
 	} else if (append_file) {
 		write_mode = WRITE_APPEND;
 	} else {
@@ -871,7 +871,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
 		if (thread_num <= 0) {
 			fprintf(stderr, "Can't find all threads of pid %d\n",
 					target_pid);
-			usage_with_options(record_usage, options);
+			usage_with_options(record_usage, record_options);
 		}
 	} else {
 		all_tids=malloc(sizeof(pid_t));
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b513e40974f4..dd625808c2a5 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -69,7 +69,6 @@ static int target_tid = -1;
 static pid_t *all_tids = NULL;
 static int thread_num = 0;
 static bool inherit = false;
-static int profile_cpu = -1;
 static int nr_cpus = 0;
 static int realtime_prio = 0;
 static bool group = false;
@@ -558,13 +557,13 @@ static void print_sym_table(void)
 	else
 		printf(" (all");
 
-	if (profile_cpu != -1)
-		printf(", cpu: %d)\n", profile_cpu);
+	if (cpu_list)
+		printf(", CPU%s: %s)\n", nr_cpus > 1 ? "s" : "", cpu_list);
 	else {
 		if (target_tid != -1)
 			printf(")\n");
 		else
-			printf(", %d CPUs)\n", nr_cpus);
+			printf(", %d CPU%s)\n", nr_cpus, nr_cpus > 1 ? "s" : "");
 	}
 
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
@@ -1187,11 +1186,10 @@ int group_fd;
 static void start_counter(int i, int counter)
 {
 	struct perf_event_attr *attr;
-	int cpu;
+	int cpu = -1;
 	int thread_index;
 
-	cpu = profile_cpu;
-	if (target_tid == -1 && profile_cpu == -1)
+	if (target_tid == -1)
 		cpu = cpumap[i];
 
 	attr = attrs + counter;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 2f8df45c4dcb..86cfe3800e6b 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -10,6 +10,7 @@
10#include "util/symbol.h" 10#include "util/symbol.h"
11#include "util/thread.h" 11#include "util/thread.h"
12#include "util/trace-event.h" 12#include "util/trace-event.h"
13#include "util/parse-options.h"
13#include "util/util.h" 14#include "util/util.h"
14 15
15static char const *script_name; 16static char const *script_name;
@@ -17,6 +18,7 @@ static char const *generate_script_lang;
 static bool debug_mode;
 static u64 last_timestamp;
 static u64 nr_unordered;
+extern const struct option record_options[];
 
 static int default_start_script(const char *script __unused,
 				int argc __unused,
@@ -328,7 +330,7 @@ static struct script_desc *script_desc__new(const char *name)
 {
 	struct script_desc *s = zalloc(sizeof(*s));
 
-	if (s != NULL)
+	if (s != NULL && name)
 		s->name = strdup(name);
 
 	return s;
@@ -337,6 +339,8 @@ static struct script_desc *script_desc__new(const char *name)
 static void script_desc__delete(struct script_desc *s)
 {
 	free(s->name);
+	free(s->half_liner);
+	free(s->args);
 	free(s);
 }
 
@@ -537,8 +541,40 @@ static char *get_script_path(const char *script_root, const char *suffix)
 	return path;
 }
 
+static bool is_top_script(const char *script_path)
+{
+	return ends_with((char *)script_path, "top") == NULL ? false : true;
+}
+
+static int has_required_arg(char *script_path)
+{
+	struct script_desc *desc;
+	int n_args = 0;
+	char *p;
+
+	desc = script_desc__new(NULL);
+
+	if (read_script_info(desc, script_path))
+		goto out;
+
+	if (!desc->args)
+		goto out;
+
+	for (p = desc->args; *p; p++)
+		if (*p == '<')
+			n_args++;
+out:
+	script_desc__delete(desc);
+
+	return n_args;
+}
+
 static const char * const trace_usage[] = {
-	"perf trace [<options>] <command>",
+	"perf trace [<options>]",
+	"perf trace [<options>] record <script> [<record-options>] <command>",
+	"perf trace [<options>] report <script> [script-args]",
+	"perf trace [<options>] <script> [<record-options>] <command>",
+	"perf trace [<options>] <top-script> [script-args]",
 	NULL
 };
 
@@ -564,50 +600,81 @@ static const struct option options[] = {
 	OPT_END()
 };
 
+static bool have_cmd(int argc, const char **argv)
+{
+	char **__argv = malloc(sizeof(const char *) * argc);
+
+	if (!__argv)
+		die("malloc");
+	memcpy(__argv, argv, sizeof(const char *) * argc);
+	argc = parse_options(argc, (const char **)__argv, record_options,
+			     NULL, PARSE_OPT_STOP_AT_NON_OPTION);
+	free(__argv);
+
+	return argc != 0;
+}
+
 int cmd_trace(int argc, const char **argv, const char *prefix __used)
 {
+	char *rec_script_path = NULL;
+	char *rep_script_path = NULL;
 	struct perf_session *session;
-	const char *suffix = NULL;
+	char *script_path = NULL;
 	const char **__argv;
-	char *script_path;
-	int i, err;
+	bool system_wide;
+	int i, j, err;
 
-	if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) {
-		if (argc < 3) {
-			fprintf(stderr,
-				"Please specify a record script\n");
-			return -1;
-		}
-		suffix = RECORD_SUFFIX;
+	setup_scripting();
+
+	argc = parse_options(argc, argv, options, trace_usage,
+			     PARSE_OPT_STOP_AT_NON_OPTION);
+
+	if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
+		rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
+		if (!rec_script_path)
+			return cmd_record(argc, argv, NULL);
 	}
 
-	if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) {
-		if (argc < 3) {
+	if (argc > 1 && !strncmp(argv[0], "rep", strlen("rep"))) {
+		rep_script_path = get_script_path(argv[1], REPORT_SUFFIX);
+		if (!rep_script_path) {
 			fprintf(stderr,
-				"Please specify a report script\n");
+				"Please specify a valid report script"
+				"(see 'perf trace -l' for listing)\n");
 			return -1;
 		}
-		suffix = REPORT_SUFFIX;
 	}
 
 	/* make sure PERF_EXEC_PATH is set for scripts */
 	perf_set_argv_exec_path(perf_exec_path());
 
-	if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
-		char *record_script_path, *report_script_path;
+	if (argc && !script_name && !rec_script_path && !rep_script_path) {
 		int live_pipe[2];
+		int rep_args;
 		pid_t pid;
 
-		record_script_path = get_script_path(argv[1], RECORD_SUFFIX);
-		if (!record_script_path) {
-			fprintf(stderr, "record script not found\n");
-			return -1;
+		rec_script_path = get_script_path(argv[0], RECORD_SUFFIX);
+		rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
+
+		if (!rec_script_path && !rep_script_path) {
+			fprintf(stderr, " Couldn't find script %s\n\n See perf"
+				" trace -l for available scripts.\n", argv[0]);
+			usage_with_options(trace_usage, options);
 		}
 
-		report_script_path = get_script_path(argv[1], REPORT_SUFFIX);
-		if (!report_script_path) {
-			fprintf(stderr, "report script not found\n");
-			return -1;
+		if (is_top_script(argv[0])) {
+			rep_args = argc - 1;
+		} else {
+			int rec_args;
+
+			rep_args = has_required_arg(rep_script_path);
+			rec_args = (argc - 1) - rep_args;
+			if (rec_args < 0) {
+				fprintf(stderr, " %s script requires options."
+					"\n\n See perf trace -l for available "
+					"scripts and options.\n", argv[0]);
+				usage_with_options(trace_usage, options);
+			}
 		}
 
 		if (pipe(live_pipe) < 0) {
@@ -622,60 +689,84 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
 		}
 
 		if (!pid) {
+			system_wide = true;
+			j = 0;
+
 			dup2(live_pipe[1], 1);
 			close(live_pipe[0]);
 
-			__argv = malloc(6 * sizeof(const char *));
-			__argv[0] = "/bin/sh";
-			__argv[1] = record_script_path;
-			__argv[2] = "-q";
-			__argv[3] = "-o";
-			__argv[4] = "-";
-			__argv[5] = NULL;
+			if (!is_top_script(argv[0]))
+				system_wide = !have_cmd(argc - rep_args,
+							&argv[rep_args]);
+
+			__argv = malloc((argc + 6) * sizeof(const char *));
+			if (!__argv)
+				die("malloc");
+
+			__argv[j++] = "/bin/sh";
+			__argv[j++] = rec_script_path;
+			if (system_wide)
+				__argv[j++] = "-a";
+			__argv[j++] = "-q";
+			__argv[j++] = "-o";
+			__argv[j++] = "-";
+			for (i = rep_args + 1; i < argc; i++)
+				__argv[j++] = argv[i];
+			__argv[j++] = NULL;
 
 			execvp("/bin/sh", (char **)__argv);
+			free(__argv);
 			exit(-1);
 		}
 
 		dup2(live_pipe[0], 0);
 		close(live_pipe[1]);
 
-		__argv = malloc((argc + 3) * sizeof(const char *));
-		__argv[0] = "/bin/sh";
-		__argv[1] = report_script_path;
-		for (i = 2; i < argc; i++)
-			__argv[i] = argv[i];
-		__argv[i++] = "-i";
-		__argv[i++] = "-";
-		__argv[i++] = NULL;
+		__argv = malloc((argc + 4) * sizeof(const char *));
+		if (!__argv)
+			die("malloc");
+		j = 0;
+		__argv[j++] = "/bin/sh";
+		__argv[j++] = rep_script_path;
+		for (i = 1; i < rep_args + 1; i++)
+			__argv[j++] = argv[i];
+		__argv[j++] = "-i";
+		__argv[j++] = "-";
+		__argv[j++] = NULL;
 
 		execvp("/bin/sh", (char **)__argv);
+		free(__argv);
 		exit(-1);
 	}
 
-	if (suffix) {
-		script_path = get_script_path(argv[2], suffix);
-		if (!script_path) {
-			fprintf(stderr, "script not found\n");
-			return -1;
-		}
-
-		__argv = malloc((argc + 1) * sizeof(const char *));
-		__argv[0] = "/bin/sh";
-		__argv[1] = script_path;
-		for (i = 3; i < argc; i++)
-			__argv[i - 1] = argv[i];
-		__argv[argc - 1] = NULL;
+	if (rec_script_path)
+		script_path = rec_script_path;
+	if (rep_script_path)
+		script_path = rep_script_path;
+
+	if (script_path) {
+		system_wide = false;
+		j = 0;
+
+		if (rec_script_path)
+			system_wide = !have_cmd(argc - 1, &argv[1]);
+
+		__argv = malloc((argc + 2) * sizeof(const char *));
+		if (!__argv)
+			die("malloc");
+		__argv[j++] = "/bin/sh";
+		__argv[j++] = script_path;
+		if (system_wide)
+			__argv[j++] = "-a";
+		for (i = 2; i < argc; i++)
+			__argv[j++] = argv[i];
+		__argv[j++] = NULL;
 
 		execvp("/bin/sh", (char **)__argv);
+		free(__argv);
 		exit(-1);
 	}
 
-	setup_scripting();
-
-	argc = parse_options(argc, argv, options, trace_usage,
-			PARSE_OPT_STOP_AT_NON_OPTION);
-
 	if (symbol__init() < 0)
 		return -1;
 	if (!script_name)
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
index eb5846bcb565..8104895a7b67 100644
--- a/tools/perf/scripts/perl/bin/failed-syscalls-record
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_exit $@
+perf record -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record
index 5bfaae5a6cba..33efc8673aae 100644
--- a/tools/perf/scripts/perl/bin/rw-by-file-record
+++ b/tools/perf/scripts/perl/bin/rw-by-file-record
@@ -1,3 +1,3 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
 
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record
index 6e0b2f7755ac..7cb9db230448 100644
--- a/tools/perf/scripts/perl/bin/rw-by-pid-record
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rwtop-record b/tools/perf/scripts/perl/bin/rwtop-record
index 6e0b2f7755ac..7cb9db230448 100644
--- a/tools/perf/scripts/perl/bin/rwtop-record
+++ b/tools/perf/scripts/perl/bin/rwtop-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record
index 9f2acaaae9f0..464251a1bd7e 100644
--- a/tools/perf/scripts/perl/bin/wakeup-latency-record
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-record
@@ -1,5 +1,5 @@
 #!/bin/bash
-perf record -a -e sched:sched_switch -e sched:sched_wakeup $@
+perf record -e sched:sched_switch -e sched:sched_wakeup $@
 
 
 
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record
index 85301f2471ff..8edda9078d5d 100644
--- a/tools/perf/scripts/perl/bin/workqueue-stats-record
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
+perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
index eb5846bcb565..8104895a7b67 100644
--- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_exit $@
+perf record -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
index 5ecbb433caf4..b1495c9a9b20 100644
--- a/tools/perf/scripts/python/bin/futex-contention-record
+++ b/tools/perf/scripts/python/bin/futex-contention-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
+perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record
index d931a828126b..558754b840a9 100644
--- a/tools/perf/scripts/python/bin/netdev-times-record
+++ b/tools/perf/scripts/python/bin/netdev-times-record
@@ -1,5 +1,5 @@
 #!/bin/bash
-perf record -a -e net:net_dev_xmit -e net:net_dev_queue \
+perf record -e net:net_dev_xmit -e net:net_dev_queue \
 	-e net:netif_receive_skb -e net:netif_rx \
 	-e skb:consume_skb -e skb:kfree_skb \
 	-e skb:skb_copy_datagram_iovec -e napi:napi_poll \
diff --git a/tools/perf/scripts/python/bin/sched-migration-record b/tools/perf/scripts/python/bin/sched-migration-record
index 17a3e9bd9e8f..7493fddbe995 100644
--- a/tools/perf/scripts/python/bin/sched-migration-record
+++ b/tools/perf/scripts/python/bin/sched-migration-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -m 16384 -a -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
+perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record
index 1fc5998b721d..4efbfaa7f6a5 100644
--- a/tools/perf/scripts/python/bin/sctop-record
+++ b/tools/perf/scripts/python/bin/sctop-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
index 1fc5998b721d..4efbfaa7f6a5 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
index 1fc5998b721d..4efbfaa7f6a5 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index 9706d9d40279..056c69521a38 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -104,9 +104,10 @@ out_destroy_form:
 	return rc;
 }
 
+static const char yes[] = "Yes", no[] = "No";
+
 bool ui__dialog_yesno(const char *msg)
 {
 	/* newtWinChoice should really be accepting const char pointers... */
-	char yes[] = "Yes", no[] = "No";
-	return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
+	return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1;
 }