-rw-r--r--Documentation/DocBook/media-entities.tmpl1
-rw-r--r--Documentation/DocBook/v4l/media-ioc-setup-link.xml2
-rw-r--r--Documentation/DocBook/v4l/pixfmt-y12.xml79
-rw-r--r--Documentation/DocBook/v4l/pixfmt.xml1
-rw-r--r--Documentation/DocBook/v4l/subdev-formats.xml59
-rw-r--r--Documentation/hwmon/max1606462
-rw-r--r--Documentation/hwmon/max3444079
-rw-r--r--Documentation/hwmon/max868869
-rw-r--r--Documentation/hwmon/pmbus34
-rw-r--r--Documentation/hwmon/smm6658
-rw-r--r--Documentation/hwmon/submitting-patches109
-rw-r--r--Documentation/input/event-codes.txt262
-rw-r--r--Documentation/md.txt10
-rw-r--r--Documentation/sound/alsa/SB-Live-mixer.txt6
-rw-r--r--Documentation/video4linux/sh_mobile_ceu_camera.txt6
-rw-r--r--MAINTAINERS3
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/kernel/Makefile2
-rw-r--r--arch/alpha/kernel/core_mcpcia.c12
-rw-r--r--arch/alpha/kernel/err_titan.c4
-rw-r--r--arch/alpha/kernel/irq_alpha.c2
-rw-r--r--arch/alpha/kernel/setup.c6
-rw-r--r--arch/alpha/kernel/smc37c93x.c3
-rw-r--r--arch/alpha/kernel/sys_wildfire.c5
-rw-r--r--arch/alpha/kernel/time.c1
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/Kconfig.debug11
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/include/asm/cputype.h1
-rw-r--r--arch/arm/include/asm/thread_notify.h1
-rw-r--r--arch/arm/include/asm/unistd.h4
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/calls.S4
-rw-r--r--arch/arm/kernel/elf.c17
-rw-r--r--arch/arm/kernel/hw_breakpoint.c7
-rw-r--r--arch/arm/kernel/perf_event.c2
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/traps.c3
-rw-r--r--arch/arm/mach-davinci/dm355.c2
-rw-r--r--arch/arm/mach-davinci/dm644x.c2
-rw-r--r--arch/arm/mach-mmp/include/mach/gpio.h2
-rw-r--r--arch/arm/mach-mmp/include/mach/mfp-pxa168.h9
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c5
-rw-r--r--arch/arm/mach-msm/timer.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/gpio.h17
-rw-r--r--arch/arm/mach-pxa/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-pxa/pxa25x.c2
-rw-r--r--arch/arm/mach-pxa/pxa27x.c2
-rw-r--r--arch/arm/mach-s3c2440/mach-gta02.c5
-rw-r--r--arch/arm/mach-tegra/gpio.c6
-rw-r--r--arch/arm/mach-tegra/tegra2_clocks.c9
-rw-r--r--arch/arm/mach-ux500/board-mop500.c19
-rw-r--r--arch/arm/mm/mmap.c4
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S2
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/mm/proc-xsc3.S2
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/arm/plat-s5p/pm.c11
-rw-r--r--arch/arm/plat-samsung/pm-check.c6
-rw-r--r--arch/arm/plat-samsung/pm.c5
-rw-r--r--arch/arm/vfp/vfpmodule.c34
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/include/asm/cputable.h16
-rw-r--r--arch/powerpc/include/asm/pte-common.h2
-rw-r--r--arch/powerpc/include/asm/uninorth.h2
-rw-r--r--arch/powerpc/kernel/cputable.c2
-rw-r--r--arch/powerpc/kernel/crash.c12
-rw-r--r--arch/powerpc/kernel/legacy_serial.c8
-rw-r--r--arch/powerpc/kernel/perf_event.c37
-rw-r--r--arch/powerpc/kernel/time.c3
-rw-r--r--arch/powerpc/platforms/powermac/smp.c8
-rw-r--r--arch/powerpc/platforms/pseries/setup.c12
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c5
-rw-r--r--arch/s390/crypto/prng.c2
-rw-r--r--arch/s390/kvm/sie64a.S4
-rw-r--r--arch/s390/mm/fault.c4
-rw-r--r--arch/s390/mm/pageattr.c5
-rw-r--r--arch/x86/include/asm/gart.h24
-rw-r--r--arch/x86/include/asm/msr-index.h4
-rw-r--r--arch/x86/include/asm/numa.h2
-rw-r--r--arch/x86/kernel/aperture_64.c2
-rw-r--r--arch/x86/kernel/apm_32.c5
-rw-r--r--arch/x86/kernel/cpu/amd.c19
-rw-r--r--arch/x86/kernel/cpu/perf_event.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c22
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c2
-rw-r--r--arch/x86/kernel/pci-gart_64.c9
-rw-r--r--arch/x86/mm/numa.c31
-rw-r--r--arch/x86/mm/numa_emulation.c20
-rw-r--r--arch/x86/platform/ce4100/falconfalls.dts2
-rw-r--r--arch/x86/platform/mrst/mrst.c10
-rw-r--r--arch/x86/xen/mmu.c13
-rw-r--r--arch/x86/xen/setup.c2
-rw-r--r--arch/xtensa/kernel/irq.c18
-rw-r--r--block/blk-core.c178
-rw-r--r--block/blk-exec.c2
-rw-r--r--block/blk-flush.c4
-rw-r--r--block/blk-sysfs.c11
-rw-r--r--block/cfq-iosched.c26
-rw-r--r--block/elevator.c7
-rw-r--r--block/genhd.c8
-rw-r--r--drivers/acpi/scan.c4
-rw-r--r--drivers/ata/ahci.c8
-rw-r--r--drivers/ata/ahci.h4
-rw-r--r--drivers/ata/ata_piix.c8
-rw-r--r--drivers/ata/libahci.c60
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/libata-eh.c6
-rw-r--r--drivers/ata/pata_at91.c22
-rw-r--r--drivers/base/power/main.c1
-rw-r--r--drivers/base/power/wakeup.c2
-rw-r--r--drivers/base/syscore.c2
-rw-r--r--drivers/char/agp/generic.c19
-rw-r--r--drivers/char/virtio_console.c11
-rw-r--r--drivers/connector/connector.c1
-rw-r--r--drivers/edac/amd64_edac.c88
-rw-r--r--drivers/edac/amd64_edac.h3
-rw-r--r--drivers/edac/edac_mc_sysfs.c11
-rw-r--r--drivers/gpu/drm/i915/intel_display.c73
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c5
-rw-r--r--drivers/gpu/drm/radeon/atom.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c91
-rw-r--r--drivers/gpu/drm/radeon/r600.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c6
-rw-r--r--drivers/hwmon/pmbus_core.c1
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c22
-rw-r--r--drivers/i2c/i2c-core.c6
-rw-r--r--drivers/ide/ide-cd.c1
-rw-r--r--drivers/ide/ide-cd_ioctl.c6
-rw-r--r--drivers/ide/ide-gd.c7
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/input/evdev.c33
-rw-r--r--drivers/input/input.c40
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c6
-rw-r--r--drivers/input/misc/xen-kbdfront.c13
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c17
-rw-r--r--drivers/md/dm-raid.c8
-rw-r--r--drivers/md/md.c88
-rw-r--r--drivers/md/md.h26
-rw-r--r--drivers/md/raid1.c29
-rw-r--r--drivers/md/raid10.c27
-rw-r--r--drivers/md/raid5.c66
-rw-r--r--drivers/md/raid5.h2
-rw-r--r--drivers/media/common/tuners/tda18271-common.c11
-rw-r--r--drivers/media/common/tuners/tda18271-fe.c21
-rw-r--r--drivers/media/common/tuners/tda18271-maps.c12
-rw-r--r--drivers/media/dvb/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig4
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c6
-rw-r--r--drivers/media/media-entity.c8
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c2
-rw-r--r--drivers/media/video/Kconfig2
-rw-r--r--drivers/media/video/cx18/cx18-streams.c10
-rw-r--r--drivers/media/video/cx23885/Kconfig1
-rw-r--r--drivers/media/video/imx074.c2
-rw-r--r--drivers/media/video/omap3isp/isp.c34
-rw-r--r--drivers/media/video/omap3isp/isp.h12
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c37
-rw-r--r--drivers/media/video/omap3isp/isppreview.c2
-rw-r--r--drivers/media/video/omap3isp/ispqueue.c6
-rw-r--r--drivers/media/video/omap3isp/ispresizer.c75
-rw-r--r--drivers/media/video/omap3isp/ispstat.h6
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c108
-rw-r--r--drivers/media/video/omap3isp/ispvideo.h3
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c8
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c74
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c10
-rw-r--r--drivers/media/video/sh_mobile_csi2.c11
-rw-r--r--drivers/media/video/soc_camera.c7
-rw-r--r--drivers/media/video/v4l2-dev.c15
-rw-r--r--drivers/media/video/videobuf-dma-contig.c2
-rw-r--r--drivers/media/video/videobuf2-core.c17
-rw-r--r--drivers/media/video/videobuf2-dma-contig.c2
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/net/bna/bfa_ioc.c31
-rw-r--r--drivers/net/bna/bfa_ioc.h1
-rw-r--r--drivers/net/bna/bfa_ioc_ct.c28
-rw-r--r--drivers/net/bna/bfi.h6
-rw-r--r--drivers/net/bna/bnad.c1
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c9
-rw-r--r--drivers/net/bonding/bond_alb.c6
-rw-r--r--drivers/net/bonding/bond_alb.h4
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/loopback.c3
-rw-r--r--drivers/net/natsemi.c3
-rw-r--r--drivers/net/netxen/netxen_nic.h4
-rw-r--r--drivers/net/netxen/netxen_nic_main.c17
-rw-r--r--drivers/net/qlcnic/qlcnic.h1
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c14
-rw-r--r--drivers/net/sfc/efx.c6
-rw-r--r--drivers/net/sfc/io.h2
-rw-r--r--drivers/net/sfc/net_driver.h2
-rw-r--r--drivers/net/sfc/nic.c22
-rw-r--r--drivers/net/sfc/nic.h1
-rw-r--r--drivers/net/sfc/selftest.c25
-rw-r--r--drivers/net/sfc/tx.c3
-rw-r--r--drivers/net/sis900.c23
-rw-r--r--drivers/net/stmmac/dwmac_lib.c28
-rw-r--r--drivers/net/stmmac/stmmac_main.c49
-rw-r--r--drivers/net/tokenring/3c359.c4
-rw-r--r--drivers/net/tokenring/lanstreamer.c2
-rw-r--r--drivers/net/tokenring/olympic.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c6
-rw-r--r--drivers/net/wireless/ath/regd_common.h1
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig9
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h3
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c17
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c7
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c3
-rw-r--r--drivers/net/wireless/mwl8k.c9
-rw-r--r--drivers/net/wireless/p54/txrx.c2
-rw-r--r--drivers/parport/parport_pc.c8
-rw-r--r--drivers/pci/Kconfig4
-rw-r--r--drivers/pci/Makefile4
-rw-r--r--drivers/pci/intel-iommu.c55
-rw-r--r--drivers/pcmcia/pcmcia_resource.c2
-rw-r--r--drivers/pcmcia/pxa2xx_balloon3.c5
-rw-r--r--drivers/pcmcia/pxa2xx_trizeps4.c15
-rw-r--r--drivers/rtc/class.c2
-rw-r--r--drivers/rtc/interface.c26
-rw-r--r--drivers/rtc/rtc-bfin.c2
-rw-r--r--drivers/rtc/rtc-coh901331.c4
-rw-r--r--drivers/rtc/rtc-omap.c2
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/s390/block/dasd.c40
-rw-r--r--drivers/s390/block/dasd_devmap.c30
-rw-r--r--drivers/s390/block/dasd_eckd.c5
-rw-r--r--drivers/s390/block/dasd_genhd.c2
-rw-r--r--drivers/s390/block/dasd_int.h3
-rw-r--r--drivers/s390/block/dasd_ioctl.c128
-rw-r--r--drivers/s390/cio/qdio_main.c17
-rw-r--r--drivers/scsi/scsi_lib.c17
-rw-r--r--drivers/scsi/scsi_transport_fc.c19
-rw-r--r--drivers/staging/rt2860/common/cmm_data_pci.c2
-rw-r--r--drivers/staging/rt2860/common/cmm_data_usb.c2
-rw-r--r--drivers/staging/spectra/ffsport.c2
-rw-r--r--drivers/staging/tidspbridge/dynload/cload.c2
-rw-r--r--drivers/staging/tty/specialix.c2
-rw-r--r--drivers/tty/n_gsm.c8
-rw-r--r--drivers/tty/serial/imx.c3
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/core/devices.c10
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hub.c12
-rw-r--r--drivers/usb/gadget/f_audio.c1
-rw-r--r--drivers/usb/gadget/f_eem.c8
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c20
-rw-r--r--drivers/usb/gadget/inode.c4
-rw-r--r--drivers/usb/gadget/pch_udc.c8
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c2
-rw-r--r--drivers/usb/host/ehci-q.c15
-rw-r--r--drivers/usb/host/isp1760-hcd.c2
-rw-r--r--drivers/usb/host/ohci-au1xxx.c2
-rw-r--r--drivers/usb/host/pci-quirks.c117
-rw-r--r--drivers/usb/host/xhci-mem.c106
-rw-r--r--drivers/usb/host/xhci-pci.c4
-rw-r--r--drivers/usb/host/xhci-ring.c219
-rw-r--r--drivers/usb/host/xhci.c23
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/musb/Kconfig6
-rw-r--r--drivers/usb/musb/blackfin.c24
-rw-r--r--drivers/usb/musb/cppi_dma.c27
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_core.h5
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/musbhsdma.c8
-rw-r--r--drivers/usb/musb/omap2430.c3
-rw-r--r--drivers/usb/musb/ux500.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h12
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/serial/qcserial.c31
-rw-r--r--drivers/video/pxafb.c4
-rw-r--r--drivers/virtio/virtio_pci.c15
-rw-r--r--drivers/virtio/virtio_ring.c1
-rw-r--r--drivers/watchdog/iTCO_wdt.c97
-rw-r--r--drivers/xen/manage.c9
-rw-r--r--fs/9p/fid.c15
-rw-r--r--fs/9p/v9fs.h1
-rw-r--r--fs/9p/vfs_dentry.c4
-rw-r--r--fs/9p/vfs_inode_dotl.c2
-rw-r--r--fs/9p/vfs_super.c80
-rw-r--r--fs/btrfs/acl.c9
-rw-r--r--fs/btrfs/ctree.h11
-rw-r--r--fs/btrfs/disk-io.c3
-rw-r--r--fs/btrfs/extent-tree.c129
-rw-r--r--fs/btrfs/extent_io.c84
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/file.c21
-rw-r--r--fs/btrfs/free-space-cache.c132
-rw-r--r--fs/btrfs/inode.c185
-rw-r--r--fs/btrfs/ioctl.c2
-rw-r--r--fs/btrfs/super.c42
-rw-r--r--fs/btrfs/transaction.c48
-rw-r--r--fs/btrfs/transaction.h4
-rw-r--r--fs/btrfs/tree-log.c7
-rw-r--r--fs/btrfs/volumes.c10
-rw-r--r--fs/btrfs/xattr.c33
-rw-r--r--fs/cifs/connect.c5
-rw-r--r--fs/dcache.c89
-rw-r--r--fs/ecryptfs/crypto.c21
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h7
-rw-r--r--fs/ecryptfs/file.c25
-rw-r--r--fs/ecryptfs/inode.c60
-rw-r--r--fs/ecryptfs/kthread.c6
-rw-r--r--fs/ecryptfs/main.c72
-rw-r--r--fs/ecryptfs/super.c16
-rw-r--r--fs/filesystems.c3
-rw-r--r--fs/gfs2/aops.c2
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/gfs2/file.c58
-rw-r--r--fs/gfs2/glock.c6
-rw-r--r--fs/gfs2/glops.c4
-rw-r--r--fs/gfs2/inode.c56
-rw-r--r--fs/gfs2/inode.h3
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/gfs2/rgrp.c4
-rw-r--r--fs/gfs2/super.c14
-rw-r--r--fs/namei.c1
-rw-r--r--fs/nfsd/nfs4state.c3
-rw-r--r--fs/nfsd/vfs.c9
-rw-r--r--fs/ocfs2/ocfs2_fs.h2
-rw-r--r--fs/proc/base.c9
-rw-r--r--fs/ubifs/debug.h152
-rw-r--r--fs/ubifs/file.c3
-rw-r--r--fs/ubifs/recovery.c26
-rw-r--r--fs/ubifs/super.c29
-rw-r--r--fs/xattr.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_message.c4
-rw-r--r--include/linux/bit_spinlock.h8
-rw-r--r--include/linux/blkdev.h54
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/input.h10
-rw-r--r--include/linux/input/mt.h6
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/list_bl.h11
-rw-r--r--include/linux/pid.h2
-rw-r--r--include/linux/posix-clock.h5
-rw-r--r--include/linux/rtc.h2
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/usb/usbnet.h4
-rw-r--r--include/linux/v4l2-mediabus.h7
-rw-r--r--include/linux/videodev2.h1
-rw-r--r--include/media/v4l2-device.h2
-rw-r--r--include/net/9p/9p.h2
-rw-r--r--include/net/9p/client.h5
-rw-r--r--include/trace/events/block.h30
-rw-r--r--init/Kconfig16
-rw-r--r--kernel/futex.c2
-rw-r--r--kernel/kexec.c7
-rw-r--r--kernel/perf_event.c12
-rw-r--r--kernel/pid.c5
-rw-r--r--kernel/power/hibernate.c10
-rw-r--r--kernel/power/suspend.c5
-rw-r--r--kernel/sched.c2
-rw-r--r--kernel/sched_fair.c14
-rw-r--r--kernel/time/posix-clock.c24
-rw-r--r--kernel/trace/blktrace.c33
-rw-r--r--net/9p/client.c29
-rw-r--r--net/9p/protocol.c7
-rw-r--r--net/9p/trans_common.c2
-rw-r--r--net/9p/trans_virtio.c15
-rw-r--r--net/bridge/br_netfilter.c6
-rw-r--r--net/caif/cfdgml.c6
-rw-r--r--net/caif/cfmuxl.c4
-rw-r--r--net/core/dev.c10
-rw-r--r--net/ieee802154/Makefile2
-rw-r--r--net/ipv4/inet_connection_sock.c5
-rw-r--r--net/ipv4/inetpeer.c13
-rw-r--r--net/ipv4/ip_options.c6
-rw-r--r--net/ipv4/sysctl_net_ipv4.c3
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/irda/af_irda.c3
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/llc/llc_input.c3
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c4
-rw-r--r--net/netfilter/ipset/ip_set_core.c18
-rw-r--r--net/netfilter/xt_set.c18
-rw-r--r--net/sctp/associola.c4
-rw-r--r--net/sctp/ulpevent.c2
-rw-r--r--scripts/kconfig/conf.c2
-rw-r--r--security/capability.c2
-rw-r--r--security/security.c6
-rw-r--r--security/selinux/avc.c36
-rw-r--r--security/selinux/hooks.c24
-rw-r--r--security/selinux/include/avc.h18
-rw-r--r--security/smack/smack_lsm.c6
-rw-r--r--sound/aoa/codecs/tas.c2
-rw-r--r--sound/pci/hda/hda_codec.c4
-rw-r--r--sound/pci/hda/patch_realtek.c25
-rw-r--r--sound/soc/codecs/jz4740.c2
-rw-r--r--sound/soc/codecs/sn95031.c2
-rw-r--r--sound/soc/codecs/wm8903.c38
-rw-r--r--sound/soc/codecs/wm8994.c16
-rw-r--r--sound/soc/codecs/wm_hubs.c8
-rw-r--r--sound/soc/mid-x86/sst_platform.c10
-rw-r--r--sound/soc/samsung/pcm.c4
-rw-r--r--sound/soc/sh/fsi.c22
-rw-r--r--sound/soc/soc-core.c5
-rw-r--r--sound/soc/tegra/harmony.c1
-rw-r--r--tools/perf/builtin-record.c7
-rw-r--r--tools/perf/builtin-stat.c7
-rw-r--r--tools/perf/builtin-test.c10
-rw-r--r--tools/perf/builtin-top.c3
-rw-r--r--tools/perf/util/cgroup.c2
-rw-r--r--tools/perf/util/evlist.c14
-rw-r--r--tools/perf/util/evsel.c27
-rw-r--r--tools/perf/util/evsel.h6
-rw-r--r--tools/perf/util/python.c9
-rw-r--r--tools/perf/util/ui/browsers/annotate.c6
-rw-r--r--tools/perf/util/ui/browsers/hists.c2
440 files changed, 4791 insertions, 2221 deletions
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index 5d259c632cdf..fea63b45471a 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -294,6 +294,7 @@
294<!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml"> 294<!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
295<!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml"> 295<!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
296<!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml"> 296<!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml">
297<!ENTITY sub-y12 SYSTEM "v4l/pixfmt-y12.xml">
297<!ENTITY sub-pixfmt SYSTEM "v4l/pixfmt.xml"> 298<!ENTITY sub-pixfmt SYSTEM "v4l/pixfmt.xml">
298<!ENTITY sub-cropcap SYSTEM "v4l/vidioc-cropcap.xml"> 299<!ENTITY sub-cropcap SYSTEM "v4l/vidioc-cropcap.xml">
299<!ENTITY sub-dbg-g-register SYSTEM "v4l/vidioc-dbg-g-register.xml"> 300<!ENTITY sub-dbg-g-register SYSTEM "v4l/vidioc-dbg-g-register.xml">
diff --git a/Documentation/DocBook/v4l/media-ioc-setup-link.xml b/Documentation/DocBook/v4l/media-ioc-setup-link.xml
index 2331e76ded17..cec97af4dab4 100644
--- a/Documentation/DocBook/v4l/media-ioc-setup-link.xml
+++ b/Documentation/DocBook/v4l/media-ioc-setup-link.xml
@@ -34,7 +34,7 @@
34 <varlistentry> 34 <varlistentry>
35 <term><parameter>request</parameter></term> 35 <term><parameter>request</parameter></term>
36 <listitem> 36 <listitem>
37 <para>MEDIA_IOC_ENUM_LINKS</para> 37 <para>MEDIA_IOC_SETUP_LINK</para>
38 </listitem> 38 </listitem>
39 </varlistentry> 39 </varlistentry>
40 <varlistentry> 40 <varlistentry>
diff --git a/Documentation/DocBook/v4l/pixfmt-y12.xml b/Documentation/DocBook/v4l/pixfmt-y12.xml
new file mode 100644
index 000000000000..ff417b858cc9
--- /dev/null
+++ b/Documentation/DocBook/v4l/pixfmt-y12.xml
@@ -0,0 +1,79 @@
1<refentry id="V4L2-PIX-FMT-Y12">
2 <refmeta>
3 <refentrytitle>V4L2_PIX_FMT_Y12 ('Y12 ')</refentrytitle>
4 &manvol;
5 </refmeta>
6 <refnamediv>
7 <refname><constant>V4L2_PIX_FMT_Y12</constant></refname>
8 <refpurpose>Grey-scale image</refpurpose>
9 </refnamediv>
10 <refsect1>
11 <title>Description</title>
12
13 <para>This is a grey-scale image with a depth of 12 bits per pixel. Pixels
14are stored in 16-bit words with unused high bits padded with 0. The least
15significant byte is stored at lower memory addresses (little-endian).</para>
16
17 <example>
18 <title><constant>V4L2_PIX_FMT_Y12</constant> 4 &times; 4
19pixel image</title>
20
21 <formalpara>
22 <title>Byte Order.</title>
23 <para>Each cell is one byte.
24 <informaltable frame="none">
25 <tgroup cols="9" align="center">
26 <colspec align="left" colwidth="2*" />
27 <tbody valign="top">
28 <row>
29 <entry>start&nbsp;+&nbsp;0:</entry>
30 <entry>Y'<subscript>00low</subscript></entry>
31 <entry>Y'<subscript>00high</subscript></entry>
32 <entry>Y'<subscript>01low</subscript></entry>
33 <entry>Y'<subscript>01high</subscript></entry>
34 <entry>Y'<subscript>02low</subscript></entry>
35 <entry>Y'<subscript>02high</subscript></entry>
36 <entry>Y'<subscript>03low</subscript></entry>
37 <entry>Y'<subscript>03high</subscript></entry>
38 </row>
39 <row>
40 <entry>start&nbsp;+&nbsp;8:</entry>
41 <entry>Y'<subscript>10low</subscript></entry>
42 <entry>Y'<subscript>10high</subscript></entry>
43 <entry>Y'<subscript>11low</subscript></entry>
44 <entry>Y'<subscript>11high</subscript></entry>
45 <entry>Y'<subscript>12low</subscript></entry>
46 <entry>Y'<subscript>12high</subscript></entry>
47 <entry>Y'<subscript>13low</subscript></entry>
48 <entry>Y'<subscript>13high</subscript></entry>
49 </row>
50 <row>
51 <entry>start&nbsp;+&nbsp;16:</entry>
52 <entry>Y'<subscript>20low</subscript></entry>
53 <entry>Y'<subscript>20high</subscript></entry>
54 <entry>Y'<subscript>21low</subscript></entry>
55 <entry>Y'<subscript>21high</subscript></entry>
56 <entry>Y'<subscript>22low</subscript></entry>
57 <entry>Y'<subscript>22high</subscript></entry>
58 <entry>Y'<subscript>23low</subscript></entry>
59 <entry>Y'<subscript>23high</subscript></entry>
60 </row>
61 <row>
62 <entry>start&nbsp;+&nbsp;24:</entry>
63 <entry>Y'<subscript>30low</subscript></entry>
64 <entry>Y'<subscript>30high</subscript></entry>
65 <entry>Y'<subscript>31low</subscript></entry>
66 <entry>Y'<subscript>31high</subscript></entry>
67 <entry>Y'<subscript>32low</subscript></entry>
68 <entry>Y'<subscript>32high</subscript></entry>
69 <entry>Y'<subscript>33low</subscript></entry>
70 <entry>Y'<subscript>33high</subscript></entry>
71 </row>
72 </tbody>
73 </tgroup>
74 </informaltable>
75 </para>
76 </formalpara>
77 </example>
78 </refsect1>
79</refentry>
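The hunk above documents the new V4L2_PIX_FMT_Y12 layout: each pixel is a little-endian 16-bit word whose low 12 bits carry the luma sample. As a minimal illustration, not part of the patch itself, the following userspace sketch unpacks such a buffer into plain 16-bit samples; the buffer dimensions are assumed to come from the negotiated format.

    /* Not part of the patch: a minimal sketch of unpacking V4L2_PIX_FMT_Y12.
     * Each pixel is a little-endian 16-bit word; only the low 12 bits are valid.
     * width/height are assumed to match the format negotiated with the driver. */
    #include <stdint.h>
    #include <stddef.h>

    static void y12_to_y16(const uint8_t *src, uint16_t *dst,
                           size_t width, size_t height)
    {
            size_t i;

            for (i = 0; i < width * height; i++) {
                    /* low byte first, high byte second (little-endian) */
                    uint16_t raw = src[2 * i] | ((uint16_t)src[2 * i + 1] << 8);

                    dst[i] = raw & 0x0fff;  /* keep the 12 valid bits */
            }
    }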
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index c6fdcbbd1b41..40af4beb48b9 100644
--- a/Documentation/DocBook/v4l/pixfmt.xml
+++ b/Documentation/DocBook/v4l/pixfmt.xml
@@ -696,6 +696,7 @@ information.</para>
696 &sub-packed-yuv; 696 &sub-packed-yuv;
697 &sub-grey; 697 &sub-grey;
698 &sub-y10; 698 &sub-y10;
699 &sub-y12;
699 &sub-y16; 700 &sub-y16;
700 &sub-yuyv; 701 &sub-yuyv;
701 &sub-uyvy; 702 &sub-uyvy;
diff --git a/Documentation/DocBook/v4l/subdev-formats.xml b/Documentation/DocBook/v4l/subdev-formats.xml
index 7041127d6dfc..d7ccd25edcc1 100644
--- a/Documentation/DocBook/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/v4l/subdev-formats.xml
@@ -456,6 +456,23 @@
456 <entry>b<subscript>1</subscript></entry> 456 <entry>b<subscript>1</subscript></entry>
457 <entry>b<subscript>0</subscript></entry> 457 <entry>b<subscript>0</subscript></entry>
458 </row> 458 </row>
459 <row id="V4L2-MBUS-FMT-SGBRG8-1X8">
460 <entry>V4L2_MBUS_FMT_SGBRG8_1X8</entry>
461 <entry>0x3013</entry>
462 <entry></entry>
463 <entry>-</entry>
464 <entry>-</entry>
465 <entry>-</entry>
466 <entry>-</entry>
467 <entry>g<subscript>7</subscript></entry>
468 <entry>g<subscript>6</subscript></entry>
469 <entry>g<subscript>5</subscript></entry>
470 <entry>g<subscript>4</subscript></entry>
471 <entry>g<subscript>3</subscript></entry>
472 <entry>g<subscript>2</subscript></entry>
473 <entry>g<subscript>1</subscript></entry>
474 <entry>g<subscript>0</subscript></entry>
475 </row>
459 <row id="V4L2-MBUS-FMT-SGRBG8-1X8"> 476 <row id="V4L2-MBUS-FMT-SGRBG8-1X8">
460 <entry>V4L2_MBUS_FMT_SGRBG8_1X8</entry> 477 <entry>V4L2_MBUS_FMT_SGRBG8_1X8</entry>
461 <entry>0x3002</entry> 478 <entry>0x3002</entry>
@@ -473,6 +490,23 @@
473 <entry>g<subscript>1</subscript></entry> 490 <entry>g<subscript>1</subscript></entry>
474 <entry>g<subscript>0</subscript></entry> 491 <entry>g<subscript>0</subscript></entry>
475 </row> 492 </row>
493 <row id="V4L2-MBUS-FMT-SRGGB8-1X8">
494 <entry>V4L2_MBUS_FMT_SRGGB8_1X8</entry>
495 <entry>0x3014</entry>
496 <entry></entry>
497 <entry>-</entry>
498 <entry>-</entry>
499 <entry>-</entry>
500 <entry>-</entry>
501 <entry>r<subscript>7</subscript></entry>
502 <entry>r<subscript>6</subscript></entry>
503 <entry>r<subscript>5</subscript></entry>
504 <entry>r<subscript>4</subscript></entry>
505 <entry>r<subscript>3</subscript></entry>
506 <entry>r<subscript>2</subscript></entry>
507 <entry>r<subscript>1</subscript></entry>
508 <entry>r<subscript>0</subscript></entry>
509 </row>
476 <row id="V4L2-MBUS-FMT-SBGGR10-DPCM8-1X8"> 510 <row id="V4L2-MBUS-FMT-SBGGR10-DPCM8-1X8">
477 <entry>V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8</entry> 511 <entry>V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8</entry>
478 <entry>0x300b</entry> 512 <entry>0x300b</entry>
@@ -2159,6 +2193,31 @@
2159 <entry>u<subscript>1</subscript></entry> 2193 <entry>u<subscript>1</subscript></entry>
2160 <entry>u<subscript>0</subscript></entry> 2194 <entry>u<subscript>0</subscript></entry>
2161 </row> 2195 </row>
2196 <row id="V4L2-MBUS-FMT-Y12-1X12">
2197 <entry>V4L2_MBUS_FMT_Y12_1X12</entry>
2198 <entry>0x2013</entry>
2199 <entry></entry>
2200 <entry>-</entry>
2201 <entry>-</entry>
2202 <entry>-</entry>
2203 <entry>-</entry>
2204 <entry>-</entry>
2205 <entry>-</entry>
2206 <entry>-</entry>
2207 <entry>-</entry>
2208 <entry>y<subscript>11</subscript></entry>
2209 <entry>y<subscript>10</subscript></entry>
2210 <entry>y<subscript>9</subscript></entry>
2211 <entry>y<subscript>8</subscript></entry>
2212 <entry>y<subscript>7</subscript></entry>
2213 <entry>y<subscript>6</subscript></entry>
2214 <entry>y<subscript>5</subscript></entry>
2215 <entry>y<subscript>4</subscript></entry>
2216 <entry>y<subscript>3</subscript></entry>
2217 <entry>y<subscript>2</subscript></entry>
2218 <entry>y<subscript>1</subscript></entry>
2219 <entry>y<subscript>0</subscript></entry>
2220 </row>
2162 <row id="V4L2-MBUS-FMT-UYVY8-1X16"> 2221 <row id="V4L2-MBUS-FMT-UYVY8-1X16">
2163 <entry>V4L2_MBUS_FMT_UYVY8_1X16</entry> 2222 <entry>V4L2_MBUS_FMT_UYVY8_1X16</entry>
2164 <entry>0x200f</entry> 2223 <entry>0x200f</entry>
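The table rows added above register the new media bus codes (V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8 and V4L2_MBUS_FMT_Y12_1X12). As a hedged sketch, not part of the patch, this is roughly how the new Y12 code could be requested on a subdev pad with VIDIOC_SUBDEV_S_FMT; the subdev node, pad number and resolution are placeholders.

    /* Not part of the patch: sketch of selecting V4L2_MBUS_FMT_Y12_1X12 on a
     * subdev pad. Device path, pad and resolution are assumptions. */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>
    #include <linux/v4l2-subdev.h>

    int set_y12_on_pad(const char *subdev, unsigned int pad)
    {
            struct v4l2_subdev_format fmt;
            int fd, ret;

            fd = open(subdev, O_RDWR);
            if (fd < 0)
                    return -1;

            memset(&fmt, 0, sizeof(fmt));
            fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
            fmt.pad = pad;
            fmt.format.code = V4L2_MBUS_FMT_Y12_1X12;   /* 0x2013, per the table above */
            fmt.format.width = 640;
            fmt.format.height = 480;
            fmt.format.field = V4L2_FIELD_NONE;

            ret = ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt); /* driver may adjust the request */
            close(fd);
            return ret;
    }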
diff --git a/Documentation/hwmon/max16064 b/Documentation/hwmon/max16064
new file mode 100644
index 000000000000..41728999e142
--- /dev/null
+++ b/Documentation/hwmon/max16064
@@ -0,0 +1,62 @@
1Kernel driver max16064
2======================
3
4Supported chips:
5 * Maxim MAX16064
6 Prefix: 'max16064'
7 Addresses scanned: -
8 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
9
10Author: Guenter Roeck <guenter.roeck@ericsson.com>
11
12
13Description
14-----------
15
16This driver supports hardware monitoring for Maxim MAX16064 Quad Power-Supply
17Controller with Active-Voltage Output Control and PMBus Interface.
18
19The driver is a client driver to the core PMBus driver.
20Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
21
22
23Usage Notes
24-----------
25
26This driver does not auto-detect devices. You will have to instantiate the
27devices explicitly. Please see Documentation/i2c/instantiating-devices for
28details.
29
30
31Platform data support
32---------------------
33
34The driver supports standard PMBus driver platform data.
35
36
37Sysfs entries
38-------------
39
40The following attributes are supported. Limits are read-write; all other
41attributes are read-only.
42
43in[1-4]_label "vout[1-4]"
44in[1-4]_input Measured voltage. From READ_VOUT register.
45in[1-4]_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
46in[1-4]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
47in[1-4]_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
48in[1-4]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
49in[1-4]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
50in[1-4]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
51in[1-4]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
52in[1-4]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
53
54temp1_input Measured temperature. From READ_TEMPERATURE_1 register.
55temp1_max Maximum temperature. From OT_WARN_LIMIT register.
56temp1_crit Critical high temperature. From OT_FAULT_LIMIT register.
57temp1_max_alarm Chip temperature high alarm. Set by comparing
58 READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING
59 status is set.
60temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
61 READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT
62 status is set.
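The sysfs attributes listed above are plain text files under the hwmon class device. As a small illustration, not part of the patch, the sketch below reads one of them from userspace; the exact path is an assumption (on kernels of this vintage the attributes usually sit under the hwmon device's "device" directory, and the right hwmonN is normally found by matching the "name" attribute).

    /* Not part of the patch: read a hwmon voltage attribute. The path is an
     * assumption; locate the device by scanning /sys/class/hwmon in practice. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/hwmon/hwmon0/device/in1_input", "r");
            long mv;

            if (!f)
                    return 1;
            if (fscanf(f, "%ld", &mv) == 1)
                    printf("vout1 = %ld mV\n", mv);  /* hwmon voltages are in millivolts */
            fclose(f);
            return 0;
    }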
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
new file mode 100644
index 000000000000..6c525dd07d59
--- /dev/null
+++ b/Documentation/hwmon/max34440
@@ -0,0 +1,79 @@
1Kernel driver max34440
2======================
3
4Supported chips:
5 * Maxim MAX34440
6 Prefixes: 'max34440'
7 Addresses scanned: -
8 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
9 * Maxim MAX34441
10 PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
11 Prefixes: 'max34441'
12 Addresses scanned: -
13 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
14
15Author: Guenter Roeck <guenter.roeck@ericsson.com>
16
17
18Description
19-----------
20
21This driver supports hardware monitoring for Maxim MAX34440 PMBus 6-Channel
22Power-Supply Manager and MAX34441 PMBus 5-Channel Power-Supply Manager
23and Intelligent Fan Controller.
24
25The driver is a client driver to the core PMBus driver. Please see
26Documentation/hwmon/pmbus for details on PMBus client drivers.
27
28
29Usage Notes
30-----------
31
32This driver does not auto-detect devices. You will have to instantiate the
33devices explicitly. Please see Documentation/i2c/instantiating-devices for
34details.
35
36
37Platform data support
38---------------------
39
40The driver supports standard PMBus driver platform data.
41
42
43Sysfs entries
44-------------
45
46The following attributes are supported. Limits are read-write; all other
47attributes are read-only.
48
49in[1-6]_label "vout[1-6]".
50in[1-6]_input Measured voltage. From READ_VOUT register.
51in[1-6]_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
52in[1-6]_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
53in[1-6]_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
54in[1-6]_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
55in[1-6]_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
56in[1-6]_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
57in[1-6]_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
58in[1-6]_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
59
60curr[1-6]_label "iout[1-6]".
61curr[1-6]_input Measured current. From READ_IOUT register.
62curr[1-6]_max Maximum current. From IOUT_OC_WARN_LIMIT register.
63curr[1-6]_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
64curr[1-6]_max_alarm Current high alarm. From IOUT_OC_WARNING status.
65curr[1-6]_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
66
67 in6 and curr6 attributes only exist for MAX34440.
68
69temp[1-8]_input Measured temperatures. From READ_TEMPERATURE_1 register.
70 temp1 is the chip's internal temperature. temp2..temp5
71 are remote I2C temperature sensors. For MAX34441, temp6
72 is a remote thermal-diode sensor. For MAX34440, temp6..8
73 are remote I2C temperature sensors.
74temp[1-8]_max Maximum temperature. From OT_WARN_LIMIT register.
75temp[1-8]_crit Critical high temperature. From OT_FAULT_LIMIT register.
76temp[1-8]_max_alarm Temperature high alarm.
77temp[1-8]_crit_alarm Temperature critical high alarm.
78
79 temp7 and temp8 attributes only exist for MAX34440.
diff --git a/Documentation/hwmon/max8688 b/Documentation/hwmon/max8688
new file mode 100644
index 000000000000..0ddd3a412030
--- /dev/null
+++ b/Documentation/hwmon/max8688
@@ -0,0 +1,69 @@
1Kernel driver max8688
2=====================
3
4Supported chips:
5 * Maxim MAX8688
6 Prefix: 'max8688'
7 Addresses scanned: -
8 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
9
10Author: Guenter Roeck <guenter.roeck@ericsson.com>
11
12
13Description
14-----------
15
16This driver supports hardware monitoring for Maxim MAX8688 Digital Power-Supply
17Controller/Monitor with PMBus Interface.
18
19The driver is a client driver to the core PMBus driver. Please see
20Documentation/hwmon/pmbus for details on PMBus client drivers.
21
22
23Usage Notes
24-----------
25
26This driver does not auto-detect devices. You will have to instantiate the
27devices explicitly. Please see Documentation/i2c/instantiating-devices for
28details.
29
30
31Platform data support
32---------------------
33
34The driver supports standard PMBus driver platform data.
35
36
37Sysfs entries
38-------------
39
40The following attributes are supported. Limits are read-write; all other
41attributes are read-only.
42
43in1_label "vout1"
44in1_input Measured voltage. From READ_VOUT register.
45in1_min Minimum voltage. From VOUT_UV_WARN_LIMIT register.
46in1_max Maximum voltage. From VOUT_OV_WARN_LIMIT register.
47in1_lcrit Critical minimum voltage. From VOUT_UV_FAULT_LIMIT register.
48in1_crit Critical maximum voltage. From VOUT_OV_FAULT_LIMIT register.
49in1_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
50in1_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
51in1_lcrit_alarm Voltage critical low alarm. From VOLTAGE_UV_FAULT status.
52in1_crit_alarm Voltage critical high alarm. From VOLTAGE_OV_FAULT status.
53
54curr1_label "iout1"
55curr1_input Measured current. From READ_IOUT register.
56curr1_max Maximum current. From IOUT_OC_WARN_LIMIT register.
57curr1_crit Critical maximum current. From IOUT_OC_FAULT_LIMIT register.
58curr1_max_alarm Current high alarm. From IOUT_OC_WARN_LIMIT register.
59curr1_crit_alarm Current critical high alarm. From IOUT_OC_FAULT status.
60
61temp1_input Measured temperature. From READ_TEMPERATURE_1 register.
62temp1_max Maximum temperature. From OT_WARN_LIMIT register.
63temp1_crit Critical high temperature. From OT_FAULT_LIMIT register.
64temp1_max_alarm Chip temperature high alarm. Set by comparing
65 READ_TEMPERATURE_1 with OT_WARN_LIMIT if TEMP_OT_WARNING
66 status is set.
67temp1_crit_alarm Chip temperature critical high alarm. Set by comparing
68 READ_TEMPERATURE_1 with OT_FAULT_LIMIT if TEMP_OT_FAULT
69 status is set.
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index dc4933e96344..5e462fc7f99b 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -13,26 +13,6 @@ Supported chips:
13 Prefix: 'ltc2978' 13 Prefix: 'ltc2978'
14 Addresses scanned: - 14 Addresses scanned: -
15 Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf 15 Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
16 * Maxim MAX16064
17 Quad Power-Supply Controller
18 Prefix: 'max16064'
19 Addresses scanned: -
20 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
21 * Maxim MAX34440
22 PMBus 6-Channel Power-Supply Manager
23 Prefixes: 'max34440'
24 Addresses scanned: -
25 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
26 * Maxim MAX34441
27 PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
28 Prefixes: 'max34441'
29 Addresses scanned: -
30 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
31 * Maxim MAX8688
32 Digital Power-Supply Controller/Monitor
33 Prefix: 'max8688'
34 Addresses scanned: -
35 Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
36 * Generic PMBus devices 16 * Generic PMBus devices
37 Prefix: 'pmbus' 17 Prefix: 'pmbus'
38 Addresses scanned: - 18 Addresses scanned: -
@@ -175,11 +155,13 @@ currX_crit Critical maximum current.
175 From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register. 155 From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register.
176currX_alarm Current high alarm. 156currX_alarm Current high alarm.
177 From IIN_OC_WARNING or IOUT_OC_WARNING status. 157 From IIN_OC_WARNING or IOUT_OC_WARNING status.
158currX_max_alarm Current high alarm.
159 From IIN_OC_WARN_LIMIT or IOUT_OC_WARN_LIMIT status.
178currX_lcrit_alarm Output current critical low alarm. 160currX_lcrit_alarm Output current critical low alarm.
179 From IOUT_UC_FAULT status. 161 From IOUT_UC_FAULT status.
180currX_crit_alarm Current critical high alarm. 162currX_crit_alarm Current critical high alarm.
181 From IIN_OC_FAULT or IOUT_OC_FAULT status. 163 From IIN_OC_FAULT or IOUT_OC_FAULT status.
182currX_label "iin" or "vinY" 164currX_label "iin" or "ioutY"
183 165
184powerX_input Measured power. From READ_PIN or READ_POUT register. 166powerX_input Measured power. From READ_PIN or READ_POUT register.
185powerX_cap Output power cap. From POUT_MAX register. 167powerX_cap Output power cap. From POUT_MAX register.
@@ -193,13 +175,13 @@ powerX_crit_alarm Output power critical high alarm.
193 From POUT_OP_FAULT status. 175 From POUT_OP_FAULT status.
194powerX_label "pin" or "poutY" 176powerX_label "pin" or "poutY"
195 177
196tempX_input Measured tempererature. 178tempX_input Measured temperature.
197 From READ_TEMPERATURE_X register. 179 From READ_TEMPERATURE_X register.
198tempX_min Mimimum tempererature. From UT_WARN_LIMIT register. 180tempX_min Mimimum temperature. From UT_WARN_LIMIT register.
199tempX_max Maximum tempererature. From OT_WARN_LIMIT register. 181tempX_max Maximum temperature. From OT_WARN_LIMIT register.
200tempX_lcrit Critical low tempererature. 182tempX_lcrit Critical low temperature.
201 From UT_FAULT_LIMIT register. 183 From UT_FAULT_LIMIT register.
202tempX_crit Critical high tempererature. 184tempX_crit Critical high temperature.
203 From OT_FAULT_LIMIT register. 185 From OT_FAULT_LIMIT register.
204tempX_min_alarm Chip temperature low alarm. Set by comparing 186tempX_min_alarm Chip temperature low alarm. Set by comparing
205 READ_TEMPERATURE_X with UT_WARN_LIMIT if 187 READ_TEMPERATURE_X with UT_WARN_LIMIT if
diff --git a/Documentation/hwmon/smm665 b/Documentation/hwmon/smm665
index 3820fc9ca52d..59e316140542 100644
--- a/Documentation/hwmon/smm665
+++ b/Documentation/hwmon/smm665
@@ -150,8 +150,8 @@ in8_crit_alarm Channel F critical alarm
150in9_crit_alarm AIN1 critical alarm 150in9_crit_alarm AIN1 critical alarm
151in10_crit_alarm AIN2 critical alarm 151in10_crit_alarm AIN2 critical alarm
152 152
153temp1_input Chip tempererature 153temp1_input Chip temperature
154temp1_min Mimimum chip tempererature 154temp1_min Mimimum chip temperature
155temp1_max Maximum chip tempererature 155temp1_max Maximum chip temperature
156temp1_crit Critical chip tempererature 156temp1_crit Critical chip temperature
157temp1_crit_alarm Temperature critical alarm 157temp1_crit_alarm Temperature critical alarm
diff --git a/Documentation/hwmon/submitting-patches b/Documentation/hwmon/submitting-patches
new file mode 100644
index 000000000000..86f42e8e9e49
--- /dev/null
+++ b/Documentation/hwmon/submitting-patches
@@ -0,0 +1,109 @@
1 How to Get Your Patch Accepted Into the Hwmon Subsystem
2 -------------------------------------------------------
3
4This text is a collection of suggestions for people writing patches or
5drivers for the hwmon subsystem. Following these suggestions will greatly
6increase the chances of your change being accepted.
7
8
91. General
10----------
11
12* It should be unnecessary to mention, but please read and follow
13 Documentation/SubmitChecklist
14 Documentation/SubmittingDrivers
15 Documentation/SubmittingPatches
16 Documentation/CodingStyle
17
18* If your patch generates checkpatch warnings, please refrain from explanations
19 such as "I don't like that coding style". Keep in mind that each unnecessary
20 warning helps hide a real problem. If you don't like the kernel coding
21 style, don't write kernel drivers.
22
23* Please test your patch thoroughly. We are not your test group.
24 Sometimes a patch cannot be tested, or cannot be tested completely, because
25 the necessary hardware is missing. In such cases, you should at least
26 build-test the code on one architecture. If run-time testing was not
27 possible, state that explicitly below the patch header.
28
29* If your patch (or the driver) is affected by configuration options such as
30 CONFIG_SMP or CONFIG_HOTPLUG, make sure it compiles for all configuration
31 variants.
32
33
342. Adding functionality to existing drivers
35-------------------------------------------
36
37* Make sure the documentation in Documentation/hwmon/<driver_name> is up to
38 date.
39
40* Make sure the information in Kconfig is up to date.
41
42* If the added functionality requires some cleanup or structural changes, split
43 your patch into a cleanup part and the actual addition. This makes it easier
44 to review your changes, and to bisect any resulting problems.
45
46* Never mix bug fixes, cleanup, and functional enhancements in a single patch.
47
48
493. New drivers
50--------------
51
52* Running your patch or driver file(s) through checkpatch does not mean its
53 formatting is clean. If unsure about formatting in your new driver, run it
54 through Lindent. Lindent is not perfect, and you may have to do some minor
55 cleanup, but it is a good start.
56
57* Consider adding yourself to MAINTAINERS.
58
59* Document the driver in Documentation/hwmon/<driver_name>.
60
61* Add the driver to Kconfig and Makefile in alphabetical order.
62
63* Make sure that all dependencies are listed in Kconfig. For new drivers, it
64 is most likely prudent to add a dependency on EXPERIMENTAL.
65
66* Avoid forward declarations if you can. Rearrange the code if necessary.
67
68* Avoid calculations in macros and macro-generated functions. While such macros
69 may save a line or so in the source, it obfuscates the code and makes code
70 review more difficult. It may also result in code which is more complicated
71 than necessary. Use inline functions or just regular functions instead.
72
73* If the driver has a detect function, make sure it is silent. Debug messages
74 and messages printed after a successful detection are acceptable, but it
75 must not print messages such as "Chip XXX not found/supported".
76
77 Keep in mind that the detect function will run for all drivers supporting an
78 address if a chip is detected on that address. Unnecessary messages will just
79 pollute the kernel log and not provide any value.
80
81* Provide a detect function if and only if a chip can be detected reliably.
82
83* Avoid writing to chip registers in the detect function. If you have to write,
84 only do it after you have already gathered enough data to be certain that the
85 detection is going to be successful.
86
87 Keep in mind that the chip might not be what your driver believes it is, and
88 writing to it might cause a bad misconfiguration.
89
90* Make sure there are no race conditions in the probe function. Specifically,
91 completely initialize your chip first, then create sysfs entries and register
92 with the hwmon subsystem.
93
94* Do not provide support for deprecated sysfs attributes.
95
96* Do not create non-standard attributes unless really needed. If you have to use
97 non-standard attributes, or you believe you do, discuss it on the mailing list
98 first. In either case, provide a detailed explanation of why you need the
99 non-standard attribute(s).
100 Standard attributes are specified in Documentation/hwmon/sysfs-interface.
101
102* When deciding which sysfs attributes to support, look at the chip's
103 capabilities. While we do not expect your driver to support everything the
104 chip may offer, it should at least support all limits and alarms.
105
106* Last but not least, please check if a driver for your chip already exists
107 before starting to write a new driver. Especially for temperature sensors,
108 new chips are often variants of previously released chips. In some cases,
109 a presumably new chip may simply have been relabeled.
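One of the guidelines above asks for a race-free probe sequence: fully initialize the chip, then create sysfs entries, and only then register with the hwmon subsystem. The skeleton below, not part of the patch, sketches that ordering; the "foo" names, foo_init_chip() and the attribute group are hypothetical placeholders.

    /* Not part of the patch: probe-ordering skeleton. foo_*, foo_group and
     * foo_init_chip() are hypothetical; error handling mirrors the ordering. */
    #include <linux/i2c.h>
    #include <linux/hwmon.h>
    #include <linux/sysfs.h>
    #include <linux/slab.h>
    #include <linux/err.h>

    struct foo_data {
            struct device *hwmon_dev;
    };

    static int foo_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
    {
            struct foo_data *data;
            int err;

            data = kzalloc(sizeof(*data), GFP_KERNEL);
            if (!data)
                    return -ENOMEM;
            i2c_set_clientdata(client, data);

            foo_init_chip(client);          /* 1. fully initialize the hardware */

            err = sysfs_create_group(&client->dev.kobj, &foo_group); /* 2. sysfs */
            if (err)
                    goto exit_free;

            data->hwmon_dev = hwmon_device_register(&client->dev);   /* 3. hwmon */
            if (IS_ERR(data->hwmon_dev)) {
                    err = PTR_ERR(data->hwmon_dev);
                    goto exit_remove;
            }
            return 0;

    exit_remove:
            sysfs_remove_group(&client->dev.kobj, &foo_group);
    exit_free:
            kfree(data);
            return err;
    }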
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
new file mode 100644
index 000000000000..23fcb05175be
--- /dev/null
+++ b/Documentation/input/event-codes.txt
@@ -0,0 +1,262 @@
1The input protocol uses a map of types and codes to express input device values
2to userspace. This document describes the types and codes and how and when they
3may be used.
4
5A single hardware event generates multiple input events. Each input event
6contains the new value of a single data item. A special event type, EV_SYN, is
7used to separate input events into packets of input data changes occurring at
8the same moment in time. In the following, the term "event" refers to a single
9input event encompassing a type, code, and value.
10
11The input protocol is a stateful protocol. Events are emitted only when values
12of event codes have changed. However, the state is maintained within the Linux
13input subsystem; drivers do not need to maintain the state and may attempt to
14emit unchanged values without harm. Userspace may obtain the current state of
15event code values using the EVIOCG* ioctls defined in linux/input.h. The event
16reports supported by a device are also provided by sysfs in
17class/input/event*/device/capabilities/, and the properties of a device are
18provided in class/input/event*/device/properties.
19
20Types:
21==========
22Types are groupings of codes under a logical input construct. Each type has a
23set of applicable codes to be used in generating events. See the Codes section
24for details on valid codes for each type.
25
26* EV_SYN:
27 - Used as markers to separate events. Events may be separated in time or in
28 space, such as with the multitouch protocol.
29
30* EV_KEY:
31 - Used to describe state changes of keyboards, buttons, or other key-like
32 devices.
33
34* EV_REL:
35 - Used to describe relative axis value changes, e.g. moving the mouse 5 units
36 to the left.
37
38* EV_ABS:
39 - Used to describe absolute axis value changes, e.g. describing the
40 coordinates of a touch on a touchscreen.
41
42* EV_MSC:
43 - Used to describe miscellaneous input data that do not fit into other types.
44
45* EV_SW:
46 - Used to describe binary state input switches.
47
48* EV_LED:
49 - Used to turn LEDs on devices on and off.
50
51* EV_SND:
52 - Used to output sound to devices.
53
54* EV_REP:
55 - Used for autorepeating devices.
56
57* EV_FF:
58 - Used to send force feedback commands to an input device.
59
60* EV_PWR:
61 - A special type for power button and switch input.
62
63* EV_FF_STATUS:
64 - Used to receive force feedback device status.
65
66Codes:
67==========
68Codes define the precise type of event.
69
70EV_SYN:
71----------
72EV_SYN event values are undefined. Their usage is defined only by when they are
73sent in the evdev event stream.
74
75* SYN_REPORT:
76 - Used to synchronize and separate events into packets of input data changes
77 occurring at the same moment in time. For example, motion of a mouse may set
78 the REL_X and REL_Y values for one motion, then emit a SYN_REPORT. The next
79 motion will emit more REL_X and REL_Y values and send another SYN_REPORT.
80
81* SYN_CONFIG:
82 - TBD
83
84* SYN_MT_REPORT:
85 - Used to synchronize and separate touch events. See the
86 multi-touch-protocol.txt document for more information.
87
88* SYN_DROPPED:
89 - Used to indicate buffer overrun in the evdev client's event queue.
90 Client should ignore all events up to and including next SYN_REPORT
91 event and query the device (using EVIOCG* ioctls) to obtain its
92 current state.
93
94EV_KEY:
95----------
96EV_KEY events take the form KEY_<name> or BTN_<name>. For example, KEY_A is used
97to represent the 'A' key on a keyboard. When a key is depressed, an event with
98the key's code is emitted with value 1. When the key is released, an event is
99emitted with value 0. Some hardware sends events when a key is repeated. These
100events have a value of 2. In general, KEY_<name> is used for keyboard keys, and
101BTN_<name> is used for other types of momentary switch events.
102
103A few EV_KEY codes have special meanings:
104
105* BTN_TOOL_<name>:
106 - These codes are used in conjunction with input trackpads, tablets, and
107 touchscreens. These devices may be used with fingers, pens, or other tools.
108 When an event occurs and a tool is used, the corresponding BTN_TOOL_<name>
109 code should be set to a value of 1. When the tool is no longer interacting
110 with the input device, the BTN_TOOL_<name> code should be reset to 0. All
111 trackpads, tablets, and touchscreens should use at least one BTN_TOOL_<name>
112 code when events are generated.
113
114* BTN_TOUCH:
115 BTN_TOUCH is used for touch contact. While an input tool is determined to be
116 within meaningful physical contact, the value of this property must be set
117 to 1. Meaningful physical contact may mean any contact, or it may mean
118 contact conditioned by an implementation defined property. For example, a
119 touchpad may set the value to 1 only when the touch pressure rises above a
120 certain value. BTN_TOUCH may be combined with BTN_TOOL_<name> codes. For
121 example, a pen tablet may set BTN_TOOL_PEN to 1 and BTN_TOUCH to 0 while the
122 pen is hovering over but not touching the tablet surface.
123
124Note: For appropriate function of the legacy mousedev emulation driver,
125BTN_TOUCH must be the first evdev code emitted in a synchronization frame.
126
127Note: Historically a touch device with BTN_TOOL_FINGER and BTN_TOUCH was
128interpreted as a touchpad by userspace, while a similar device without
129BTN_TOOL_FINGER was interpreted as a touchscreen. For backwards compatibility
130with current userspace it is recommended to follow this distinction. In the
131future, this distinction will be deprecated and the device properties ioctl
132EVIOCGPROP, defined in linux/input.h, will be used to convey the device type.
133
134* BTN_TOOL_FINGER, BTN_TOOL_DOUBLETAP, BTN_TOOL_TRIPLETAP, BTN_TOOL_QUADTAP:
135 - These codes denote one, two, three, and four finger interaction on a
136 trackpad or touchscreen. For example, if the user uses two fingers and moves
137 them on the touchpad in an effort to scroll content on screen,
138 BTN_TOOL_DOUBLETAP should be set to value 1 for the duration of the motion.
139 Note that all BTN_TOOL_<name> codes and the BTN_TOUCH code are orthogonal in
140 purpose. A trackpad event generated by finger touches should generate events
141 for one code from each group. At most only one of these BTN_TOOL_<name>
142 codes should have a value of 1 during any synchronization frame.
143
144Note: Historically some drivers emitted multiple of the finger count codes with
145a value of 1 in the same synchronization frame. This usage is deprecated.
146
147Note: In multitouch drivers, the input_mt_report_finger_count() function should
148be used to emit these codes. Please see multi-touch-protocol.txt for details.
149
150EV_REL:
151----------
152EV_REL events describe relative changes in a property. For example, a mouse may
153move to the left by a certain number of units, but its absolute position in
154space is unknown. If the absolute position is known, EV_ABS codes should be used
155instead of EV_REL codes.
156
157A few EV_REL codes have special meanings:
158
159* REL_WHEEL, REL_HWHEEL:
160 - These codes are used for vertical and horizontal scroll wheels,
161 respectively.
162
163EV_ABS:
164----------
165EV_ABS events describe absolute changes in a property. For example, a touchpad
166may emit coordinates for a touch location.
167
168A few EV_ABS codes have special meanings:
169
170* ABS_DISTANCE:
171 - Used to describe the distance of a tool from an interaction surface. This
172 event should only be emitted while the tool is hovering, meaning in close
173 proximity of the device and while the value of the BTN_TOUCH code is 0. If
174 the input device may be used freely in three dimensions, consider ABS_Z
175 instead.
176
177* ABS_MT_<name>:
178 - Used to describe multitouch input events. Please see
179 multi-touch-protocol.txt for details.
180
181EV_SW:
182----------
183EV_SW events describe stateful binary switches. For example, the SW_LID code is
184used to denote when a laptop lid is closed.
185
186Upon binding to a device or resuming from suspend, a driver must report
187the current switch state. This ensures that the device, kernel, and userspace
188state is in sync.
189
190Upon resume, if the switch state is the same as before suspend, then the input
191subsystem will filter out the duplicate switch state reports. The driver does
192not need to keep the state of the switch at any time.
193
194EV_MSC:
195----------
196EV_MSC events are used for input and output events that do not fall under other
197categories.
198
199EV_LED:
200----------
201EV_LED events are used for input and output to set and query the state of
202various LEDs on devices.
203
204EV_REP:
205----------
206EV_REP events are used for specifying autorepeating events.
207
208EV_SND:
209----------
210EV_SND events are used for sending sound commands to simple sound output
211devices.
212
213EV_FF:
214----------
215EV_FF events are used to initialize a force feedback capable device and to
216cause such a device to play back force feedback effects.
217
218EV_PWR:
219----------
220EV_PWR events are a special type of event used specifically for power
221management. Their usage is not well defined and will be addressed later.
222
223Guidelines:
224==========
225The guidelines below ensure proper single-touch and multi-finger functionality.
226For multi-touch functionality, see the multi-touch-protocol.txt document for
227more information.
228
229Mice:
230----------
231REL_{X,Y} must be reported when the mouse moves. BTN_LEFT must be used to report
232the primary button press. BTN_{MIDDLE,RIGHT,4,5,etc.} should be used to report
233further buttons of the device. REL_WHEEL and REL_HWHEEL should be used to report
234scroll wheel events where available.
235
236Touchscreens:
237----------
238ABS_{X,Y} must be reported with the location of the touch. BTN_TOUCH must be
239used to report when a touch is active on the screen.
240BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
241contact. BTN_TOOL_<name> events should be reported where possible.
242
243Trackpads:
244----------
245Legacy trackpads that only provide relative position information must report
246events like mice described above.
247
248Trackpads that provide absolute touch position must report ABS_{X,Y} for the
249location of the touch. BTN_TOUCH should be used to report when a touch is active
250on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
251be used to report the number of touches active on the trackpad.
252
253Tablets:
254----------
255BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
256the tablet. ABS_{X,Y} must be reported with the location of the tool. BTN_TOUCH
257should be used to report when the tool is in contact with the tablet.
258BTN_{STYLUS,STYLUS2} should be used to report buttons on the tool itself. Any
259button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
260BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
261meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
262purpose on the device.
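The document added above describes the evdev event stream, including the new SYN_DROPPED code: on buffer overrun a client must discard events up to and including the next SYN_REPORT and then re-query device state with the EVIOCG* ioctls. The sketch below, not part of the patch, shows that read loop; the device path is an assumption.

    /* Not part of the patch: minimal evdev reader handling SYN_DROPPED as the
     * document above describes. /dev/input/event0 is an assumption. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/input.h>

    int main(void)
    {
            struct input_event ev;
            int dropping = 0;
            int fd = open("/dev/input/event0", O_RDONLY);

            if (fd < 0)
                    return 1;

            while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                    if (ev.type == EV_SYN && ev.code == SYN_DROPPED) {
                            dropping = 1;   /* queue overran: resynchronize */
                            continue;
                    }
                    if (dropping) {
                            if (ev.type == EV_SYN && ev.code == SYN_REPORT)
                                    dropping = 0;   /* re-query state here via EVIOCG* */
                            continue;
                    }
                    printf("type %u code %u value %d\n", ev.type, ev.code, ev.value);
            }
            close(fd);
            return 0;
    }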
diff --git a/Documentation/md.txt b/Documentation/md.txt
index a81c7b4790f2..2366b1c8cf19 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -552,6 +552,16 @@ also have
552 within the array where IO will be blocked. This is currently 552 within the array where IO will be blocked. This is currently
553 only supported for raid4/5/6. 553 only supported for raid4/5/6.
554 554
555 sync_min
556 sync_max
557 The two values, given as numbers of sectors, indicate a range
558 within the array where 'check'/'repair' will operate. Must be
559 a multiple of chunk_size. When it reaches "sync_max" it will
560 pause, rather than complete.
561 You can use 'select' or 'poll' on "sync_completed" to wait for
562 that number to reach sync_max. Then you can either increase
563 "sync_max", or can write 'idle' to "sync_action".
564
555 565
556Each active md device may also have attributes specific to the 566Each active md device may also have attributes specific to the
557personality module that manages it. 567personality module that manages it.
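The md.txt hunk above documents sync_min/sync_max and suggests waiting on "sync_completed" with select() or poll(). The sketch below, not part of the patch, sets an upper bound and waits for the attribute to change; the md device name and sector count are assumptions, and the bound must be a multiple of the chunk size as stated above.

    /* Not part of the patch: bound a check/repair with sync_max and wait on
     * sync_completed. "md0" and the sector count are assumptions. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/select.h>

    int main(void)
    {
            char buf[64];
            fd_set ex;
            int fd;

            fd = open("/sys/block/md0/md/sync_max", O_WRONLY);
            if (fd < 0)
                    return 1;
            write(fd, "2097152\n", 8);      /* pause once this sector is reached */
            close(fd);

            fd = open("/sys/block/md0/md/sync_completed", O_RDONLY);
            if (fd < 0)
                    return 1;
            read(fd, buf, sizeof(buf));     /* prime the attribute before waiting */
            FD_ZERO(&ex);
            FD_SET(fd, &ex);
            select(fd + 1, NULL, NULL, &ex, NULL); /* sysfs notify arrives as an exception */
            lseek(fd, 0, SEEK_SET);
            read(fd, buf, sizeof(buf));     /* re-read the updated value */
            close(fd);
            return 0;
    }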
diff --git a/Documentation/sound/alsa/SB-Live-mixer.txt b/Documentation/sound/alsa/SB-Live-mixer.txt
index f5639d40521d..f4b5988f450c 100644
--- a/Documentation/sound/alsa/SB-Live-mixer.txt
+++ b/Documentation/sound/alsa/SB-Live-mixer.txt
@@ -87,14 +87,14 @@ accumulator. ALSA uses accumulators 0 and 1 for left and right PCM.
87The result is forwarded to the ADC capture FIFO (thus to the standard capture 87The result is forwarded to the ADC capture FIFO (thus to the standard capture
88PCM device). 88PCM device).
89 89
90name='Music Playback Volume',index=0 90name='Synth Playback Volume',index=0
91 91
92This control is used to attenuate samples for left and right MIDI FX-bus 92This control is used to attenuate samples for left and right MIDI FX-bus
93accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples. 93accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples.
94The result samples are forwarded to the front DAC PCM slots of the AC97 codec. 94The result samples are forwarded to the front DAC PCM slots of the AC97 codec.
95 95
96name='Music Capture Volume',index=0 96name='Synth Capture Volume',index=0
97name='Music Capture Switch',index=0 97name='Synth Capture Switch',index=0
98 98
99These controls are used to attenuate samples for left and right MIDI FX-bus 99These controls are used to attenuate samples for left and right MIDI FX-bus
100accumulator. ALSA uses accumulators 4 and 5 for left and right PCM. 100accumulator. ALSA uses accumulators 4 and 5 for left and right PCM.
diff --git a/Documentation/video4linux/sh_mobile_ceu_camera.txt b/Documentation/video4linux/sh_mobile_ceu_camera.txt
index cb47e723af74..1e96ce6e2d2f 100644
--- a/Documentation/video4linux/sh_mobile_ceu_camera.txt
+++ b/Documentation/video4linux/sh_mobile_ceu_camera.txt
@@ -37,7 +37,7 @@ Generic scaling / cropping scheme
37-1'- 37-1'-
38 38
39In the above chart minuses and slashes represent "real" data amounts, points and 39In the above chart minuses and slashes represent "real" data amounts, points and
40accents represent "useful" data, basically, CEU scaled amd cropped output, 40accents represent "useful" data, basically, CEU scaled and cropped output,
41mapped back onto the client's source plane. 41mapped back onto the client's source plane.
42 42
43Such a configuration can be produced by user requests: 43Such a configuration can be produced by user requests:
@@ -65,7 +65,7 @@ Do not touch input rectangle - it is already optimal.
65 65
661. Calculate current sensor scales: 661. Calculate current sensor scales:
67 67
68 scale_s = ((3') - (3)) / ((2') - (2)) 68 scale_s = ((2') - (2)) / ((3') - (3))
69 69
702. Calculate "effective" input crop (sensor subwindow) - CEU crop scaled back at 702. Calculate "effective" input crop (sensor subwindow) - CEU crop scaled back at
71current sensor scales onto input window - this is user S_CROP: 71current sensor scales onto input window - this is user S_CROP:
@@ -80,7 +80,7 @@ window:
804. Calculate sensor output window by applying combined scales to real input 804. Calculate sensor output window by applying combined scales to real input
81window: 81window:
82 82
83 width_s_out = ((2') - (2)) / scale_comb 83 width_s_out = ((7') - (7)) = ((2') - (2)) / scale_comb
84 84
855. Apply iterative sensor S_FMT for sensor output window. 855. Apply iterative sensor S_FMT for sensor output window.
86 86
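To make the corrected arithmetic concrete, here is an illustrative sketch (not driver code; the parameter names mirror the chart points quoted in the hunks, and floating point is used only for readability):

/* Step 1: sensor scale = input window width / output window width. */
static double scale_s(double p2, double p2_prime, double p3, double p3_prime)
{
	return (p2_prime - p2) / (p3_prime - p3);
}

/* Step 4: sensor output width = real input width / combined scale,
 * where scale_comb is the combined scale computed in step 3 of the
 * procedure (that step is not shown in these hunks). */
static double width_s_out(double p2, double p2_prime, double scale_comb)
{
	return (p2_prime - p2) / scale_comb;
}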
diff --git a/MAINTAINERS b/MAINTAINERS
index ec3600306289..13803127b68f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -151,6 +151,7 @@ S: Maintained
151F: drivers/net/hamradio/6pack.c 151F: drivers/net/hamradio/6pack.c
152 152
1538169 10/100/1000 GIGABIT ETHERNET DRIVER 1538169 10/100/1000 GIGABIT ETHERNET DRIVER
154M: Realtek linux nic maintainers <nic_swsd@realtek.com>
154M: Francois Romieu <romieu@fr.zoreil.com> 155M: Francois Romieu <romieu@fr.zoreil.com>
155L: netdev@vger.kernel.org 156L: netdev@vger.kernel.org
156S: Maintained 157S: Maintained
@@ -5395,7 +5396,7 @@ F: drivers/media/video/*7146*
5395F: include/media/*7146* 5396F: include/media/*7146*
5396 5397
5397SAMSUNG AUDIO (ASoC) DRIVERS 5398SAMSUNG AUDIO (ASoC) DRIVERS
5398M: Jassi Brar <jassi.brar@samsung.com> 5399M: Jassi Brar <jassisinghbrar@gmail.com>
5399L: alsa-devel@alsa-project.org (moderated for non-subscribers) 5400L: alsa-devel@alsa-project.org (moderated for non-subscribers)
5400S: Supported 5401S: Supported
5401F: sound/soc/samsung 5402F: sound/soc/samsung
diff --git a/Makefile b/Makefile
index 322e7334ccb9..5a7a2e4f5c0b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 39 3SUBLEVEL = 39
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc5
5NAME = Flesh-Eating Bats with Fangs 5NAME = Flesh-Eating Bats with Fangs
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 9bb7b858ed23..7a6d908bb865 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -4,7 +4,7 @@
4 4
5extra-y := head.o vmlinux.lds 5extra-y := head.o vmlinux.lds
6asflags-y := $(KBUILD_CFLAGS) 6asflags-y := $(KBUILD_CFLAGS)
7ccflags-y := -Werror -Wno-sign-compare 7ccflags-y := -Wno-sign-compare
8 8
9obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ 9obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
10 irq_alpha.o signal.o setup.o ptrace.o time.o \ 10 irq_alpha.o signal.o setup.o ptrace.o time.o \
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
index 381fec0af52e..da7bcc372f16 100644
--- a/arch/alpha/kernel/core_mcpcia.c
+++ b/arch/alpha/kernel/core_mcpcia.c
@@ -88,7 +88,7 @@ conf_read(unsigned long addr, unsigned char type1,
88{ 88{
89 unsigned long flags; 89 unsigned long flags;
90 unsigned long mid = MCPCIA_HOSE2MID(hose->index); 90 unsigned long mid = MCPCIA_HOSE2MID(hose->index);
91 unsigned int stat0, value, temp, cpu; 91 unsigned int stat0, value, cpu;
92 92
93 cpu = smp_processor_id(); 93 cpu = smp_processor_id();
94 94
@@ -101,7 +101,7 @@ conf_read(unsigned long addr, unsigned char type1,
101 stat0 = *(vuip)MCPCIA_CAP_ERR(mid); 101 stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
102 *(vuip)MCPCIA_CAP_ERR(mid) = stat0; 102 *(vuip)MCPCIA_CAP_ERR(mid) = stat0;
103 mb(); 103 mb();
104 temp = *(vuip)MCPCIA_CAP_ERR(mid); 104 *(vuip)MCPCIA_CAP_ERR(mid);
105 DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0)); 105 DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));
106 106
107 mb(); 107 mb();
@@ -136,7 +136,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
136{ 136{
137 unsigned long flags; 137 unsigned long flags;
138 unsigned long mid = MCPCIA_HOSE2MID(hose->index); 138 unsigned long mid = MCPCIA_HOSE2MID(hose->index);
139 unsigned int stat0, temp, cpu; 139 unsigned int stat0, cpu;
140 140
141 cpu = smp_processor_id(); 141 cpu = smp_processor_id();
142 142
@@ -145,7 +145,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
145 /* Reset status register to avoid losing errors. */ 145 /* Reset status register to avoid losing errors. */
146 stat0 = *(vuip)MCPCIA_CAP_ERR(mid); 146 stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
147 *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb(); 147 *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
148 temp = *(vuip)MCPCIA_CAP_ERR(mid); 148 *(vuip)MCPCIA_CAP_ERR(mid);
149 DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0)); 149 DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0));
150 150
151 draina(); 151 draina();
@@ -157,7 +157,7 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1,
157 *((vuip)addr) = value; 157 *((vuip)addr) = value;
158 mb(); 158 mb();
159 mb(); /* magic */ 159 mb(); /* magic */
160 temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */ 160 *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
161 mcheck_expected(cpu) = 0; 161 mcheck_expected(cpu) = 0;
162 mb(); 162 mb();
163 163
@@ -572,12 +572,10 @@ mcpcia_print_system_area(unsigned long la_ptr)
572void 572void
573mcpcia_machine_check(unsigned long vector, unsigned long la_ptr) 573mcpcia_machine_check(unsigned long vector, unsigned long la_ptr)
574{ 574{
575 struct el_common *mchk_header;
576 struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout; 575 struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
577 unsigned int cpu = smp_processor_id(); 576 unsigned int cpu = smp_processor_id();
578 int expected; 577 int expected;
579 578
580 mchk_header = (struct el_common *)la_ptr;
581 mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr; 579 mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
582 expected = mcheck_expected(cpu); 580 expected = mcheck_expected(cpu);
583 581
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
index c3b3781a03de..14b26c466c89 100644
--- a/arch/alpha/kernel/err_titan.c
+++ b/arch/alpha/kernel/err_titan.c
@@ -533,8 +533,6 @@ static struct el_subpacket_annotation el_titan_annotations[] = {
533static struct el_subpacket * 533static struct el_subpacket *
534el_process_regatta_subpacket(struct el_subpacket *header) 534el_process_regatta_subpacket(struct el_subpacket *header)
535{ 535{
536 int status;
537
538 if (header->class != EL_CLASS__REGATTA_FAMILY) { 536 if (header->class != EL_CLASS__REGATTA_FAMILY) {
539 printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n", 537 printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
540 err_print_prefix, 538 err_print_prefix,
@@ -551,7 +549,7 @@ el_process_regatta_subpacket(struct el_subpacket *header)
551 printk("%s ** Occurred on CPU %d:\n", 549 printk("%s ** Occurred on CPU %d:\n",
552 err_print_prefix, 550 err_print_prefix,
553 (int)header->by_type.regatta_frame.cpuid); 551 (int)header->by_type.regatta_frame.cpuid);
554 status = privateer_process_logout_frame((struct el_common *) 552 privateer_process_logout_frame((struct el_common *)
555 header->by_type.regatta_frame.data_start, 1); 553 header->by_type.regatta_frame.data_start, 1);
556 break; 554 break;
557 default: 555 default:
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 1479dc6ebd97..51b7fbd9e4c1 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -228,7 +228,7 @@ struct irqaction timer_irqaction = {
228void __init 228void __init
229init_rtc_irq(void) 229init_rtc_irq(void)
230{ 230{
231 irq_set_chip_and_handler_name(RTC_IRQ, &no_irq_chip, 231 irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
232 handle_simple_irq, "RTC"); 232 handle_simple_irq, "RTC");
233 setup_irq(RTC_IRQ, &timer_irqaction); 233 setup_irq(RTC_IRQ, &timer_irqaction);
234} 234}
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index d2634e4476b4..edbddcbd5bc6 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -1404,8 +1404,6 @@ determine_cpu_caches (unsigned int cpu_type)
1404 case PCA56_CPU: 1404 case PCA56_CPU:
1405 case PCA57_CPU: 1405 case PCA57_CPU:
1406 { 1406 {
1407 unsigned long cbox_config, size;
1408
1409 if (cpu_type == PCA56_CPU) { 1407 if (cpu_type == PCA56_CPU) {
1410 L1I = CSHAPE(16*1024, 6, 1); 1408 L1I = CSHAPE(16*1024, 6, 1);
1411 L1D = CSHAPE(8*1024, 5, 1); 1409 L1D = CSHAPE(8*1024, 5, 1);
@@ -1415,10 +1413,12 @@ determine_cpu_caches (unsigned int cpu_type)
1415 } 1413 }
1416 L3 = -1; 1414 L3 = -1;
1417 1415
1416#if 0
1417 unsigned long cbox_config, size;
1418
1418 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL); 1419 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
1419 size = 512*1024 * (1 << ((cbox_config >> 12) & 3)); 1420 size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
1420 1421
1421#if 0
1422 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1); 1422 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
1423#else 1423#else
1424 L2 = external_cache_probe(512*1024, 6); 1424 L2 = external_cache_probe(512*1024, 6);
diff --git a/arch/alpha/kernel/smc37c93x.c b/arch/alpha/kernel/smc37c93x.c
index 3e6a2893af9f..6886b834f487 100644
--- a/arch/alpha/kernel/smc37c93x.c
+++ b/arch/alpha/kernel/smc37c93x.c
@@ -79,7 +79,6 @@
79static unsigned long __init SMCConfigState(unsigned long baseAddr) 79static unsigned long __init SMCConfigState(unsigned long baseAddr)
80{ 80{
81 unsigned char devId; 81 unsigned char devId;
82 unsigned char devRev;
83 82
84 unsigned long configPort; 83 unsigned long configPort;
85 unsigned long indexPort; 84 unsigned long indexPort;
@@ -100,7 +99,7 @@ static unsigned long __init SMCConfigState(unsigned long baseAddr)
100 devId = inb(dataPort); 99 devId = inb(dataPort);
101 if (devId == VALID_DEVICE_ID) { 100 if (devId == VALID_DEVICE_ID) {
102 outb(DEVICE_REV, indexPort); 101 outb(DEVICE_REV, indexPort);
103 devRev = inb(dataPort); 102 /* unsigned char devRev = */ inb(dataPort);
104 break; 103 break;
105 } 104 }
106 else 105 else
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index d3cb28bb8eb0..d92cdc715c65 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -156,7 +156,6 @@ static void __init
156wildfire_init_irq_per_pca(int qbbno, int pcano) 156wildfire_init_irq_per_pca(int qbbno, int pcano)
157{ 157{
158 int i, irq_bias; 158 int i, irq_bias;
159 unsigned long io_bias;
160 static struct irqaction isa_enable = { 159 static struct irqaction isa_enable = {
161 .handler = no_action, 160 .handler = no_action,
162 .name = "isa_enable", 161 .name = "isa_enable",
@@ -165,10 +164,12 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
165 irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA) 164 irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
166 + pcano * WILDFIRE_IRQ_PER_PCA; 165 + pcano * WILDFIRE_IRQ_PER_PCA;
167 166
167#if 0
168 unsigned long io_bias;
169
168 /* Only need the following for first PCI bus per PCA. */ 170 /* Only need the following for first PCI bus per PCA. */
169 io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS; 171 io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS;
170 172
171#if 0
172 outb(0, DMA1_RESET_REG + io_bias); 173 outb(0, DMA1_RESET_REG + io_bias);
173 outb(0, DMA2_RESET_REG + io_bias); 174 outb(0, DMA2_RESET_REG + io_bias);
174 outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias); 175 outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias);
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index a58e84f1a63b..918e8e0b72ff 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -153,6 +153,7 @@ void read_persistent_clock(struct timespec *ts)
153 year += 100; 153 year += 100;
154 154
155 ts->tv_sec = mktime(year, mon, day, hour, min, sec); 155 ts->tv_sec = mktime(year, mon, day, hour, min, sec);
156 ts->tv_nsec = 0;
156} 157}
157 158
158 159
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fdc9d4dbf85b..377a7a595b08 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1540,7 +1540,6 @@ config HIGHMEM
1540config HIGHPTE 1540config HIGHPTE
1541 bool "Allocate 2nd-level pagetables from highmem" 1541 bool "Allocate 2nd-level pagetables from highmem"
1542 depends on HIGHMEM 1542 depends on HIGHMEM
1543 depends on !OUTER_CACHE
1544 1543
1545config HW_PERF_EVENTS 1544config HW_PERF_EVENTS
1546 bool "Enable hardware performance counter support for perf events" 1545 bool "Enable hardware performance counter support for perf events"
@@ -2012,6 +2011,8 @@ source "kernel/power/Kconfig"
2012 2011
2013config ARCH_SUSPEND_POSSIBLE 2012config ARCH_SUSPEND_POSSIBLE
2014 depends on !ARCH_S5P64X0 && !ARCH_S5P6442 2013 depends on !ARCH_S5P64X0 && !ARCH_S5P6442
2014 depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
2015 CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE
2015 def_bool y 2016 def_bool y
2016 2017
2017endmenu 2018endmenu
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 494224a9b459..03d01d783e3b 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -63,17 +63,6 @@ config DEBUG_USER
63 8 - SIGSEGV faults 63 8 - SIGSEGV faults
64 16 - SIGBUS faults 64 16 - SIGBUS faults
65 65
66config DEBUG_ERRORS
67 bool "Verbose kernel error messages"
68 depends on DEBUG_KERNEL
69 help
70 This option controls verbose debugging information which can be
71 printed when the kernel detects an internal error. This debugging
72 information is useful to kernel hackers when tracking down problems,
73 but mostly meaningless to other people. It's safe to say Y unless
74 you are concerned with the code size or don't want to see these
75 messages.
76
77config DEBUG_STACK_USAGE 66config DEBUG_STACK_USAGE
78 bool "Enable stack utilization instrumentation" 67 bool "Enable stack utilization instrumentation"
79 depends on DEBUG_KERNEL 68 depends on DEBUG_KERNEL
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index e7521bca2c35..6ea9b6f3607a 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -16,5 +16,4 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o
16obj-$(CONFIG_ARCH_IXP2000) += uengine.o 16obj-$(CONFIG_ARCH_IXP2000) += uengine.o
17obj-$(CONFIG_ARCH_IXP23XX) += uengine.o 17obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
18obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o 18obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
19obj-$(CONFIG_COMMON_CLKDEV) += clkdev.o
20obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o 19obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index ed5bc9e05a4e..cd4458f64171 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -2,6 +2,7 @@
2#define __ASM_ARM_CPUTYPE_H 2#define __ASM_ARM_CPUTYPE_H
3 3
4#include <linux/stringify.h> 4#include <linux/stringify.h>
5#include <linux/kernel.h>
5 6
6#define CPUID_ID 0 7#define CPUID_ID 0
7#define CPUID_CACHETYPE 1 8#define CPUID_CACHETYPE 1
diff --git a/arch/arm/include/asm/thread_notify.h b/arch/arm/include/asm/thread_notify.h
index c4391ba20350..1dc980675894 100644
--- a/arch/arm/include/asm/thread_notify.h
+++ b/arch/arm/include/asm/thread_notify.h
@@ -43,6 +43,7 @@ static inline void thread_notify(unsigned long rc, struct thread_info *thread)
43#define THREAD_NOTIFY_FLUSH 0 43#define THREAD_NOTIFY_FLUSH 0
44#define THREAD_NOTIFY_EXIT 1 44#define THREAD_NOTIFY_EXIT 1
45#define THREAD_NOTIFY_SWITCH 2 45#define THREAD_NOTIFY_SWITCH 2
46#define THREAD_NOTIFY_COPY 3
46 47
47#endif 48#endif
48#endif 49#endif
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index c891eb76c0e3..87dbe3e21970 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -396,6 +396,10 @@
396#define __NR_fanotify_init (__NR_SYSCALL_BASE+367) 396#define __NR_fanotify_init (__NR_SYSCALL_BASE+367)
397#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) 397#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
398#define __NR_prlimit64 (__NR_SYSCALL_BASE+369) 398#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
399#define __NR_name_to_handle_at (__NR_SYSCALL_BASE+370)
400#define __NR_open_by_handle_at (__NR_SYSCALL_BASE+371)
401#define __NR_clock_adjtime (__NR_SYSCALL_BASE+372)
402#define __NR_syncfs (__NR_SYSCALL_BASE+373)
399 403
400/* 404/*
401 * The following SWIs are ARM private. 405 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 74554f1742d7..8d95446150a3 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
29obj-$(CONFIG_ARTHUR) += arthur.o 29obj-$(CONFIG_ARTHUR) += arthur.o
30obj-$(CONFIG_ISA_DMA) += dma-isa.o 30obj-$(CONFIG_ISA_DMA) += dma-isa.o
31obj-$(CONFIG_PCI) += bios32.o isa.o 31obj-$(CONFIG_PCI) += bios32.o isa.o
32obj-$(CONFIG_PM) += sleep.o 32obj-$(CONFIG_PM_SLEEP) += sleep.o
33obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o 33obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
34obj-$(CONFIG_SMP) += smp.o smp_tlb.o 34obj-$(CONFIG_SMP) += smp.o smp_tlb.o
35obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o 35obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 5c26eccef998..7fbf28c35bb2 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -379,6 +379,10 @@
379 CALL(sys_fanotify_init) 379 CALL(sys_fanotify_init)
380 CALL(sys_fanotify_mark) 380 CALL(sys_fanotify_mark)
381 CALL(sys_prlimit64) 381 CALL(sys_prlimit64)
382/* 370 */ CALL(sys_name_to_handle_at)
383 CALL(sys_open_by_handle_at)
384 CALL(sys_clock_adjtime)
385 CALL(sys_syncfs)
382#ifndef syscalls_counted 386#ifndef syscalls_counted
383.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 387.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
384#define syscalls_counted 388#define syscalls_counted
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index d4a0da1e48f4..9b05c6a0dcea 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -40,15 +40,22 @@ EXPORT_SYMBOL(elf_check_arch);
40void elf_set_personality(const struct elf32_hdr *x) 40void elf_set_personality(const struct elf32_hdr *x)
41{ 41{
42 unsigned int eflags = x->e_flags; 42 unsigned int eflags = x->e_flags;
43 unsigned int personality = PER_LINUX_32BIT; 43 unsigned int personality = current->personality & ~PER_MASK;
44
45 /*
46 * We only support Linux ELF executables, so always set the
47 * personality to LINUX.
48 */
49 personality |= PER_LINUX;
44 50
45 /* 51 /*
46 * APCS-26 is only valid for OABI executables 52 * APCS-26 is only valid for OABI executables
47 */ 53 */
48 if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) { 54 if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
49 if (eflags & EF_ARM_APCS_26) 55 (eflags & EF_ARM_APCS_26))
50 personality = PER_LINUX; 56 personality &= ~ADDR_LIMIT_32BIT;
51 } 57 else
58 personality |= ADDR_LIMIT_32BIT;
52 59
53 set_personality(personality); 60 set_personality(personality);
54 61
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 8dbc126f7152..87acc25d7a3e 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -868,6 +868,13 @@ static void reset_ctrl_regs(void *info)
868 */ 868 */
869 asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0)); 869 asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
870 isb(); 870 isb();
871
872 /*
873 * Clear any configured vector-catch events before
874 * enabling monitor mode.
875 */
876 asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
877 isb();
871 } 878 }
872 879
873 if (enable_monitor_mode()) 880 if (enable_monitor_mode())
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 69cfee0fe00f..979da3947f42 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -221,7 +221,7 @@ again:
221 prev_raw_count &= armpmu->max_period; 221 prev_raw_count &= armpmu->max_period;
222 222
223 if (overflow) 223 if (overflow)
224 delta = armpmu->max_period - prev_raw_count + new_raw_count; 224 delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
225 else 225 else
226 delta = new_raw_count - prev_raw_count; 226 delta = new_raw_count - prev_raw_count;
227 227
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94bbedbed639..5e1e54197227 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -372,6 +372,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
372 if (clone_flags & CLONE_SETTLS) 372 if (clone_flags & CLONE_SETTLS)
373 thread->tp_value = regs->ARM_r3; 373 thread->tp_value = regs->ARM_r3;
374 374
375 thread_notify(THREAD_NOTIFY_COPY, thread);
376
375 return 0; 377 return 0;
376} 378}
377 379
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f0000e188c8c..3b54ad19d489 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -410,8 +410,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
410 struct thread_info *thread = current_thread_info(); 410 struct thread_info *thread = current_thread_info();
411 siginfo_t info; 411 siginfo_t info;
412 412
413 if (current->personality != PER_LINUX && 413 if ((current->personality & PER_MASK) != PER_LINUX &&
414 current->personality != PER_LINUX_32BIT &&
415 thread->exec_domain->handler) { 414 thread->exec_domain->handler) {
416 thread->exec_domain->handler(n, regs); 415 thread->exec_domain->handler(n, regs);
417 return regs->ARM_r0; 416 return regs->ARM_r0;
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index f68012239641..a3a94e9c9378 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -314,7 +314,7 @@ static struct clk timer2_clk = {
314 .name = "timer2", 314 .name = "timer2",
315 .parent = &pll1_aux_clk, 315 .parent = &pll1_aux_clk,
316 .lpsc = DAVINCI_LPSC_TIMER2, 316 .lpsc = DAVINCI_LPSC_TIMER2,
317 .usecount = 1, /* REVISIT: why can't' this be disabled? */ 317 .usecount = 1, /* REVISIT: why can't this be disabled? */
318}; 318};
319 319
320static struct clk timer3_clk = { 320static struct clk timer3_clk = {
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 5f8a65424184..4c82c2716293 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -274,7 +274,7 @@ static struct clk timer2_clk = {
274 .name = "timer2", 274 .name = "timer2",
275 .parent = &pll1_aux_clk, 275 .parent = &pll1_aux_clk,
276 .lpsc = DAVINCI_LPSC_TIMER2, 276 .lpsc = DAVINCI_LPSC_TIMER2,
277 .usecount = 1, /* REVISIT: why can't' this be disabled? */ 277 .usecount = 1, /* REVISIT: why can't this be disabled? */
278}; 278};
279 279
280static struct clk_lookup dm644x_clks[] = { 280static struct clk_lookup dm644x_clks[] = {
diff --git a/arch/arm/mach-mmp/include/mach/gpio.h b/arch/arm/mach-mmp/include/mach/gpio.h
index ee8b02ed8011..7bfb827f3fe3 100644
--- a/arch/arm/mach-mmp/include/mach/gpio.h
+++ b/arch/arm/mach-mmp/include/mach/gpio.h
@@ -10,7 +10,7 @@
10#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2)) 10#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
11#define GPIO_REG(x) (*((volatile u32 *)(GPIO_REGS_VIRT + (x)))) 11#define GPIO_REG(x) (*((volatile u32 *)(GPIO_REGS_VIRT + (x))))
12 12
13#define NR_BUILTIN_GPIO (192) 13#define NR_BUILTIN_GPIO IRQ_GPIO_NUM
14 14
15#define gpio_to_bank(gpio) ((gpio) >> 5) 15#define gpio_to_bank(gpio) ((gpio) >> 5)
16#define gpio_to_irq(gpio) (IRQ_GPIO_START + (gpio)) 16#define gpio_to_irq(gpio) (IRQ_GPIO_START + (gpio))
diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
index 4621067c7720..713be155a44d 100644
--- a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
+++ b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h
@@ -8,6 +8,15 @@
8#define MFP_DRIVE_MEDIUM (0x2 << 13) 8#define MFP_DRIVE_MEDIUM (0x2 << 13)
9#define MFP_DRIVE_FAST (0x3 << 13) 9#define MFP_DRIVE_FAST (0x3 << 13)
10 10
11#undef MFP_CFG
12#undef MFP_CFG_DRV
13
14#define MFP_CFG(pin, af) \
15 (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_MEDIUM)
16
17#define MFP_CFG_DRV(pin, af, drv) \
18 (MFP_LPM_INPUT | MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_DRIVE_##drv)
19
11/* GPIO */ 20/* GPIO */
12#define GPIO0_GPIO MFP_CFG(GPIO0, AF5) 21#define GPIO0_GPIO MFP_CFG(GPIO0, AF5)
13#define GPIO1_GPIO MFP_CFG(GPIO1, AF5) 22#define GPIO1_GPIO MFP_CFG(GPIO1, AF5)
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 7f568611547e..6a96911b0ad5 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -160,10 +160,7 @@ static struct msm_mmc_platform_data qsd8x50_sdc1_data = {
160 160
161static void __init qsd8x50_init_mmc(void) 161static void __init qsd8x50_init_mmc(void)
162{ 162{
163 if (machine_is_qsd8x50_ffa() || machine_is_qsd8x50a_ffa()) 163 vreg_mmc = vreg_get(NULL, "gp5");
164 vreg_mmc = vreg_get(NULL, "gp6");
165 else
166 vreg_mmc = vreg_get(NULL, "gp5");
167 164
168 if (IS_ERR(vreg_mmc)) { 165 if (IS_ERR(vreg_mmc)) {
169 pr_err("vreg get for vreg_mmc failed (%ld)\n", 166 pr_err("vreg get for vreg_mmc failed (%ld)\n",
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 56f920c55b6a..38b95e949d13 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -269,7 +269,7 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
269 269
270 /* Use existing clock_event for cpu 0 */ 270 /* Use existing clock_event for cpu 0 */
271 if (!smp_processor_id()) 271 if (!smp_processor_id())
272 return; 272 return 0;
273 273
274 writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL); 274 writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
275 275
diff --git a/arch/arm/mach-pxa/include/mach/gpio.h b/arch/arm/mach-pxa/include/mach/gpio.h
index b024a8b37439..c4639502efca 100644
--- a/arch/arm/mach-pxa/include/mach/gpio.h
+++ b/arch/arm/mach-pxa/include/mach/gpio.h
@@ -99,11 +99,24 @@
99#define GAFR(x) GPIO_REG(0x54 + (((x) & 0x70) >> 2)) 99#define GAFR(x) GPIO_REG(0x54 + (((x) & 0x70) >> 2))
100 100
101 101
102#define NR_BUILTIN_GPIO 128 102#define NR_BUILTIN_GPIO PXA_GPIO_IRQ_NUM
103 103
104#define gpio_to_bank(gpio) ((gpio) >> 5) 104#define gpio_to_bank(gpio) ((gpio) >> 5)
105#define gpio_to_irq(gpio) IRQ_GPIO(gpio) 105#define gpio_to_irq(gpio) IRQ_GPIO(gpio)
106#define irq_to_gpio(irq) IRQ_TO_GPIO(irq) 106
107static inline int irq_to_gpio(unsigned int irq)
108{
109 int gpio;
110
111 if (irq == IRQ_GPIO0 || irq == IRQ_GPIO1)
112 return irq - IRQ_GPIO0;
113
114 gpio = irq - PXA_GPIO_IRQ_BASE;
115 if (gpio >= 2 && gpio < NR_BUILTIN_GPIO)
116 return gpio;
117
118 return -1;
119}
107 120
108#ifdef CONFIG_CPU_PXA26x 121#ifdef CONFIG_CPU_PXA26x
109/* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted, 122/* GPIO86/87/88/89 on PXA26x have their direction bits in GPDR2 inverted,
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index a4285fc00878..038402404e39 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -93,9 +93,6 @@
93#define GPIO_2_x_TO_IRQ(x) (PXA_GPIO_IRQ_BASE + (x)) 93#define GPIO_2_x_TO_IRQ(x) (PXA_GPIO_IRQ_BASE + (x))
94#define IRQ_GPIO(x) (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x)) 94#define IRQ_GPIO(x) (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x))
95 95
96#define IRQ_TO_GPIO_2_x(i) ((i) - PXA_GPIO_IRQ_BASE)
97#define IRQ_TO_GPIO(i) (((i) < IRQ_GPIO(2)) ? ((i) - IRQ_GPIO0) : IRQ_TO_GPIO_2_x(i))
98
99/* 96/*
100 * The following interrupts are for board specific purposes. Since 97 * The following interrupts are for board specific purposes. Since
101 * the kernel can only run on one machine at a time, we can re-use 98 * the kernel can only run on one machine at a time, we can re-use
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 6bde5956358d..a4af8c52d7ee 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -285,7 +285,7 @@ static inline void pxa25x_init_pm(void) {}
285 285
286static int pxa25x_set_wake(struct irq_data *d, unsigned int on) 286static int pxa25x_set_wake(struct irq_data *d, unsigned int on)
287{ 287{
288 int gpio = IRQ_TO_GPIO(d->irq); 288 int gpio = irq_to_gpio(d->irq);
289 uint32_t mask = 0; 289 uint32_t mask = 0;
290 290
291 if (gpio >= 0 && gpio < 85) 291 if (gpio >= 0 && gpio < 85)
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 1cb5d0f9723f..909756eaf4b7 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -345,7 +345,7 @@ static inline void pxa27x_init_pm(void) {}
345 */ 345 */
346static int pxa27x_set_wake(struct irq_data *d, unsigned int on) 346static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
347{ 347{
348 int gpio = IRQ_TO_GPIO(d->irq); 348 int gpio = irq_to_gpio(d->irq);
349 uint32_t mask; 349 uint32_t mask;
350 350
351 if (gpio >= 0 && gpio < 128) 351 if (gpio >= 0 && gpio < 128)
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index 0db2411ef4bb..716662008ce2 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -409,6 +409,10 @@ struct platform_device s3c24xx_pwm_device = {
409 .num_resources = 0, 409 .num_resources = 0,
410}; 410};
411 411
412static struct platform_device gta02_dfbmcs320_device = {
413 .name = "dfbmcs320",
414};
415
412static struct i2c_board_info gta02_i2c_devs[] __initdata = { 416static struct i2c_board_info gta02_i2c_devs[] __initdata = {
413 { 417 {
414 I2C_BOARD_INFO("pcf50633", 0x73), 418 I2C_BOARD_INFO("pcf50633", 0x73),
@@ -523,6 +527,7 @@ static struct platform_device *gta02_devices[] __initdata = {
523 &s3c_device_iis, 527 &s3c_device_iis,
524 &samsung_asoc_dma, 528 &samsung_asoc_dma,
525 &s3c_device_i2c0, 529 &s3c_device_i2c0,
530 &gta02_dfbmcs320_device,
526 &gta02_buttons_device, 531 &gta02_buttons_device,
527 &s3c_device_adc, 532 &s3c_device_adc,
528 &s3c_device_ts, 533 &s3c_device_ts,
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
index 76a3f654220f..65a1aba6823d 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/arch/arm/mach-tegra/gpio.c
@@ -257,7 +257,8 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
257void tegra_gpio_resume(void) 257void tegra_gpio_resume(void)
258{ 258{
259 unsigned long flags; 259 unsigned long flags;
260 int b, p, i; 260 int b;
261 int p;
261 262
262 local_irq_save(flags); 263 local_irq_save(flags);
263 264
@@ -280,7 +281,8 @@ void tegra_gpio_resume(void)
280void tegra_gpio_suspend(void) 281void tegra_gpio_suspend(void)
281{ 282{
282 unsigned long flags; 283 unsigned long flags;
283 int b, p, i; 284 int b;
285 int p;
284 286
285 local_irq_save(flags); 287 local_irq_save(flags);
286 for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) { 288 for (b = 0; b < ARRAY_SIZE(tegra_gpio_banks); b++) {
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index 6d7c4eea4dcb..4459470c052d 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -1362,14 +1362,15 @@ static int tegra_clk_shared_bus_set_rate(struct clk *c, unsigned long rate)
1362{ 1362{
1363 unsigned long flags; 1363 unsigned long flags;
1364 int ret; 1364 int ret;
1365 long new_rate = rate;
1365 1366
1366 rate = clk_round_rate(c->parent, rate); 1367 new_rate = clk_round_rate(c->parent, new_rate);
1367 if (rate < 0) 1368 if (new_rate < 0)
1368 return rate; 1369 return new_rate;
1369 1370
1370 spin_lock_irqsave(&c->parent->spinlock, flags); 1371 spin_lock_irqsave(&c->parent->spinlock, flags);
1371 1372
1372 c->u.shared_bus_user.rate = rate; 1373 c->u.shared_bus_user.rate = new_rate;
1373 ret = tegra_clk_shared_bus_update(c->parent); 1374 ret = tegra_clk_shared_bus_update(c->parent);
1374 1375
1375 spin_unlock_irqrestore(&c->parent->spinlock, flags); 1376 spin_unlock_irqrestore(&c->parent->spinlock, flags);
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index af913741e6ec..6e1907fa94f0 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -178,16 +178,15 @@ static struct i2c_board_info __initdata mop500_i2c0_devices[] = {
178 .irq = NOMADIK_GPIO_TO_IRQ(217), 178 .irq = NOMADIK_GPIO_TO_IRQ(217),
179 .platform_data = &mop500_tc35892_data, 179 .platform_data = &mop500_tc35892_data,
180 }, 180 },
181}; 181 /* I2C0 devices only available prior to HREFv60 */
182
183/* I2C0 devices only available prior to HREFv60 */
184static struct i2c_board_info __initdata mop500_i2c0_old_devices[] = {
185 { 182 {
186 I2C_BOARD_INFO("tps61052", 0x33), 183 I2C_BOARD_INFO("tps61052", 0x33),
187 .platform_data = &mop500_tps61052_data, 184 .platform_data = &mop500_tps61052_data,
188 }, 185 },
189}; 186};
190 187
188#define NUM_PRE_V60_I2C0_DEVICES 1
189
191static struct i2c_board_info __initdata mop500_i2c2_devices[] = { 190static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
192 { 191 {
193 /* lp5521 LED driver, 1st device */ 192 /* lp5521 LED driver, 1st device */
@@ -425,6 +424,8 @@ static void __init mop500_uart_init(void)
425 424
426static void __init mop500_init_machine(void) 425static void __init mop500_init_machine(void)
427{ 426{
427 int i2c0_devs;
428
428 /* 429 /*
429 * The HREFv60 board removed a GPIO expander and routed 430 * The HREFv60 board removed a GPIO expander and routed
430 * all these GPIO pins to the internal GPIO controller 431 * all these GPIO pins to the internal GPIO controller
@@ -448,11 +449,11 @@ static void __init mop500_init_machine(void)
448 449
449 platform_device_register(&ab8500_device); 450 platform_device_register(&ab8500_device);
450 451
451 i2c_register_board_info(0, mop500_i2c0_devices, 452 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
452 ARRAY_SIZE(mop500_i2c0_devices)); 453 if (machine_is_hrefv60())
453 if (!machine_is_hrefv60()) 454 i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
454 i2c_register_board_info(0, mop500_i2c0_old_devices, 455
455 ARRAY_SIZE(mop500_i2c0_old_devices)); 456 i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
456 i2c_register_board_info(2, mop500_i2c2_devices, 457 i2c_register_board_info(2, mop500_i2c2_devices,
457 ARRAY_SIZE(mop500_i2c2_devices)); 458 ARRAY_SIZE(mop500_i2c2_devices));
458} 459}
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index afe209e1e1f8..74be05f3e03a 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -7,6 +7,7 @@
7#include <linux/shm.h> 7#include <linux/shm.h>
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/personality.h>
10#include <linux/random.h> 11#include <linux/random.h>
11#include <asm/cputype.h> 12#include <asm/cputype.h>
12#include <asm/system.h> 13#include <asm/system.h>
@@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
82 mm->cached_hole_size = 0; 83 mm->cached_hole_size = 0;
83 } 84 }
84 /* 8 bits of randomness in 20 address space bits */ 85 /* 8 bits of randomness in 20 address space bits */
85 if (current->flags & PF_RANDOMIZE) 86 if ((current->flags & PF_RANDOMIZE) &&
87 !(current->personality & ADDR_NO_RANDOMIZE))
86 addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT; 88 addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
87 89
88full_search: 90full_search:
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index b46eb21f05c7..bf8a1d1cccb6 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -390,7 +390,7 @@ ENTRY(cpu_arm920_set_pte_ext)
390/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 390/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
391.globl cpu_arm920_suspend_size 391.globl cpu_arm920_suspend_size
392.equ cpu_arm920_suspend_size, 4 * 3 392.equ cpu_arm920_suspend_size, 4 * 3
393#ifdef CONFIG_PM 393#ifdef CONFIG_PM_SLEEP
394ENTRY(cpu_arm920_do_suspend) 394ENTRY(cpu_arm920_do_suspend)
395 stmfd sp!, {r4 - r7, lr} 395 stmfd sp!, {r4 - r7, lr}
396 mrc p15, 0, r4, c13, c0, 0 @ PID 396 mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 6a4bdb2c94a7..0ed85d930c09 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -404,7 +404,7 @@ ENTRY(cpu_arm926_set_pte_ext)
404/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 404/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
405.globl cpu_arm926_suspend_size 405.globl cpu_arm926_suspend_size
406.equ cpu_arm926_suspend_size, 4 * 3 406.equ cpu_arm926_suspend_size, 4 * 3
407#ifdef CONFIG_PM 407#ifdef CONFIG_PM_SLEEP
408ENTRY(cpu_arm926_do_suspend) 408ENTRY(cpu_arm926_do_suspend)
409 stmfd sp!, {r4 - r7, lr} 409 stmfd sp!, {r4 - r7, lr}
410 mrc p15, 0, r4, c13, c0, 0 @ PID 410 mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 74483d1977fe..184a9c997e36 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -171,7 +171,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
171 171
172.globl cpu_sa1100_suspend_size 172.globl cpu_sa1100_suspend_size
173.equ cpu_sa1100_suspend_size, 4*4 173.equ cpu_sa1100_suspend_size, 4*4
174#ifdef CONFIG_PM 174#ifdef CONFIG_PM_SLEEP
175ENTRY(cpu_sa1100_do_suspend) 175ENTRY(cpu_sa1100_do_suspend)
176 stmfd sp!, {r4 - r7, lr} 176 stmfd sp!, {r4 - r7, lr}
177 mrc p15, 0, r4, c3, c0, 0 @ domain ID 177 mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bfa0c9f611c5..7c99cb4c8e4f 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -124,7 +124,7 @@ ENTRY(cpu_v6_set_pte_ext)
124/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ 124/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
125.globl cpu_v6_suspend_size 125.globl cpu_v6_suspend_size
126.equ cpu_v6_suspend_size, 4 * 8 126.equ cpu_v6_suspend_size, 4 * 8
127#ifdef CONFIG_PM 127#ifdef CONFIG_PM_SLEEP
128ENTRY(cpu_v6_do_suspend) 128ENTRY(cpu_v6_do_suspend)
129 stmfd sp!, {r4 - r11, lr} 129 stmfd sp!, {r4 - r11, lr}
130 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 130 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c35618e42f6f..babfba09c89f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -211,7 +211,7 @@ cpu_v7_name:
211/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ 211/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
212.globl cpu_v7_suspend_size 212.globl cpu_v7_suspend_size
213.equ cpu_v7_suspend_size, 4 * 8 213.equ cpu_v7_suspend_size, 4 * 8
214#ifdef CONFIG_PM 214#ifdef CONFIG_PM_SLEEP
215ENTRY(cpu_v7_do_suspend) 215ENTRY(cpu_v7_do_suspend)
216 stmfd sp!, {r4 - r11, lr} 216 stmfd sp!, {r4 - r11, lr}
217 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID 217 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 63d8b2044e84..596213699f37 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -417,7 +417,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
417 417
418.globl cpu_xsc3_suspend_size 418.globl cpu_xsc3_suspend_size
419.equ cpu_xsc3_suspend_size, 4 * 8 419.equ cpu_xsc3_suspend_size, 4 * 8
420#ifdef CONFIG_PM 420#ifdef CONFIG_PM_SLEEP
421ENTRY(cpu_xsc3_do_suspend) 421ENTRY(cpu_xsc3_do_suspend)
422 stmfd sp!, {r4 - r10, lr} 422 stmfd sp!, {r4 - r10, lr}
423 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 423 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 086038cd86ab..ce233bcbf506 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -518,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext)
518 518
519.globl cpu_xscale_suspend_size 519.globl cpu_xscale_suspend_size
520.equ cpu_xscale_suspend_size, 4 * 7 520.equ cpu_xscale_suspend_size, 4 * 7
521#ifdef CONFIG_PM 521#ifdef CONFIG_PM_SLEEP
522ENTRY(cpu_xscale_do_suspend) 522ENTRY(cpu_xscale_do_suspend)
523 stmfd sp!, {r4 - r10, lr} 523 stmfd sp!, {r4 - r10, lr}
524 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode 524 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-s5p/pm.c
index d592b6304b48..d15dc47b0e3d 100644
--- a/arch/arm/plat-s5p/pm.c
+++ b/arch/arm/plat-s5p/pm.c
@@ -19,17 +19,6 @@
19 19
20#define PFX "s5p pm: " 20#define PFX "s5p pm: "
21 21
22/* s3c_pm_check_resume_pin
23 *
24 * check to see if the pin is configured correctly for sleep mode, and
25 * make any necessary adjustments if it is not
26*/
27
28static void s3c_pm_check_resume_pin(unsigned int pin, unsigned int irqoffs)
29{
30 /* nothing here yet */
31}
32
33/* s3c_pm_configure_extint 22/* s3c_pm_configure_extint
34 * 23 *
35 * configure all external interrupt pins 24 * configure all external interrupt pins
diff --git a/arch/arm/plat-samsung/pm-check.c b/arch/arm/plat-samsung/pm-check.c
index e4baf76f374a..6b733fafe7cd 100644
--- a/arch/arm/plat-samsung/pm-check.c
+++ b/arch/arm/plat-samsung/pm-check.c
@@ -164,7 +164,6 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
164 */ 164 */
165static u32 *s3c_pm_runcheck(struct resource *res, u32 *val) 165static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
166{ 166{
167 void *save_at = phys_to_virt(s3c_sleep_save_phys);
168 unsigned long addr; 167 unsigned long addr;
169 unsigned long left; 168 unsigned long left;
170 void *stkpage; 169 void *stkpage;
@@ -192,11 +191,6 @@ static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
192 goto skip_check; 191 goto skip_check;
193 } 192 }
194 193
195 if (in_region(ptr, left, save_at, 32*4 )) {
196 S3C_PMDBG("skipping %08lx, has save block in\n", addr);
197 goto skip_check;
198 }
199
200 /* calculate and check the checksum */ 194 /* calculate and check the checksum */
201 195
202 calc = crc32_le(~0, ptr, left); 196 calc = crc32_le(~0, ptr, left);
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index d5b58d31903c..5c0a440d6e16 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -214,8 +214,9 @@ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count)
214 * 214 *
215 * print any IRQs asserted at resume time (ie, we woke from) 215 * print any IRQs asserted at resume time (ie, we woke from)
216*/ 216*/
217static void s3c_pm_show_resume_irqs(int start, unsigned long which, 217static void __maybe_unused s3c_pm_show_resume_irqs(int start,
218 unsigned long mask) 218 unsigned long which,
219 unsigned long mask)
219{ 220{
220 int i; 221 int i;
221 222
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index bbf3da012afd..f74695075e64 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -78,6 +78,14 @@ static void vfp_thread_exit(struct thread_info *thread)
78 put_cpu(); 78 put_cpu();
79} 79}
80 80
81static void vfp_thread_copy(struct thread_info *thread)
82{
83 struct thread_info *parent = current_thread_info();
84
85 vfp_sync_hwstate(parent);
86 thread->vfpstate = parent->vfpstate;
87}
88
81/* 89/*
82 * When this function is called with the following 'cmd's, the following 90 * When this function is called with the following 'cmd's, the following
83 * is true while this function is being run: 91 * is true while this function is being run:
@@ -104,12 +112,17 @@ static void vfp_thread_exit(struct thread_info *thread)
104static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) 112static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
105{ 113{
106 struct thread_info *thread = v; 114 struct thread_info *thread = v;
115 u32 fpexc;
116#ifdef CONFIG_SMP
117 unsigned int cpu;
118#endif
107 119
108 if (likely(cmd == THREAD_NOTIFY_SWITCH)) { 120 switch (cmd) {
109 u32 fpexc = fmrx(FPEXC); 121 case THREAD_NOTIFY_SWITCH:
122 fpexc = fmrx(FPEXC);
110 123
111#ifdef CONFIG_SMP 124#ifdef CONFIG_SMP
112 unsigned int cpu = thread->cpu; 125 cpu = thread->cpu;
113 126
114 /* 127 /*
115 * On SMP, if VFP is enabled, save the old state in 128 * On SMP, if VFP is enabled, save the old state in
@@ -134,13 +147,20 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
134 * old state. 147 * old state.
135 */ 148 */
136 fmxr(FPEXC, fpexc & ~FPEXC_EN); 149 fmxr(FPEXC, fpexc & ~FPEXC_EN);
137 return NOTIFY_DONE; 150 break;
138 }
139 151
140 if (cmd == THREAD_NOTIFY_FLUSH) 152 case THREAD_NOTIFY_FLUSH:
141 vfp_thread_flush(thread); 153 vfp_thread_flush(thread);
142 else 154 break;
155
156 case THREAD_NOTIFY_EXIT:
143 vfp_thread_exit(thread); 157 vfp_thread_exit(thread);
158 break;
159
160 case THREAD_NOTIFY_COPY:
161 vfp_thread_copy(thread);
162 break;
163 }
144 164
145 return NOTIFY_DONE; 165 return NOTIFY_DONE;
146} 166}
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 851b3bf6e962..eccdefe70d4e 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -6,7 +6,6 @@ config MICROBLAZE
6 select HAVE_FUNCTION_GRAPH_TRACER 6 select HAVE_FUNCTION_GRAPH_TRACER
7 select HAVE_DYNAMIC_FTRACE 7 select HAVE_DYNAMIC_FTRACE
8 select HAVE_FTRACE_MCOUNT_RECORD 8 select HAVE_FTRACE_MCOUNT_RECORD
9 select USB_ARCH_HAS_EHCI
10 select ARCH_WANT_OPTIONAL_GPIOLIB 9 select ARCH_WANT_OPTIONAL_GPIOLIB
11 select HAVE_OPROFILE 10 select HAVE_OPROFILE
12 select HAVE_ARCH_KGDB 11 select HAVE_ARCH_KGDB
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b6ff882f695b..8f4d50b0adfa 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -209,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE
209config ARCH_SUSPEND_POSSIBLE 209config ARCH_SUSPEND_POSSIBLE
210 def_bool y 210 def_bool y
211 depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \ 211 depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
212 PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x 212 (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x
213 213
214config PPC_DCR_NATIVE 214config PPC_DCR_NATIVE
215 bool 215 bool
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index be3cdf9134ce..1833d1a07e79 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -382,10 +382,12 @@ extern const char *powerpc_base_platform;
382#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ 382#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
383 CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ 383 CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
384 CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) 384 CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
385#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ 385#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
386 CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
387 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ 386 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
388 CPU_FTR_DBELL) 387 CPU_FTR_DBELL)
388#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
389 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
390 CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
389#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) 391#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
390 392
391/* 64-bit CPUs */ 393/* 64-bit CPUs */
@@ -435,11 +437,15 @@ extern const char *powerpc_base_platform;
435#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) 437#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
436 438
437#ifdef __powerpc64__ 439#ifdef __powerpc64__
440#ifdef CONFIG_PPC_BOOK3E
441#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500)
442#else
438#define CPU_FTRS_POSSIBLE \ 443#define CPU_FTRS_POSSIBLE \
439 (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ 444 (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
440 CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ 445 CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
441 CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ 446 CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
442 CPU_FTR_1T_SEGMENT | CPU_FTR_VSX) 447 CPU_FTR_1T_SEGMENT | CPU_FTR_VSX)
448#endif
443#else 449#else
444enum { 450enum {
445 CPU_FTRS_POSSIBLE = 451 CPU_FTRS_POSSIBLE =
@@ -473,16 +479,21 @@ enum {
473#endif 479#endif
474#ifdef CONFIG_E500 480#ifdef CONFIG_E500
475 CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | 481 CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
482 CPU_FTRS_E5500 |
476#endif 483#endif
477 0, 484 0,
478}; 485};
479#endif /* __powerpc64__ */ 486#endif /* __powerpc64__ */
480 487
481#ifdef __powerpc64__ 488#ifdef __powerpc64__
489#ifdef CONFIG_PPC_BOOK3E
490#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500)
491#else
482#define CPU_FTRS_ALWAYS \ 492#define CPU_FTRS_ALWAYS \
483 (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ 493 (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
484 CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ 494 CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
485 CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) 495 CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
496#endif
486#else 497#else
487enum { 498enum {
488 CPU_FTRS_ALWAYS = 499 CPU_FTRS_ALWAYS =
@@ -513,6 +524,7 @@ enum {
513#endif 524#endif
514#ifdef CONFIG_E500 525#ifdef CONFIG_E500
515 CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & 526 CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
527 CPU_FTRS_E5500 &
516#endif 528#endif
517 CPU_FTRS_POSSIBLE, 529 CPU_FTRS_POSSIBLE,
518}; 530};
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 811f04ac3660..8d1569c29042 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
162 * on platforms where such control is possible. 162 * on platforms where such control is possible.
163 */ 163 */
164#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ 164#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
165 defined(CONFIG_KPROBES) 165 defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
166#define PAGE_KERNEL_TEXT PAGE_KERNEL_X 166#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
167#else 167#else
168#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX 168#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h
index ae9c899c8a6d..d12b11d7641e 100644
--- a/arch/powerpc/include/asm/uninorth.h
+++ b/arch/powerpc/include/asm/uninorth.h
@@ -60,7 +60,7 @@
60 * 60 *
61 * Obviously, the GART is not cache coherent and so any change to it 61 * Obviously, the GART is not cache coherent and so any change to it
62 * must be flushed to memory (or maybe just make the GART space non 62 * must be flushed to memory (or maybe just make the GART space non
63 * cachable). AGP memory itself does't seem to be cache coherent neither. 63 * cachable). AGP memory itself doesn't seem to be cache coherent neither.
64 * 64 *
65 * In order to invalidate the GART (which is probably necessary to inval 65 * In order to invalidate the GART (which is probably necessary to inval
66 * the bridge internal TLBs), the following sequence has to be written, 66 * the bridge internal TLBs), the following sequence has to be written,
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c9b68d07ac4f..b9602ee06deb 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1973,7 +1973,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1973 .pvr_mask = 0xffff0000, 1973 .pvr_mask = 0xffff0000,
1974 .pvr_value = 0x80240000, 1974 .pvr_value = 0x80240000,
1975 .cpu_name = "e5500", 1975 .cpu_name = "e5500",
1976 .cpu_features = CPU_FTRS_E500MC, 1976 .cpu_features = CPU_FTRS_E5500,
1977 .cpu_user_features = COMMON_USER_BOOKE, 1977 .cpu_user_features = COMMON_USER_BOOKE,
1978 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | 1978 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
1979 MMU_FTR_USE_TLBILX, 1979 MMU_FTR_USE_TLBILX,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 3d3d416339dd..5b5e1f002a8e 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -163,7 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
163} 163}
164 164
165/* wait for all the CPUs to hit real mode but timeout if they don't come in */ 165/* wait for all the CPUs to hit real mode but timeout if they don't come in */
166#if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) 166#ifdef CONFIG_PPC_STD_MMU_64
167static void crash_kexec_wait_realmode(int cpu) 167static void crash_kexec_wait_realmode(int cpu)
168{ 168{
169 unsigned int msecs; 169 unsigned int msecs;
@@ -188,9 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
188 } 188 }
189 mb(); 189 mb();
190} 190}
191#else 191#endif /* CONFIG_PPC_STD_MMU_64 */
192static inline void crash_kexec_wait_realmode(int cpu) {}
193#endif
194 192
195/* 193/*
196 * This function will be called by secondary cpus or by kexec cpu 194 * This function will be called by secondary cpus or by kexec cpu
@@ -235,7 +233,9 @@ void crash_kexec_secondary(struct pt_regs *regs)
235 crash_ipi_callback(regs); 233 crash_ipi_callback(regs);
236} 234}
237 235
238#else 236#else /* ! CONFIG_SMP */
237static inline void crash_kexec_wait_realmode(int cpu) {}
238
239static void crash_kexec_prepare_cpus(int cpu) 239static void crash_kexec_prepare_cpus(int cpu)
240{ 240{
241 /* 241 /*
@@ -255,7 +255,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
255{ 255{
256 cpus_in_sr = CPU_MASK_NONE; 256 cpus_in_sr = CPU_MASK_NONE;
257} 257}
258#endif 258#endif /* CONFIG_SMP */
259 259
260/* 260/*
261 * Register a function to be called on shutdown. Only use this if you 261 * Register a function to be called on shutdown. Only use this if you
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index c834757bebc0..2b97b80d6d7d 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void)
330 if (!parent) 330 if (!parent)
331 continue; 331 continue;
332 if (of_match_node(legacy_serial_parents, parent) != NULL) { 332 if (of_match_node(legacy_serial_parents, parent) != NULL) {
333 index = add_legacy_soc_port(np, np); 333 if (of_device_is_available(np)) {
334 if (index >= 0 && np == stdout) 334 index = add_legacy_soc_port(np, np);
335 legacy_serial_console = index; 335 if (index >= 0 && np == stdout)
336 legacy_serial_console = index;
337 }
336 } 338 }
337 of_node_put(parent); 339 of_node_put(parent);
338 } 340 }
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index c4063b7f49a0..822f63008ae1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
398 return 0; 398 return 0;
399} 399}
400 400
401static u64 check_and_compute_delta(u64 prev, u64 val)
402{
403 u64 delta = (val - prev) & 0xfffffffful;
404
405 /*
 406	 * POWER7 can roll back counter values; if the new value is smaller
 407	 * than the previous value it will cause the delta and the counter to
 408	 * have bogus values unless we rolled a counter over. If a counter is
 409	 * rolled back, it will be smaller, but within 256, which is the maximum
 410	 * number of events to roll back at once. If we detect a rollback,
 411	 * return 0. This can lead to a small lack of precision in the
412 * counters.
413 */
414 if (prev > val && (prev - val) < 256)
415 delta = 0;
416
417 return delta;
418}
419
401static void power_pmu_read(struct perf_event *event) 420static void power_pmu_read(struct perf_event *event)
402{ 421{
403 s64 val, delta, prev; 422 s64 val, delta, prev;
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
416 prev = local64_read(&event->hw.prev_count); 435 prev = local64_read(&event->hw.prev_count);
417 barrier(); 436 barrier();
418 val = read_pmc(event->hw.idx); 437 val = read_pmc(event->hw.idx);
438 delta = check_and_compute_delta(prev, val);
439 if (!delta)
440 return;
419 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); 441 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
420 442
421 /* The counters are only 32 bits wide */
422 delta = (val - prev) & 0xfffffffful;
423 local64_add(delta, &event->count); 443 local64_add(delta, &event->count);
424 local64_sub(delta, &event->hw.period_left); 444 local64_sub(delta, &event->hw.period_left);
425} 445}
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
449 val = (event->hw.idx == 5) ? pmc5 : pmc6; 469 val = (event->hw.idx == 5) ? pmc5 : pmc6;
450 prev = local64_read(&event->hw.prev_count); 470 prev = local64_read(&event->hw.prev_count);
451 event->hw.idx = 0; 471 event->hw.idx = 0;
452 delta = (val - prev) & 0xfffffffful; 472 delta = check_and_compute_delta(prev, val);
453 local64_add(delta, &event->count); 473 if (delta)
474 local64_add(delta, &event->count);
454 } 475 }
455} 476}
456 477
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
458 unsigned long pmc5, unsigned long pmc6) 479 unsigned long pmc5, unsigned long pmc6)
459{ 480{
460 struct perf_event *event; 481 struct perf_event *event;
461 u64 val; 482 u64 val, prev;
462 int i; 483 int i;
463 484
464 for (i = 0; i < cpuhw->n_limited; ++i) { 485 for (i = 0; i < cpuhw->n_limited; ++i) {
465 event = cpuhw->limited_counter[i]; 486 event = cpuhw->limited_counter[i];
466 event->hw.idx = cpuhw->limited_hwidx[i]; 487 event->hw.idx = cpuhw->limited_hwidx[i];
467 val = (event->hw.idx == 5) ? pmc5 : pmc6; 488 val = (event->hw.idx == 5) ? pmc5 : pmc6;
468 local64_set(&event->hw.prev_count, val); 489 prev = local64_read(&event->hw.prev_count);
490 if (check_and_compute_delta(prev, val))
491 local64_set(&event->hw.prev_count, val);
469 perf_event_update_userpage(event); 492 perf_event_update_userpage(event);
470 } 493 }
471} 494}
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
1197 1220
1198 /* we don't have to worry about interrupts here */ 1221 /* we don't have to worry about interrupts here */
1199 prev = local64_read(&event->hw.prev_count); 1222 prev = local64_read(&event->hw.prev_count);
1200 delta = (val - prev) & 0xfffffffful; 1223 delta = check_and_compute_delta(prev, val);
1201 local64_add(delta, &event->count); 1224 local64_add(delta, &event->count);
1202 1225
1203 /* 1226 /*
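
A minimal userspace model of the delta logic the perf_event.c hunks above add; the function is re-typed here as a standalone sketch for illustration, not the kernel code itself, and the printed values are just worked examples:

#include <stdint.h>
#include <stdio.h>

/* Counters are 32 bits wide; a rollback of up to 256 events is reported
 * as a zero delta instead of a huge bogus one. */
static uint64_t check_and_compute_delta(uint64_t prev, uint64_t val)
{
	uint64_t delta = (val - prev) & 0xfffffffful;

	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}

int main(void)
{
	/* normal increment, 32-bit wraparound, and a small rollback */
	printf("%llu\n", (unsigned long long)check_and_compute_delta(100, 150));            /* 50 */
	printf("%llu\n", (unsigned long long)check_and_compute_delta(0xfffffff0ULL, 0x10)); /* 32 */
	printf("%llu\n", (unsigned long long)check_and_compute_delta(150, 100));            /* 0  */
	return 0;
}
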
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 375480c56eb9..f33acfd872ad 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -229,6 +229,9 @@ static u64 scan_dispatch_log(u64 stop_tb)
229 u64 stolen = 0; 229 u64 stolen = 0;
230 u64 dtb; 230 u64 dtb;
231 231
232 if (!dtl)
233 return 0;
234
232 if (i == vpa->dtl_idx) 235 if (i == vpa->dtl_idx)
233 return 0; 236 return 0;
234 while (i < vpa->dtl_idx) { 237 while (i < vpa->dtl_idx) {
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index a830c5e80657..bc5f0dc6ae1e 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -842,6 +842,7 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
842 mpic_setup_this_cpu(); 842 mpic_setup_this_cpu();
843} 843}
844 844
845#ifdef CONFIG_PPC64
845#ifdef CONFIG_HOTPLUG_CPU 846#ifdef CONFIG_HOTPLUG_CPU
846static int smp_core99_cpu_notify(struct notifier_block *self, 847static int smp_core99_cpu_notify(struct notifier_block *self,
847 unsigned long action, void *hcpu) 848 unsigned long action, void *hcpu)
@@ -879,7 +880,6 @@ static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
879 880
880static void __init smp_core99_bringup_done(void) 881static void __init smp_core99_bringup_done(void)
881{ 882{
882#ifdef CONFIG_PPC64
883 extern void g5_phy_disable_cpu1(void); 883 extern void g5_phy_disable_cpu1(void);
884 884
885 /* Close i2c bus if it was used for tb sync */ 885 /* Close i2c bus if it was used for tb sync */
@@ -894,14 +894,14 @@ static void __init smp_core99_bringup_done(void)
894 set_cpu_present(1, false); 894 set_cpu_present(1, false);
895 g5_phy_disable_cpu1(); 895 g5_phy_disable_cpu1();
896 } 896 }
897#endif /* CONFIG_PPC64 */
898
899#ifdef CONFIG_HOTPLUG_CPU 897#ifdef CONFIG_HOTPLUG_CPU
900 register_cpu_notifier(&smp_core99_cpu_nb); 898 register_cpu_notifier(&smp_core99_cpu_nb);
901#endif 899#endif
900
902 if (ppc_md.progress) 901 if (ppc_md.progress)
903 ppc_md.progress("smp_core99_bringup_done", 0x349); 902 ppc_md.progress("smp_core99_bringup_done", 0x349);
904} 903}
904#endif /* CONFIG_PPC64 */
905 905
906#ifdef CONFIG_HOTPLUG_CPU 906#ifdef CONFIG_HOTPLUG_CPU
907 907
@@ -975,7 +975,9 @@ static void pmac_cpu_die(void)
975struct smp_ops_t core99_smp_ops = { 975struct smp_ops_t core99_smp_ops = {
976 .message_pass = smp_mpic_message_pass, 976 .message_pass = smp_mpic_message_pass,
977 .probe = smp_core99_probe, 977 .probe = smp_core99_probe,
978#ifdef CONFIG_PPC64
978 .bringup_done = smp_core99_bringup_done, 979 .bringup_done = smp_core99_bringup_done,
980#endif
979 .kick_cpu = smp_core99_kick_cpu, 981 .kick_cpu = smp_core99_kick_cpu,
980 .setup_cpu = smp_core99_setup_cpu, 982 .setup_cpu = smp_core99_setup_cpu,
981 .give_timebase = smp_core99_give_timebase, 983 .give_timebase = smp_core99_give_timebase,
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 000724149089..6c42cfde8415 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -287,14 +287,22 @@ static int alloc_dispatch_logs(void)
287 int cpu, ret; 287 int cpu, ret;
288 struct paca_struct *pp; 288 struct paca_struct *pp;
289 struct dtl_entry *dtl; 289 struct dtl_entry *dtl;
290 struct kmem_cache *dtl_cache;
290 291
291 if (!firmware_has_feature(FW_FEATURE_SPLPAR)) 292 if (!firmware_has_feature(FW_FEATURE_SPLPAR))
292 return 0; 293 return 0;
293 294
295 dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
296 DISPATCH_LOG_BYTES, 0, NULL);
297 if (!dtl_cache) {
298 pr_warn("Failed to create dispatch trace log buffer cache\n");
299 pr_warn("Stolen time statistics will be unreliable\n");
300 return 0;
301 }
302
294 for_each_possible_cpu(cpu) { 303 for_each_possible_cpu(cpu) {
295 pp = &paca[cpu]; 304 pp = &paca[cpu];
296 dtl = kmalloc_node(DISPATCH_LOG_BYTES, GFP_KERNEL, 305 dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
297 cpu_to_node(cpu));
298 if (!dtl) { 306 if (!dtl) {
299 pr_warn("Failed to allocate dispatch trace log for cpu %d\n", 307 pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
300 cpu); 308 cpu);
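
The hunk above replaces a per-cpu kmalloc_node() with a named kmem_cache whose alignment equals its object size; a plausible reading is that this guarantees each dispatch trace log buffer is naturally aligned, which plain kmalloc_node() does not promise. A hedged kernel-style sketch of the same pattern (DISPATCH_LOG_BYTES and struct dtl_entry come from the surrounding powerpc code; the helper name is illustrative):

#include <linux/slab.h>

static struct dtl_entry *alloc_one_dtl(struct kmem_cache **cachep)
{
	if (!*cachep)
		*cachep = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
					    DISPATCH_LOG_BYTES, 0, NULL);	/* align == size */
	if (!*cachep)
		return NULL;
	return kmem_cache_alloc(*cachep, GFP_KERNEL);	/* one buffer per cpu in the loop above */
}
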
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index f8f7f28c6343..68ca9290df94 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -324,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
324 struct resource rsrc; 324 struct resource rsrc;
325 const int *bus_range; 325 const int *bus_range;
326 326
327 if (!of_device_is_available(dev)) {
328 pr_warning("%s: disabled\n", dev->full_name);
329 return -ENODEV;
330 }
331
327 pr_debug("Adding PCI host bridge %s\n", dev->full_name); 332 pr_debug("Adding PCI host bridge %s\n", dev->full_name);
328 333
329 /* Fetch host bridge registers address */ 334 /* Fetch host bridge registers address */
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 975e3ab13cb5..44bca3f994b0 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -76,7 +76,7 @@ static void prng_seed(int nbytes)
76 76
77 /* Add the entropy */ 77 /* Add the entropy */
78 while (nbytes >= 8) { 78 while (nbytes >= 8) {
79 *((__u64 *)parm_block) ^= *((__u64 *)buf+i*8); 79 *((__u64 *)parm_block) ^= *((__u64 *)buf+i);
80 prng_add_entropy(); 80 prng_add_entropy();
81 i += 8; 81 i += 8;
82 nbytes -= 8; 82 nbytes -= 8;
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
index 7e9d30d567b0..ab0e041ac54c 100644
--- a/arch/s390/kvm/sie64a.S
+++ b/arch/s390/kvm/sie64a.S
@@ -48,10 +48,10 @@ sie_irq_handler:
48 tm __TI_flags+7(%r2),_TIF_EXIT_SIE 48 tm __TI_flags+7(%r2),_TIF_EXIT_SIE
49 jz 0f 49 jz 0f
50 larl %r2,sie_exit # work pending, leave sie 50 larl %r2,sie_exit # work pending, leave sie
51 stg %r2,__LC_RETURN_PSW+8 51 stg %r2,SPI_PSW+8(0,%r15)
52 br %r14 52 br %r14
530: larl %r2,sie_reenter # re-enter with guest id 530: larl %r2,sie_reenter # re-enter with guest id
54 stg %r2,__LC_RETURN_PSW+8 54 stg %r2,SPI_PSW+8(0,%r15)
551: br %r14 551: br %r14
56 56
57/* 57/*
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 9217e332b118..4cf85fef407c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -558,9 +558,9 @@ static void pfault_interrupt(unsigned int ext_int_code,
558 * Get the token (= address of the task structure of the affected task). 558 * Get the token (= address of the task structure of the affected task).
559 */ 559 */
560#ifdef CONFIG_64BIT 560#ifdef CONFIG_64BIT
561 tsk = *(struct task_struct **) param64; 561 tsk = (struct task_struct *) param64;
562#else 562#else
563 tsk = *(struct task_struct **) param32; 563 tsk = (struct task_struct *) param32;
564#endif 564#endif
565 565
566 if (subcode & 0x0080) { 566 if (subcode & 0x0080) {
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 122ffbd08ce0..0607e4b14b27 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -24,12 +24,13 @@ static void change_page_attr(unsigned long addr, int numpages,
24 WARN_ON_ONCE(1); 24 WARN_ON_ONCE(1);
25 continue; 25 continue;
26 } 26 }
27 ptep = pte_offset_kernel(pmdp, addr + i * PAGE_SIZE); 27 ptep = pte_offset_kernel(pmdp, addr);
28 28
29 pte = *ptep; 29 pte = *ptep;
30 pte = set(pte); 30 pte = set(pte);
31 ptep_invalidate(&init_mm, addr + i * PAGE_SIZE, ptep); 31 ptep_invalidate(&init_mm, addr, ptep);
32 *ptep = pte; 32 *ptep = pte;
33 addr += PAGE_SIZE;
33 } 34 }
34} 35}
35 36
diff --git a/arch/x86/include/asm/gart.h b/arch/x86/include/asm/gart.h
index 43085bfc99c3..156cd5d18d2a 100644
--- a/arch/x86/include/asm/gart.h
+++ b/arch/x86/include/asm/gart.h
@@ -66,7 +66,7 @@ static inline void gart_set_size_and_enable(struct pci_dev *dev, u32 order)
66 * Don't enable translation but enable GART IO and CPU accesses. 66 * Don't enable translation but enable GART IO and CPU accesses.
67 * Also, set DISTLBWALKPRB since GART tables memory is UC. 67 * Also, set DISTLBWALKPRB since GART tables memory is UC.
68 */ 68 */
69 ctl = DISTLBWALKPRB | order << 1; 69 ctl = order << 1;
70 70
71 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); 71 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
72} 72}
@@ -75,17 +75,17 @@ static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
75{ 75{
76 u32 tmp, ctl; 76 u32 tmp, ctl;
77 77
78 /* address of the mappings table */ 78 /* address of the mappings table */
79 addr >>= 12; 79 addr >>= 12;
80 tmp = (u32) addr<<4; 80 tmp = (u32) addr<<4;
81 tmp &= ~0xf; 81 tmp &= ~0xf;
82 pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp); 82 pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
83 83
84 /* Enable GART translation for this hammer. */ 84 /* Enable GART translation for this hammer. */
85 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl); 85 pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
86 ctl |= GARTEN; 86 ctl |= GARTEN | DISTLBWALKPRB;
87 ctl &= ~(DISGARTCPU | DISGARTIO); 87 ctl &= ~(DISGARTCPU | DISGARTIO);
88 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl); 88 pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
89} 89}
90 90
91static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size) 91static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index fd5a1f365c95..3cce71413d0b 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -96,11 +96,15 @@
96#define MSR_IA32_MC0_ADDR 0x00000402 96#define MSR_IA32_MC0_ADDR 0x00000402
97#define MSR_IA32_MC0_MISC 0x00000403 97#define MSR_IA32_MC0_MISC 0x00000403
98 98
99#define MSR_AMD64_MC0_MASK 0xc0010044
100
99#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) 101#define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x))
100#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) 102#define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x))
101#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) 103#define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x))
102#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) 104#define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x))
103 105
106#define MSR_AMD64_MCx_MASK(x) (MSR_AMD64_MC0_MASK + (x))
107
104/* These are consecutive and not in the normal 4er MCE bank block */ 108/* These are consecutive and not in the normal 4er MCE bank block */
105#define MSR_IA32_MC0_CTL2 0x00000280 109#define MSR_IA32_MC0_CTL2 0x00000280
106#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) 110#define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x))
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 3d4dab43c994..a50fc9f493b3 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -51,7 +51,7 @@ static inline void numa_remove_cpu(int cpu) { }
51#endif /* CONFIG_NUMA */ 51#endif /* CONFIG_NUMA */
52 52
53#ifdef CONFIG_DEBUG_PER_CPU_MAPS 53#ifdef CONFIG_DEBUG_PER_CPU_MAPS
54struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable); 54void debug_cpumask_set_cpu(int cpu, int node, bool enable);
55#endif 55#endif
56 56
57#endif /* _ASM_X86_NUMA_H */ 57#endif /* _ASM_X86_NUMA_H */
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 86d1ad4962a7..73fb469908c6 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -499,7 +499,7 @@ out:
499 * Don't enable translation yet but enable GART IO and CPU 499 * Don't enable translation yet but enable GART IO and CPU
500 * accesses and set DISTLBWALKPRB since GART table memory is UC. 500 * accesses and set DISTLBWALKPRB since GART table memory is UC.
501 */ 501 */
502 u32 ctl = DISTLBWALKPRB | aper_order << 1; 502 u32 ctl = aper_order << 1;
503 503
504 bus = amd_nb_bus_dev_ranges[i].bus; 504 bus = amd_nb_bus_dev_ranges[i].bus;
505 dev_base = amd_nb_bus_dev_ranges[i].dev_base; 505 dev_base = amd_nb_bus_dev_ranges[i].dev_base;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 0b4be431c620..adee12e0da1f 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -228,6 +228,7 @@
228#include <linux/kthread.h> 228#include <linux/kthread.h>
229#include <linux/jiffies.h> 229#include <linux/jiffies.h>
230#include <linux/acpi.h> 230#include <linux/acpi.h>
231#include <linux/syscore_ops.h>
231 232
232#include <asm/system.h> 233#include <asm/system.h>
233#include <asm/uaccess.h> 234#include <asm/uaccess.h>
@@ -1238,6 +1239,7 @@ static int suspend(int vetoable)
1238 1239
1239 local_irq_disable(); 1240 local_irq_disable();
1240 sysdev_suspend(PMSG_SUSPEND); 1241 sysdev_suspend(PMSG_SUSPEND);
1242 syscore_suspend();
1241 1243
1242 local_irq_enable(); 1244 local_irq_enable();
1243 1245
@@ -1255,6 +1257,7 @@ static int suspend(int vetoable)
1255 apm_error("suspend", err); 1257 apm_error("suspend", err);
1256 err = (err == APM_SUCCESS) ? 0 : -EIO; 1258 err = (err == APM_SUCCESS) ? 0 : -EIO;
1257 1259
1260 syscore_resume();
1258 sysdev_resume(); 1261 sysdev_resume();
1259 local_irq_enable(); 1262 local_irq_enable();
1260 1263
@@ -1280,6 +1283,7 @@ static void standby(void)
1280 1283
1281 local_irq_disable(); 1284 local_irq_disable();
1282 sysdev_suspend(PMSG_SUSPEND); 1285 sysdev_suspend(PMSG_SUSPEND);
1286 syscore_suspend();
1283 local_irq_enable(); 1287 local_irq_enable();
1284 1288
1285 err = set_system_power_state(APM_STATE_STANDBY); 1289 err = set_system_power_state(APM_STATE_STANDBY);
@@ -1287,6 +1291,7 @@ static void standby(void)
1287 apm_error("standby", err); 1291 apm_error("standby", err);
1288 1292
1289 local_irq_disable(); 1293 local_irq_disable();
1294 syscore_resume();
1290 sysdev_resume(); 1295 sysdev_resume();
1291 local_irq_enable(); 1296 local_irq_enable();
1292 1297
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 3ecece0217ef..3532d3bf8105 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -615,6 +615,25 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
615 /* As a rule processors have APIC timer running in deep C states */ 615 /* As a rule processors have APIC timer running in deep C states */
616 if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) 616 if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400))
617 set_cpu_cap(c, X86_FEATURE_ARAT); 617 set_cpu_cap(c, X86_FEATURE_ARAT);
618
619 /*
620 * Disable GART TLB Walk Errors on Fam10h. We do this here
621 * because this is always needed when GART is enabled, even in a
622 * kernel which has no MCE support built in.
623 */
624 if (c->x86 == 0x10) {
625 /*
 626		 * The BIOS should disable GartTlbWlk Errors itself. If
 627		 * it doesn't, do it here as suggested by the BKDG.
628 *
629 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
630 */
631 u64 mask;
632
633 rdmsrl(MSR_AMD64_MCx_MASK(4), mask);
634 mask |= (1 << 10);
635 wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
636 }
618} 637}
619 638
620#ifdef CONFIG_X86_32 639#ifdef CONFIG_X86_32
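
Taken together with the msr-index.h hunk earlier, the MSR write above resolves to bank 4's mask register with bit 10 set. A standalone model of that arithmetic (not kernel code; reading bit 10 as the GartTlbWlkErr mask bit is an assumption based on the surrounding comment, not something this hunk states):

#include <stdint.h>
#include <stdio.h>

#define MSR_AMD64_MC0_MASK	0xc0010044u
#define MSR_AMD64_MCx_MASK(x)	(MSR_AMD64_MC0_MASK + (x))

int main(void)
{
	uint64_t mask = 0;		/* stands in for the rdmsrl() result */

	mask |= (1 << 10);		/* presumably the GartTlbWlkErr mask bit */
	printf("wrmsr(0x%x, 0x%llx)\n",
	       MSR_AMD64_MCx_MASK(4), (unsigned long long)mask);
	return 0;			/* prints: wrmsr(0xc0010048, 0x400) */
}
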
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index eed3673a8656..632e5dc9c9c0 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -586,8 +586,12 @@ static int x86_setup_perfctr(struct perf_event *event)
586 return -EOPNOTSUPP; 586 return -EOPNOTSUPP;
587 } 587 }
588 588
589 /*
590 * Do not allow config1 (extended registers) to propagate,
591 * there's no sane user-space generalization yet:
592 */
589 if (attr->type == PERF_TYPE_RAW) 593 if (attr->type == PERF_TYPE_RAW)
590 return x86_pmu_extra_regs(event->attr.config, event); 594 return 0;
591 595
592 if (attr->type == PERF_TYPE_HW_CACHE) 596 if (attr->type == PERF_TYPE_HW_CACHE)
593 return set_ext_hw_attr(hwc, event); 597 return set_ext_hw_attr(hwc, event);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 461f62bbd774..cf4e369cea67 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -8,7 +8,7 @@ static __initconst const u64 amd_hw_cache_event_ids
8 [ C(L1D) ] = { 8 [ C(L1D) ] = {
9 [ C(OP_READ) ] = { 9 [ C(OP_READ) ] = {
10 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ 10 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
11 [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */ 11 [ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */
12 }, 12 },
13 [ C(OP_WRITE) ] = { 13 [ C(OP_WRITE) ] = {
14 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */ 14 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
@@ -427,7 +427,9 @@ static __initconst const struct x86_pmu amd_pmu = {
427 * 427 *
428 * Exceptions: 428 * Exceptions:
429 * 429 *
430 * 0x000 FP PERF_CTL[3], PERF_CTL[5:3] (*)
430 * 0x003 FP PERF_CTL[3] 431 * 0x003 FP PERF_CTL[3]
432 * 0x004 FP PERF_CTL[3], PERF_CTL[5:3] (*)
431 * 0x00B FP PERF_CTL[3] 433 * 0x00B FP PERF_CTL[3]
432 * 0x00D FP PERF_CTL[3] 434 * 0x00D FP PERF_CTL[3]
433 * 0x023 DE PERF_CTL[2:0] 435 * 0x023 DE PERF_CTL[2:0]
@@ -448,6 +450,8 @@ static __initconst const struct x86_pmu amd_pmu = {
448 * 0x0DF LS PERF_CTL[5:0] 450 * 0x0DF LS PERF_CTL[5:0]
449 * 0x1D6 EX PERF_CTL[5:0] 451 * 0x1D6 EX PERF_CTL[5:0]
450 * 0x1D8 EX PERF_CTL[5:0] 452 * 0x1D8 EX PERF_CTL[5:0]
453 *
 454 * (*) depending on the umask, all FPU counters may be used
451 */ 455 */
452 456
453static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0); 457static struct event_constraint amd_f15_PMC0 = EVENT_CONSTRAINT(0, 0x01, 0);
@@ -460,18 +464,28 @@ static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
460static struct event_constraint * 464static struct event_constraint *
461amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) 465amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
462{ 466{
463 unsigned int event_code = amd_get_event_code(&event->hw); 467 struct hw_perf_event *hwc = &event->hw;
468 unsigned int event_code = amd_get_event_code(hwc);
464 469
465 switch (event_code & AMD_EVENT_TYPE_MASK) { 470 switch (event_code & AMD_EVENT_TYPE_MASK) {
466 case AMD_EVENT_FP: 471 case AMD_EVENT_FP:
467 switch (event_code) { 472 switch (event_code) {
473 case 0x000:
474 if (!(hwc->config & 0x0000F000ULL))
475 break;
476 if (!(hwc->config & 0x00000F00ULL))
477 break;
478 return &amd_f15_PMC3;
479 case 0x004:
480 if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
481 break;
482 return &amd_f15_PMC3;
468 case 0x003: 483 case 0x003:
469 case 0x00B: 484 case 0x00B:
470 case 0x00D: 485 case 0x00D:
471 return &amd_f15_PMC3; 486 return &amd_f15_PMC3;
472 default:
473 return &amd_f15_PMC53;
474 } 487 }
488 return &amd_f15_PMC53;
475 case AMD_EVENT_LS: 489 case AMD_EVENT_LS:
476 case AMD_EVENT_DC: 490 case AMD_EVENT_DC:
477 case AMD_EVENT_EX_LS: 491 case AMD_EVENT_EX_LS:
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 8fc2b2cee1da..43fa20b13817 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -391,12 +391,12 @@ static __initconst const u64 nehalem_hw_cache_event_ids
391{ 391{
392 [ C(L1D) ] = { 392 [ C(L1D) ] = {
393 [ C(OP_READ) ] = { 393 [ C(OP_READ) ] = {
394 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ 394 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
395 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ 395 [ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
396 }, 396 },
397 [ C(OP_WRITE) ] = { 397 [ C(OP_WRITE) ] = {
 398 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ 398 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
399 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ 399 [ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
400 }, 400 },
401 [ C(OP_PREFETCH) ] = { 401 [ C(OP_PREFETCH) ] = {
402 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ 402 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
@@ -1425,6 +1425,7 @@ static __init int intel_pmu_init(void)
1425 1425
1426 case 37: /* 32 nm nehalem, "Clarkdale" */ 1426 case 37: /* 32 nm nehalem, "Clarkdale" */
1427 case 44: /* 32 nm nehalem, "Gulftown" */ 1427 case 44: /* 32 nm nehalem, "Gulftown" */
1428 case 47: /* 32 nm Xeon E7 */
1428 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids, 1429 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
1429 sizeof(hw_cache_event_ids)); 1430 sizeof(hw_cache_event_ids));
1430 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs, 1431 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index c2520e178d32..d1f77e2934a1 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -947,7 +947,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
947 if (!x86_perf_event_set_period(event)) 947 if (!x86_perf_event_set_period(event))
948 continue; 948 continue;
949 if (perf_event_overflow(event, 1, &data, regs)) 949 if (perf_event_overflow(event, 1, &data, regs))
950 p4_pmu_disable_event(event); 950 x86_pmu_stop(event, 0);
951 } 951 }
952 952
953 if (handled) { 953 if (handled) {
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 82ada01625b9..b117efd24f71 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -81,6 +81,9 @@ static u32 gart_unmapped_entry;
81#define AGPEXTERN 81#define AGPEXTERN
82#endif 82#endif
83 83
84/* GART can only remap to physical addresses < 1TB */
85#define GART_MAX_PHYS_ADDR (1ULL << 40)
86
84/* backdoor interface to AGP driver */ 87/* backdoor interface to AGP driver */
85AGPEXTERN int agp_memory_reserved; 88AGPEXTERN int agp_memory_reserved;
86AGPEXTERN __u32 *agp_gatt_table; 89AGPEXTERN __u32 *agp_gatt_table;
@@ -212,9 +215,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
212 size_t size, int dir, unsigned long align_mask) 215 size_t size, int dir, unsigned long align_mask)
213{ 216{
214 unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); 217 unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
215 unsigned long iommu_page = alloc_iommu(dev, npages, align_mask); 218 unsigned long iommu_page;
216 int i; 219 int i;
217 220
221 if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
222 return bad_dma_addr;
223
224 iommu_page = alloc_iommu(dev, npages, align_mask);
218 if (iommu_page == -1) { 225 if (iommu_page == -1) {
219 if (!nonforced_iommu(dev, phys_mem, size)) 226 if (!nonforced_iommu(dev, phys_mem, size))
220 return phys_mem; 227 return phys_mem;
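
The new GART_MAX_PHYS_ADDR check above rejects mappings whose end would pass the 1TB mark before an IOMMU page is even allocated. A standalone model of the boundary semantics (illustrative only; the values are chosen to sit on either side of 2^40):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GART_MAX_PHYS_ADDR (1ULL << 40)	/* 1TB, as in the hunk above */

static bool gart_can_map(uint64_t phys_mem, uint64_t size)
{
	/* mirrors "phys_mem + size > GART_MAX_PHYS_ADDR => bad_dma_addr" */
	return phys_mem + size <= GART_MAX_PHYS_ADDR;
}

int main(void)
{
	printf("%d\n", gart_can_map(0xfffffff000ULL, 0x1000));	/* 1: ends exactly at 1TB */
	printf("%d\n", gart_can_map(0xfffffff000ULL, 0x2000));	/* 0: crosses 1TB */
	return 0;
}
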
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 9559d360fde7..745258dfc4dc 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -213,53 +213,48 @@ int early_cpu_to_node(int cpu)
213 return per_cpu(x86_cpu_to_node_map, cpu); 213 return per_cpu(x86_cpu_to_node_map, cpu);
214} 214}
215 215
216struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable) 216void debug_cpumask_set_cpu(int cpu, int node, bool enable)
217{ 217{
218 int node = early_cpu_to_node(cpu);
219 struct cpumask *mask; 218 struct cpumask *mask;
220 char buf[64]; 219 char buf[64];
221 220
222 if (node == NUMA_NO_NODE) { 221 if (node == NUMA_NO_NODE) {
223 /* early_cpu_to_node() already emits a warning and trace */ 222 /* early_cpu_to_node() already emits a warning and trace */
224 return NULL; 223 return;
225 } 224 }
226 mask = node_to_cpumask_map[node]; 225 mask = node_to_cpumask_map[node];
227 if (!mask) { 226 if (!mask) {
228 pr_err("node_to_cpumask_map[%i] NULL\n", node); 227 pr_err("node_to_cpumask_map[%i] NULL\n", node);
229 dump_stack(); 228 dump_stack();
230 return NULL; 229 return;
231 } 230 }
232 231
232 if (enable)
233 cpumask_set_cpu(cpu, mask);
234 else
235 cpumask_clear_cpu(cpu, mask);
236
233 cpulist_scnprintf(buf, sizeof(buf), mask); 237 cpulist_scnprintf(buf, sizeof(buf), mask);
234 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n", 238 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
235 enable ? "numa_add_cpu" : "numa_remove_cpu", 239 enable ? "numa_add_cpu" : "numa_remove_cpu",
236 cpu, node, buf); 240 cpu, node, buf);
237 return mask; 241 return;
238} 242}
239 243
240# ifndef CONFIG_NUMA_EMU 244# ifndef CONFIG_NUMA_EMU
241static void __cpuinit numa_set_cpumask(int cpu, int enable) 245static void __cpuinit numa_set_cpumask(int cpu, bool enable)
242{ 246{
243 struct cpumask *mask; 247 debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
244
245 mask = debug_cpumask_set_cpu(cpu, enable);
246 if (!mask)
247 return;
248
249 if (enable)
250 cpumask_set_cpu(cpu, mask);
251 else
252 cpumask_clear_cpu(cpu, mask);
253} 248}
254 249
255void __cpuinit numa_add_cpu(int cpu) 250void __cpuinit numa_add_cpu(int cpu)
256{ 251{
257 numa_set_cpumask(cpu, 1); 252 numa_set_cpumask(cpu, true);
258} 253}
259 254
260void __cpuinit numa_remove_cpu(int cpu) 255void __cpuinit numa_remove_cpu(int cpu)
261{ 256{
262 numa_set_cpumask(cpu, 0); 257 numa_set_cpumask(cpu, false);
263} 258}
264# endif /* !CONFIG_NUMA_EMU */ 259# endif /* !CONFIG_NUMA_EMU */
265 260
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index ad091e4cff17..de84cc140379 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -454,10 +454,9 @@ void __cpuinit numa_remove_cpu(int cpu)
454 cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); 454 cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
455} 455}
456#else /* !CONFIG_DEBUG_PER_CPU_MAPS */ 456#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
457static void __cpuinit numa_set_cpumask(int cpu, int enable) 457static void __cpuinit numa_set_cpumask(int cpu, bool enable)
458{ 458{
459 struct cpumask *mask; 459 int nid, physnid;
460 int nid, physnid, i;
461 460
462 nid = early_cpu_to_node(cpu); 461 nid = early_cpu_to_node(cpu);
463 if (nid == NUMA_NO_NODE) { 462 if (nid == NUMA_NO_NODE) {
@@ -467,28 +466,21 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
467 466
468 physnid = emu_nid_to_phys[nid]; 467 physnid = emu_nid_to_phys[nid];
469 468
470 for_each_online_node(i) { 469 for_each_online_node(nid) {
471 if (emu_nid_to_phys[nid] != physnid) 470 if (emu_nid_to_phys[nid] != physnid)
472 continue; 471 continue;
473 472
474 mask = debug_cpumask_set_cpu(cpu, enable); 473 debug_cpumask_set_cpu(cpu, nid, enable);
475 if (!mask)
476 return;
477
478 if (enable)
479 cpumask_set_cpu(cpu, mask);
480 else
481 cpumask_clear_cpu(cpu, mask);
482 } 474 }
483} 475}
484 476
485void __cpuinit numa_add_cpu(int cpu) 477void __cpuinit numa_add_cpu(int cpu)
486{ 478{
487 numa_set_cpumask(cpu, 1); 479 numa_set_cpumask(cpu, true);
488} 480}
489 481
490void __cpuinit numa_remove_cpu(int cpu) 482void __cpuinit numa_remove_cpu(int cpu)
491{ 483{
492 numa_set_cpumask(cpu, 0); 484 numa_set_cpumask(cpu, false);
493} 485}
494#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */ 486#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts
index dc701ea58546..2d6d226f2b10 100644
--- a/arch/x86/platform/ce4100/falconfalls.dts
+++ b/arch/x86/platform/ce4100/falconfalls.dts
@@ -74,6 +74,7 @@
74 compatible = "intel,ce4100-pci", "pci"; 74 compatible = "intel,ce4100-pci", "pci";
75 device_type = "pci"; 75 device_type = "pci";
76 bus-range = <1 1>; 76 bus-range = <1 1>;
77 reg = <0x0800 0x0 0x0 0x0 0x0>;
77 ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>; 78 ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>;
78 79
79 interrupt-parent = <&ioapic2>; 80 interrupt-parent = <&ioapic2>;
@@ -412,6 +413,7 @@
412 #address-cells = <2>; 413 #address-cells = <2>;
413 #size-cells = <1>; 414 #size-cells = <1>;
414 compatible = "isa"; 415 compatible = "isa";
416 reg = <0xf800 0x0 0x0 0x0 0x0>;
415 ranges = <1 0 0 0 0 0x100>; 417 ranges = <1 0 0 0 0 0x100>;
416 418
417 rtc@70 { 419 rtc@70 {
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index 5c0207bf959b..275dbc19e2cf 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -97,11 +97,11 @@ static int __init sfi_parse_mtmr(struct sfi_table_header *table)
97 pentry->freq_hz, pentry->irq); 97 pentry->freq_hz, pentry->irq);
98 if (!pentry->irq) 98 if (!pentry->irq)
99 continue; 99 continue;
100 mp_irq.type = MP_IOAPIC; 100 mp_irq.type = MP_INTSRC;
101 mp_irq.irqtype = mp_INT; 101 mp_irq.irqtype = mp_INT;
102/* triggering mode edge bit 2-3, active high polarity bit 0-1 */ 102/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
103 mp_irq.irqflag = 5; 103 mp_irq.irqflag = 5;
104 mp_irq.srcbus = 0; 104 mp_irq.srcbus = MP_BUS_ISA;
105 mp_irq.srcbusirq = pentry->irq; /* IRQ */ 105 mp_irq.srcbusirq = pentry->irq; /* IRQ */
106 mp_irq.dstapic = MP_APIC_ALL; 106 mp_irq.dstapic = MP_APIC_ALL;
107 mp_irq.dstirq = pentry->irq; 107 mp_irq.dstirq = pentry->irq;
@@ -168,10 +168,10 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
168 for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) { 168 for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
169 pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n", 169 pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
170 totallen, (u32)pentry->phys_addr, pentry->irq); 170 totallen, (u32)pentry->phys_addr, pentry->irq);
171 mp_irq.type = MP_IOAPIC; 171 mp_irq.type = MP_INTSRC;
172 mp_irq.irqtype = mp_INT; 172 mp_irq.irqtype = mp_INT;
173 mp_irq.irqflag = 0xf; /* level trigger and active low */ 173 mp_irq.irqflag = 0xf; /* level trigger and active low */
174 mp_irq.srcbus = 0; 174 mp_irq.srcbus = MP_BUS_ISA;
175 mp_irq.srcbusirq = pentry->irq; /* IRQ */ 175 mp_irq.srcbusirq = pentry->irq; /* IRQ */
176 mp_irq.dstapic = MP_APIC_ALL; 176 mp_irq.dstapic = MP_APIC_ALL;
177 mp_irq.dstirq = pentry->irq; 177 mp_irq.dstirq = pentry->irq;
@@ -282,7 +282,7 @@ void __init x86_mrst_early_setup(void)
282 /* Avoid searching for BIOS MP tables */ 282 /* Avoid searching for BIOS MP tables */
283 x86_init.mpparse.find_smp_config = x86_init_noop; 283 x86_init.mpparse.find_smp_config = x86_init_noop;
284 x86_init.mpparse.get_smp_config = x86_init_uint_noop; 284 x86_init.mpparse.get_smp_config = x86_init_uint_noop;
285 285 set_bit(MP_BUS_ISA, mp_bus_not_pci);
286} 286}
287 287
288/* 288/*
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index a991b57f91fe..aef7af92b28b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1473,16 +1473,20 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1473#endif 1473#endif
1474} 1474}
1475 1475
1476#ifdef CONFIG_X86_32
1476static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) 1477static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1477{ 1478{
1478 unsigned long pfn = pte_pfn(pte);
1479
1480#ifdef CONFIG_X86_32
1481 /* If there's an existing pte, then don't allow _PAGE_RW to be set */ 1479 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1482 if (pte_val_ma(*ptep) & _PAGE_PRESENT) 1480 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1483 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & 1481 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1484 pte_val_ma(pte)); 1482 pte_val_ma(pte));
1485#endif 1483
1484 return pte;
1485}
1486#else /* CONFIG_X86_64 */
1487static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1488{
1489 unsigned long pfn = pte_pfn(pte);
1486 1490
1487 /* 1491 /*
1488 * If the new pfn is within the range of the newly allocated 1492 * If the new pfn is within the range of the newly allocated
@@ -1497,6 +1501,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
1497 1501
1498 return pte; 1502 return pte;
1499} 1503}
1504#endif /* CONFIG_X86_64 */
1500 1505
1501/* Init-time set_pte while constructing initial pagetables, which 1506/* Init-time set_pte while constructing initial pagetables, which
1502 doesn't allow RO pagetable pages to be remapped RW */ 1507 doesn't allow RO pagetable pages to be remapped RW */
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index fa0269a99377..90bac0aac3a5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,7 +227,7 @@ char * __init xen_memory_setup(void)
227 227
228 memcpy(map_raw, map, sizeof(map)); 228 memcpy(map_raw, map, sizeof(map));
229 e820.nr_map = 0; 229 e820.nr_map = 0;
230 xen_extra_mem_start = mem_end; 230 xen_extra_mem_start = max((1ULL << 32), mem_end);
231 for (i = 0; i < memmap.nr_entries; i++) { 231 for (i = 0; i < memmap.nr_entries; i++) {
232 unsigned long long end; 232 unsigned long long end;
233 233
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index d77089df412e..4340ee076bd5 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -64,47 +64,41 @@ asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
64 64
65int arch_show_interrupts(struct seq_file *p, int prec) 65int arch_show_interrupts(struct seq_file *p, int prec)
66{ 66{
67 int j;
68
69 seq_printf(p, "%*s: ", prec, "NMI");
70 for_each_online_cpu(j)
71 seq_printf(p, "%10u ", nmi_count(j));
72 seq_putc(p, '\n');
73 seq_printf(p, "%*s: ", prec, "ERR"); 67 seq_printf(p, "%*s: ", prec, "ERR");
74 seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); 68 seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
75 return 0; 69 return 0;
76} 70}
77 71
78static void xtensa_irq_mask(struct irq_chip *d) 72static void xtensa_irq_mask(struct irq_data *d)
79{ 73{
80 cached_irq_mask &= ~(1 << d->irq); 74 cached_irq_mask &= ~(1 << d->irq);
81 set_sr (cached_irq_mask, INTENABLE); 75 set_sr (cached_irq_mask, INTENABLE);
82} 76}
83 77
84static void xtensa_irq_unmask(struct irq_chip *d) 78static void xtensa_irq_unmask(struct irq_data *d)
85{ 79{
86 cached_irq_mask |= 1 << d->irq; 80 cached_irq_mask |= 1 << d->irq;
87 set_sr (cached_irq_mask, INTENABLE); 81 set_sr (cached_irq_mask, INTENABLE);
88} 82}
89 83
90static void xtensa_irq_enable(struct irq_chip *d) 84static void xtensa_irq_enable(struct irq_data *d)
91{ 85{
92 variant_irq_enable(d->irq); 86 variant_irq_enable(d->irq);
93 xtensa_irq_unmask(d->irq); 87 xtensa_irq_unmask(d->irq);
94} 88}
95 89
96static void xtensa_irq_disable(struct irq_chip *d) 90static void xtensa_irq_disable(struct irq_data *d)
97{ 91{
98 xtensa_irq_mask(d->irq); 92 xtensa_irq_mask(d->irq);
99 variant_irq_disable(d->irq); 93 variant_irq_disable(d->irq);
100} 94}
101 95
102static void xtensa_irq_ack(struct irq_chip *d) 96static void xtensa_irq_ack(struct irq_data *d)
103{ 97{
104 set_sr(1 << d->irq, INTCLEAR); 98 set_sr(1 << d->irq, INTCLEAR);
105} 99}
106 100
107static int xtensa_irq_retrigger(struct irq_chip *d) 101static int xtensa_irq_retrigger(struct irq_data *d)
108{ 102{
109 set_sr (1 << d->irq, INTSET); 103 set_sr (1 << d->irq, INTSET);
110 return 1; 104 return 1;
diff --git a/block/blk-core.c b/block/blk-core.c
index 90f22cc30799..a2e58eeb3549 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,26 +198,13 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
198} 198}
199EXPORT_SYMBOL(blk_dump_rq_flags); 199EXPORT_SYMBOL(blk_dump_rq_flags);
200 200
201/*
202 * Make sure that plugs that were pending when this function was entered,
203 * are now complete and requests pushed to the queue.
204*/
205static inline void queue_sync_plugs(struct request_queue *q)
206{
207 /*
208 * If the current process is plugged and has barriers submitted,
209 * we will livelock if we don't unplug first.
210 */
211 blk_flush_plug(current);
212}
213
214static void blk_delay_work(struct work_struct *work) 201static void blk_delay_work(struct work_struct *work)
215{ 202{
216 struct request_queue *q; 203 struct request_queue *q;
217 204
218 q = container_of(work, struct request_queue, delay_work.work); 205 q = container_of(work, struct request_queue, delay_work.work);
219 spin_lock_irq(q->queue_lock); 206 spin_lock_irq(q->queue_lock);
220 __blk_run_queue(q, false); 207 __blk_run_queue(q);
221 spin_unlock_irq(q->queue_lock); 208 spin_unlock_irq(q->queue_lock);
222} 209}
223 210
@@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
233 */ 220 */
234void blk_delay_queue(struct request_queue *q, unsigned long msecs) 221void blk_delay_queue(struct request_queue *q, unsigned long msecs)
235{ 222{
236 schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs)); 223 queue_delayed_work(kblockd_workqueue, &q->delay_work,
224 msecs_to_jiffies(msecs));
237} 225}
238EXPORT_SYMBOL(blk_delay_queue); 226EXPORT_SYMBOL(blk_delay_queue);
239 227
@@ -251,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
251 WARN_ON(!irqs_disabled()); 239 WARN_ON(!irqs_disabled());
252 240
253 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 241 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
254 __blk_run_queue(q, false); 242 __blk_run_queue(q);
255} 243}
256EXPORT_SYMBOL(blk_start_queue); 244EXPORT_SYMBOL(blk_start_queue);
257 245
@@ -298,38 +286,42 @@ void blk_sync_queue(struct request_queue *q)
298{ 286{
299 del_timer_sync(&q->timeout); 287 del_timer_sync(&q->timeout);
300 cancel_delayed_work_sync(&q->delay_work); 288 cancel_delayed_work_sync(&q->delay_work);
301 queue_sync_plugs(q);
302} 289}
303EXPORT_SYMBOL(blk_sync_queue); 290EXPORT_SYMBOL(blk_sync_queue);
304 291
305/** 292/**
306 * __blk_run_queue - run a single device queue 293 * __blk_run_queue - run a single device queue
307 * @q: The queue to run 294 * @q: The queue to run
308 * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
309 * 295 *
310 * Description: 296 * Description:
311 * See @blk_run_queue. This variant must be called with the queue lock 297 * See @blk_run_queue. This variant must be called with the queue lock
312 * held and interrupts disabled. 298 * held and interrupts disabled.
313 *
314 */ 299 */
315void __blk_run_queue(struct request_queue *q, bool force_kblockd) 300void __blk_run_queue(struct request_queue *q)
316{ 301{
317 if (unlikely(blk_queue_stopped(q))) 302 if (unlikely(blk_queue_stopped(q)))
318 return; 303 return;
319 304
320 /* 305 q->request_fn(q);
321 * Only recurse once to avoid overrunning the stack, let the unplug
322 * handling reinvoke the handler shortly if we already got there.
323 */
324 if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
325 q->request_fn(q);
326 queue_flag_clear(QUEUE_FLAG_REENTER, q);
327 } else
328 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
329} 306}
330EXPORT_SYMBOL(__blk_run_queue); 307EXPORT_SYMBOL(__blk_run_queue);
331 308
332/** 309/**
310 * blk_run_queue_async - run a single device queue in workqueue context
311 * @q: The queue to run
312 *
313 * Description:
 314 * Tells kblockd to perform the equivalent of @blk_run_queue on
 315 * our behalf.
316 */
317void blk_run_queue_async(struct request_queue *q)
318{
319 if (likely(!blk_queue_stopped(q)))
320 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
321}
322EXPORT_SYMBOL(blk_run_queue_async);
323
324/**
333 * blk_run_queue - run a single device queue 325 * blk_run_queue - run a single device queue
334 * @q: The queue to run 326 * @q: The queue to run
335 * 327 *
@@ -342,7 +334,7 @@ void blk_run_queue(struct request_queue *q)
342 unsigned long flags; 334 unsigned long flags;
343 335
344 spin_lock_irqsave(q->queue_lock, flags); 336 spin_lock_irqsave(q->queue_lock, flags);
345 __blk_run_queue(q, false); 337 __blk_run_queue(q);
346 spin_unlock_irqrestore(q->queue_lock, flags); 338 spin_unlock_irqrestore(q->queue_lock, flags);
347} 339}
348EXPORT_SYMBOL(blk_run_queue); 340EXPORT_SYMBOL(blk_run_queue);
@@ -991,7 +983,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
991 blk_queue_end_tag(q, rq); 983 blk_queue_end_tag(q, rq);
992 984
993 add_acct_request(q, rq, where); 985 add_acct_request(q, rq, where);
994 __blk_run_queue(q, false); 986 __blk_run_queue(q);
995 spin_unlock_irqrestore(q->queue_lock, flags); 987 spin_unlock_irqrestore(q->queue_lock, flags);
996} 988}
997EXPORT_SYMBOL(blk_insert_request); 989EXPORT_SYMBOL(blk_insert_request);
@@ -1311,7 +1303,15 @@ get_rq:
1311 1303
1312 plug = current->plug; 1304 plug = current->plug;
1313 if (plug) { 1305 if (plug) {
1314 if (!plug->should_sort && !list_empty(&plug->list)) { 1306 /*
1307 * If this is the first request added after a plug, fire
 1308 * off a plug trace. If others have been added before, check
1309 * if we have multiple devices in this plug. If so, make a
1310 * note to sort the list before dispatch.
1311 */
1312 if (list_empty(&plug->list))
1313 trace_block_plug(q);
1314 else if (!plug->should_sort) {
1315 struct request *__rq; 1315 struct request *__rq;
1316 1316
1317 __rq = list_entry_rq(plug->list.prev); 1317 __rq = list_entry_rq(plug->list.prev);
@@ -1327,7 +1327,7 @@ get_rq:
1327 } else { 1327 } else {
1328 spin_lock_irq(q->queue_lock); 1328 spin_lock_irq(q->queue_lock);
1329 add_acct_request(q, req, where); 1329 add_acct_request(q, req, where);
1330 __blk_run_queue(q, false); 1330 __blk_run_queue(q);
1331out_unlock: 1331out_unlock:
1332 spin_unlock_irq(q->queue_lock); 1332 spin_unlock_irq(q->queue_lock);
1333 } 1333 }
@@ -2644,6 +2644,7 @@ void blk_start_plug(struct blk_plug *plug)
2644 2644
2645 plug->magic = PLUG_MAGIC; 2645 plug->magic = PLUG_MAGIC;
2646 INIT_LIST_HEAD(&plug->list); 2646 INIT_LIST_HEAD(&plug->list);
2647 INIT_LIST_HEAD(&plug->cb_list);
2647 plug->should_sort = 0; 2648 plug->should_sort = 0;
2648 2649
2649 /* 2650 /*
@@ -2668,33 +2669,93 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2668 return !(rqa->q <= rqb->q); 2669 return !(rqa->q <= rqb->q);
2669} 2670}
2670 2671
2671static void flush_plug_list(struct blk_plug *plug) 2672/*
2673 * If 'from_schedule' is true, then postpone the dispatch of requests
 2674 * until a safe kblockd context. We do this to avoid accidentally
 2675 * consuming extra stack in driver dispatch, in places where the original
2676 * plugger did not intend it.
2677 */
2678static void queue_unplugged(struct request_queue *q, unsigned int depth,
2679 bool from_schedule)
2680 __releases(q->queue_lock)
2681{
2682 trace_block_unplug(q, depth, !from_schedule);
2683
2684 /*
2685 * If we are punting this to kblockd, then we can safely drop
2686 * the queue_lock before waking kblockd (which needs to take
2687 * this lock).
2688 */
2689 if (from_schedule) {
2690 spin_unlock(q->queue_lock);
2691 blk_run_queue_async(q);
2692 } else {
2693 __blk_run_queue(q);
2694 spin_unlock(q->queue_lock);
2695 }
2696
2697}
2698
2699static void flush_plug_callbacks(struct blk_plug *plug)
2700{
2701 LIST_HEAD(callbacks);
2702
2703 if (list_empty(&plug->cb_list))
2704 return;
2705
2706 list_splice_init(&plug->cb_list, &callbacks);
2707
2708 while (!list_empty(&callbacks)) {
2709 struct blk_plug_cb *cb = list_first_entry(&callbacks,
2710 struct blk_plug_cb,
2711 list);
2712 list_del(&cb->list);
2713 cb->callback(cb);
2714 }
2715}
2716
2717void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2672{ 2718{
2673 struct request_queue *q; 2719 struct request_queue *q;
2674 unsigned long flags; 2720 unsigned long flags;
2675 struct request *rq; 2721 struct request *rq;
2722 LIST_HEAD(list);
2723 unsigned int depth;
2676 2724
2677 BUG_ON(plug->magic != PLUG_MAGIC); 2725 BUG_ON(plug->magic != PLUG_MAGIC);
2678 2726
2727 flush_plug_callbacks(plug);
2679 if (list_empty(&plug->list)) 2728 if (list_empty(&plug->list))
2680 return; 2729 return;
2681 2730
2682 if (plug->should_sort) 2731 list_splice_init(&plug->list, &list);
2683 list_sort(NULL, &plug->list, plug_rq_cmp); 2732
2733 if (plug->should_sort) {
2734 list_sort(NULL, &list, plug_rq_cmp);
2735 plug->should_sort = 0;
2736 }
2684 2737
2685 q = NULL; 2738 q = NULL;
2739 depth = 0;
2740
2741 /*
2742 * Save and disable interrupts here, to avoid doing it for every
2743 * queue lock we have to take.
2744 */
2686 local_irq_save(flags); 2745 local_irq_save(flags);
2687 while (!list_empty(&plug->list)) { 2746 while (!list_empty(&list)) {
2688 rq = list_entry_rq(plug->list.next); 2747 rq = list_entry_rq(list.next);
2689 list_del_init(&rq->queuelist); 2748 list_del_init(&rq->queuelist);
2690 BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG)); 2749 BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
2691 BUG_ON(!rq->q); 2750 BUG_ON(!rq->q);
2692 if (rq->q != q) { 2751 if (rq->q != q) {
2693 if (q) { 2752 /*
2694 __blk_run_queue(q, false); 2753 * This drops the queue lock
2695 spin_unlock(q->queue_lock); 2754 */
2696 } 2755 if (q)
2756 queue_unplugged(q, depth, from_schedule);
2697 q = rq->q; 2757 q = rq->q;
2758 depth = 0;
2698 spin_lock(q->queue_lock); 2759 spin_lock(q->queue_lock);
2699 } 2760 }
2700 rq->cmd_flags &= ~REQ_ON_PLUG; 2761 rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2706,38 +2767,27 @@ static void flush_plug_list(struct blk_plug *plug)
2706 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); 2767 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
2707 else 2768 else
2708 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); 2769 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
2709 }
2710 2770
2711 if (q) { 2771 depth++;
2712 __blk_run_queue(q, false);
2713 spin_unlock(q->queue_lock);
2714 } 2772 }
2715 2773
2716 BUG_ON(!list_empty(&plug->list)); 2774 /*
2717 local_irq_restore(flags); 2775 * This drops the queue lock
2718} 2776 */
2719 2777 if (q)
2720static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug) 2778 queue_unplugged(q, depth, from_schedule);
2721{
2722 flush_plug_list(plug);
2723 2779
2724 if (plug == tsk->plug) 2780 local_irq_restore(flags);
2725 tsk->plug = NULL;
2726} 2781}
2727 2782
2728void blk_finish_plug(struct blk_plug *plug) 2783void blk_finish_plug(struct blk_plug *plug)
2729{ 2784{
2730 if (plug) 2785 blk_flush_plug_list(plug, false);
2731 __blk_finish_plug(current, plug);
2732}
2733EXPORT_SYMBOL(blk_finish_plug);
2734 2786
2735void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug) 2787 if (plug == current->plug)
2736{ 2788 current->plug = NULL;
2737 __blk_finish_plug(tsk, plug);
2738 tsk->plug = plug;
2739} 2789}
2740EXPORT_SYMBOL(__blk_flush_plug); 2790EXPORT_SYMBOL(blk_finish_plug);
2741 2791
2742int __init blk_dev_init(void) 2792int __init blk_dev_init(void)
2743{ 2793{
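
For context on the reworked plug path above, this is roughly how a caller uses the plugging interface; a hedged sketch only, with submit_my_bios() as a hypothetical placeholder for whatever I/O the caller actually issues:

#include <linux/blkdev.h>

void submit_my_bios(void);		/* hypothetical placeholder, defined elsewhere */

static void write_out_a_batch(void)
{
	struct blk_plug plug;

	blk_start_plug(&plug);		/* requests now collect on current->plug */
	submit_my_bios();
	blk_finish_plug(&plug);		/* blk_flush_plug_list(plug, false):
					 * sort per queue, then dispatch */
}
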
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 7482b7fa863b..81e31819a597 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
55 WARN_ON(irqs_disabled()); 55 WARN_ON(irqs_disabled());
56 spin_lock_irq(q->queue_lock); 56 spin_lock_irq(q->queue_lock);
57 __elv_add_request(q, rq, where); 57 __elv_add_request(q, rq, where);
58 __blk_run_queue(q, false); 58 __blk_run_queue(q);
59 /* the queue is stopped so it won't be plugged+unplugged */ 59 /* the queue is stopped so it won't be plugged+unplugged */
60 if (rq->cmd_type == REQ_TYPE_PM_RESUME) 60 if (rq->cmd_type == REQ_TYPE_PM_RESUME)
61 q->request_fn(q); 61 q->request_fn(q);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index eba4a2790c6c..6c9b5e189e62 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
218 * request_fn may confuse the driver. Always use kblockd. 218 * request_fn may confuse the driver. Always use kblockd.
219 */ 219 */
220 if (queued) 220 if (queued)
221 __blk_run_queue(q, true); 221 blk_run_queue_async(q);
222} 222}
223 223
224/** 224/**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
274 * the comment in flush_end_io(). 274 * the comment in flush_end_io().
275 */ 275 */
276 if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error)) 276 if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
277 __blk_run_queue(q, true); 277 blk_run_queue_async(q);
278} 278}
279 279
280/** 280/**
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 261c75c665ae..bd236313f35d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
66 66
67 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { 67 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
68 blk_set_queue_full(q, BLK_RW_SYNC); 68 blk_set_queue_full(q, BLK_RW_SYNC);
69 } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) { 69 } else {
70 blk_clear_queue_full(q, BLK_RW_SYNC); 70 blk_clear_queue_full(q, BLK_RW_SYNC);
71 wake_up(&rl->wait[BLK_RW_SYNC]); 71 wake_up(&rl->wait[BLK_RW_SYNC]);
72 } 72 }
73 73
74 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { 74 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
75 blk_set_queue_full(q, BLK_RW_ASYNC); 75 blk_set_queue_full(q, BLK_RW_ASYNC);
76 } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) { 76 } else {
77 blk_clear_queue_full(q, BLK_RW_ASYNC); 77 blk_clear_queue_full(q, BLK_RW_ASYNC);
78 wake_up(&rl->wait[BLK_RW_ASYNC]); 78 wake_up(&rl->wait[BLK_RW_ASYNC]);
79 } 79 }
@@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk)
498{ 498{
499 int ret; 499 int ret;
500 struct device *dev = disk_to_dev(disk); 500 struct device *dev = disk_to_dev(disk);
501
502 struct request_queue *q = disk->queue; 501 struct request_queue *q = disk->queue;
503 502
504 if (WARN_ON(!q)) 503 if (WARN_ON(!q))
@@ -509,8 +508,10 @@ int blk_register_queue(struct gendisk *disk)
509 return ret; 508 return ret;
510 509
511 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); 510 ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
512 if (ret < 0) 511 if (ret < 0) {
512 blk_trace_remove_sysfs(dev);
513 return ret; 513 return ret;
514 }
514 515
515 kobject_uevent(&q->kobj, KOBJ_ADD); 516 kobject_uevent(&q->kobj, KOBJ_ADD);
516 517
@@ -521,7 +522,7 @@ int blk_register_queue(struct gendisk *disk)
521 if (ret) { 522 if (ret) {
522 kobject_uevent(&q->kobj, KOBJ_REMOVE); 523 kobject_uevent(&q->kobj, KOBJ_REMOVE);
523 kobject_del(&q->kobj); 524 kobject_del(&q->kobj);
524 blk_trace_remove_sysfs(disk_to_dev(disk)); 525 blk_trace_remove_sysfs(dev);
525 kobject_put(&dev->kobj); 526 kobject_put(&dev->kobj);
526 return ret; 527 return ret;
527 } 528 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3be881ec95ad..5b52011e3a40 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2582,28 +2582,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
2582} 2582}
2583 2583
2584/* 2584/*
2585 * Must always be called with the rcu_read_lock() held 2585 * Call func for each cic attached to this ioc.
2586 */ 2586 */
2587static void 2587static void
2588__call_for_each_cic(struct io_context *ioc, 2588call_for_each_cic(struct io_context *ioc,
2589 void (*func)(struct io_context *, struct cfq_io_context *)) 2589 void (*func)(struct io_context *, struct cfq_io_context *))
2590{ 2590{
2591 struct cfq_io_context *cic; 2591 struct cfq_io_context *cic;
2592 struct hlist_node *n; 2592 struct hlist_node *n;
2593 2593
2594 rcu_read_lock();
2595
2594 hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) 2596 hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2595 func(ioc, cic); 2597 func(ioc, cic);
2596}
2597 2598
2598/*
2599 * Call func for each cic attached to this ioc.
2600 */
2601static void
2602call_for_each_cic(struct io_context *ioc,
2603 void (*func)(struct io_context *, struct cfq_io_context *))
2604{
2605 rcu_read_lock();
2606 __call_for_each_cic(ioc, func);
2607 rcu_read_unlock(); 2599 rcu_read_unlock();
2608} 2600}
2609 2601
@@ -2664,7 +2656,7 @@ static void cfq_free_io_context(struct io_context *ioc)
2664 * should be ok to iterate over the known list, we will see all cic's 2656 * should be ok to iterate over the known list, we will see all cic's
2665 * since no new ones are added. 2657 * since no new ones are added.
2666 */ 2658 */
2667 __call_for_each_cic(ioc, cic_free_func); 2659 call_for_each_cic(ioc, cic_free_func);
2668} 2660}
2669 2661
2670static void cfq_put_cooperator(struct cfq_queue *cfqq) 2662static void cfq_put_cooperator(struct cfq_queue *cfqq)
@@ -3368,7 +3360,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3368 cfqd->busy_queues > 1) { 3360 cfqd->busy_queues > 1) {
3369 cfq_del_timer(cfqd, cfqq); 3361 cfq_del_timer(cfqd, cfqq);
3370 cfq_clear_cfqq_wait_request(cfqq); 3362 cfq_clear_cfqq_wait_request(cfqq);
3371 __blk_run_queue(cfqd->queue, false); 3363 __blk_run_queue(cfqd->queue);
3372 } else { 3364 } else {
3373 cfq_blkiocg_update_idle_time_stats( 3365 cfq_blkiocg_update_idle_time_stats(
3374 &cfqq->cfqg->blkg); 3366 &cfqq->cfqg->blkg);
@@ -3383,7 +3375,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3383 * this new queue is RT and the current one is BE 3375 * this new queue is RT and the current one is BE
3384 */ 3376 */
3385 cfq_preempt_queue(cfqd, cfqq); 3377 cfq_preempt_queue(cfqd, cfqq);
3386 __blk_run_queue(cfqd->queue, false); 3378 __blk_run_queue(cfqd->queue);
3387 } 3379 }
3388} 3380}
3389 3381
@@ -3743,7 +3735,7 @@ static void cfq_kick_queue(struct work_struct *work)
3743 struct request_queue *q = cfqd->queue; 3735 struct request_queue *q = cfqd->queue;
3744 3736
3745 spin_lock_irq(q->queue_lock); 3737 spin_lock_irq(q->queue_lock);
3746 __blk_run_queue(cfqd->queue, false); 3738 __blk_run_queue(cfqd->queue);
3747 spin_unlock_irq(q->queue_lock); 3739 spin_unlock_irq(q->queue_lock);
3748} 3740}
3749 3741
diff --git a/block/elevator.c b/block/elevator.c
index 0cdb4e7ebab4..45ca1e34f582 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
642 */ 642 */
643 elv_drain_elevator(q); 643 elv_drain_elevator(q);
644 while (q->rq.elvpriv) { 644 while (q->rq.elvpriv) {
645 __blk_run_queue(q, false); 645 __blk_run_queue(q);
646 spin_unlock_irq(q->queue_lock); 646 spin_unlock_irq(q->queue_lock);
647 msleep(10); 647 msleep(10);
648 spin_lock_irq(q->queue_lock); 648 spin_lock_irq(q->queue_lock);
@@ -671,7 +671,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
671 q->boundary_rq = rq; 671 q->boundary_rq = rq;
672 } 672 }
673 } else if (!(rq->cmd_flags & REQ_ELVPRIV) && 673 } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
674 where == ELEVATOR_INSERT_SORT) 674 (where == ELEVATOR_INSERT_SORT ||
675 where == ELEVATOR_INSERT_SORT_MERGE))
675 where = ELEVATOR_INSERT_BACK; 676 where = ELEVATOR_INSERT_BACK;
676 677
677 switch (where) { 678 switch (where) {
@@ -695,7 +696,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
695 * with anything. There's no point in delaying queue 696 * with anything. There's no point in delaying queue
696 * processing. 697 * processing.
697 */ 698 */
698 __blk_run_queue(q, false); 699 __blk_run_queue(q);
699 break; 700 break;
700 701
701 case ELEVATOR_INSERT_SORT_MERGE: 702 case ELEVATOR_INSERT_SORT_MERGE:
diff --git a/block/genhd.c b/block/genhd.c
index b364bd038a18..2dd988723d73 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1588,9 +1588,13 @@ static void disk_events_workfn(struct work_struct *work)
1588 1588
1589 spin_unlock_irq(&ev->lock); 1589 spin_unlock_irq(&ev->lock);
1590 1590
1591 /* tell userland about new events */ 1591 /*
1592 * Tell userland about new events. Only the events listed in
1593 * @disk->events are reported. Unlisted events are processed the
1594 * same internally but never get reported to userland.
1595 */
1592 for (i = 0; i < ARRAY_SIZE(disk_uevents); i++) 1596 for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
1593 if (events & (1 << i)) 1597 if (events & disk->events & (1 << i))
1594 envp[nr_events++] = disk_uevents[i]; 1598 envp[nr_events++] = disk_uevents[i];
1595 1599
1596 if (nr_events) 1600 if (nr_events)
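The genhd.c hunk above narrows the uevent loop so only event bits that are both pending and listed in disk->events reach userland. A small standalone model of that mask; the event strings and the two bitmask values are made up for illustration.

#include <stdio.h>

int main(void)
{
	const char *uevent_str[] = { "DISK_MEDIA_CHANGE=1", "DISK_EJECT_REQUEST=1" };
	unsigned int pending   = 0x3;  /* both events occurred internally      */
	unsigned int supported = 0x1;  /* disk->events: only bit 0 advertised  */
	unsigned int i;

	for (i = 0; i < 2; i++)
		if (pending & supported & (1u << i))  /* events & disk->events & (1 << i) */
			printf("report to userland: %s\n", uevent_str[i]);
	return 0;
}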
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b136c9c1e531..449c556274c0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -943,6 +943,10 @@ static int acpi_bus_get_flags(struct acpi_device *device)
943 if (ACPI_SUCCESS(status)) 943 if (ACPI_SUCCESS(status))
944 device->flags.lockable = 1; 944 device->flags.lockable = 1;
945 945
946 /* Power resources cannot be power manageable. */
947 if (device->device_type == ACPI_BUS_TYPE_POWER)
948 return 0;
949
946 /* Presence of _PS0|_PR0 indicates 'power manageable' */ 950 /* Presence of _PS0|_PR0 indicates 'power manageable' */
947 status = acpi_get_handle(device->handle, "_PS0", &temp); 951 status = acpi_get_handle(device->handle, "_PS0", &temp);
948 if (ACPI_FAILURE(status)) 952 if (ACPI_FAILURE(status))
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 39d829cd82dd..71afe0371311 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -150,7 +150,7 @@ static const struct ata_port_info ahci_port_info[] = {
150 { 150 {
151 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | 151 AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
152 AHCI_HFLAG_YES_NCQ), 152 AHCI_HFLAG_YES_NCQ),
153 .flags = AHCI_FLAG_COMMON, 153 .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
154 .pio_mask = ATA_PIO4, 154 .pio_mask = ATA_PIO4,
155 .udma_mask = ATA_UDMA6, 155 .udma_mask = ATA_UDMA6,
156 .port_ops = &ahci_ops, 156 .port_ops = &ahci_ops,
@@ -261,6 +261,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
261 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ 261 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
262 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ 262 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
263 { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ 263 { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
264 { PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
265 { PCI_VDEVICE(INTEL, 0x1e03), board_ahci }, /* Panther Point AHCI */
266 { PCI_VDEVICE(INTEL, 0x1e04), board_ahci }, /* Panther Point RAID */
267 { PCI_VDEVICE(INTEL, 0x1e05), board_ahci }, /* Panther Point RAID */
268 { PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
269 { PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
264 270
265 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 271 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
266 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 272 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 39865009c251..12c5282e7fca 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -229,6 +229,10 @@ enum {
229 EM_CTL_ALHD = (1 << 26), /* Activity LED */ 229 EM_CTL_ALHD = (1 << 26), /* Activity LED */
230 EM_CTL_XMT = (1 << 25), /* Transmit Only */ 230 EM_CTL_XMT = (1 << 25), /* Transmit Only */
231 EM_CTL_SMB = (1 << 24), /* Single Message Buffer */ 231 EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
232 EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */
233 EM_CTL_SES = (1 << 18), /* SES-2 messages supported */
234 EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */
235 EM_CTL_LED = (1 << 16), /* LED messages supported */
232 236
233 /* em message type */ 237 /* em message type */
234 EM_MSG_TYPE_LED = (1 << 0), /* LED */ 238 EM_MSG_TYPE_LED = (1 << 0), /* LED */
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 0bc3fd6c3fdb..6f6e7718b05c 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -309,6 +309,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
309 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata }, 309 { 0x8086, 0x1d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
310 /* SATA Controller IDE (PBG) */ 310 /* SATA Controller IDE (PBG) */
311 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, 311 { 0x8086, 0x1d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
312 /* SATA Controller IDE (Panther Point) */
313 { 0x8086, 0x1e00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
314 /* SATA Controller IDE (Panther Point) */
315 { 0x8086, 0x1e01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
316 /* SATA Controller IDE (Panther Point) */
317 { 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
318 /* SATA Controller IDE (Panther Point) */
319 { 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
312 { } /* terminate list */ 320 { } /* terminate list */
313}; 321};
314 322
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 26d452339e98..ff9d832a163d 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -109,6 +109,8 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
109static ssize_t ahci_store_em_buffer(struct device *dev, 109static ssize_t ahci_store_em_buffer(struct device *dev,
110 struct device_attribute *attr, 110 struct device_attribute *attr,
111 const char *buf, size_t size); 111 const char *buf, size_t size);
112static ssize_t ahci_show_em_supported(struct device *dev,
113 struct device_attribute *attr, char *buf);
112 114
113static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); 115static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
114static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); 116static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -116,6 +118,7 @@ static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
116static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); 118static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
117static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, 119static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
118 ahci_read_em_buffer, ahci_store_em_buffer); 120 ahci_read_em_buffer, ahci_store_em_buffer);
121static DEVICE_ATTR(em_message_supported, S_IRUGO, ahci_show_em_supported, NULL);
119 122
120struct device_attribute *ahci_shost_attrs[] = { 123struct device_attribute *ahci_shost_attrs[] = {
121 &dev_attr_link_power_management_policy, 124 &dev_attr_link_power_management_policy,
@@ -126,6 +129,7 @@ struct device_attribute *ahci_shost_attrs[] = {
126 &dev_attr_ahci_host_version, 129 &dev_attr_ahci_host_version,
127 &dev_attr_ahci_port_cmd, 130 &dev_attr_ahci_port_cmd,
128 &dev_attr_em_buffer, 131 &dev_attr_em_buffer,
132 &dev_attr_em_message_supported,
129 NULL 133 NULL
130}; 134};
131EXPORT_SYMBOL_GPL(ahci_shost_attrs); 135EXPORT_SYMBOL_GPL(ahci_shost_attrs);
@@ -343,6 +347,24 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
343 return size; 347 return size;
344} 348}
345 349
350static ssize_t ahci_show_em_supported(struct device *dev,
351 struct device_attribute *attr, char *buf)
352{
353 struct Scsi_Host *shost = class_to_shost(dev);
354 struct ata_port *ap = ata_shost_to_port(shost);
355 struct ahci_host_priv *hpriv = ap->host->private_data;
356 void __iomem *mmio = hpriv->mmio;
357 u32 em_ctl;
358
359 em_ctl = readl(mmio + HOST_EM_CTL);
360
361 return sprintf(buf, "%s%s%s%s\n",
362 em_ctl & EM_CTL_LED ? "led " : "",
363 em_ctl & EM_CTL_SAFTE ? "saf-te " : "",
364 em_ctl & EM_CTL_SES ? "ses-2 " : "",
365 em_ctl & EM_CTL_SGPIO ? "sgpio " : "");
366}
367
346/** 368/**
347 * ahci_save_initial_config - Save and fixup initial config values 369 * ahci_save_initial_config - Save and fixup initial config values
348 * @dev: target AHCI device 370 * @dev: target AHCI device
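ahci_show_em_supported() above simply decodes four capability bits of the enclosure-management control register into a space-separated list. The same decode as a standalone program, with the register value hard-coded instead of being read from MMIO; the bit positions mirror the EM_CTL_* definitions added in ahci.h.

#include <stdio.h>

#define EM_CTL_SGPIO  (1u << 19)
#define EM_CTL_SES    (1u << 18)
#define EM_CTL_SAFTE  (1u << 17)
#define EM_CTL_LED    (1u << 16)

int main(void)
{
	unsigned int em_ctl = EM_CTL_LED | EM_CTL_SGPIO;  /* pretend readl(mmio + HOST_EM_CTL) */
	char buf[64];

	snprintf(buf, sizeof(buf), "%s%s%s%s",
		 em_ctl & EM_CTL_LED   ? "led "    : "",
		 em_ctl & EM_CTL_SAFTE ? "saf-te " : "",
		 em_ctl & EM_CTL_SES   ? "ses-2 "  : "",
		 em_ctl & EM_CTL_SGPIO ? "sgpio "  : "");
	printf("em_message_supported: %s\n", buf);
	return 0;
}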
@@ -539,6 +561,27 @@ void ahci_start_engine(struct ata_port *ap)
539{ 561{
540 void __iomem *port_mmio = ahci_port_base(ap); 562 void __iomem *port_mmio = ahci_port_base(ap);
541 u32 tmp; 563 u32 tmp;
564 u8 status;
565
566 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
567
568 /*
 569 * At the end of section 10.1 of the AHCI spec (rev 1.3), it states:
 570 * Software shall not set PxCMD.ST to 1 until it is determined
 571 * that a functional device is present on the port as determined by
 572 * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h
 573 *
 574 * Even though most AHCI host controllers work without this check,
 575 * some specific controllers will fail under this condition
576 */
577 if (status & (ATA_BUSY | ATA_DRQ))
578 return;
579 else {
580 ahci_scr_read(&ap->link, SCR_STATUS, &tmp);
581
582 if ((tmp & 0xf) != 0x3)
583 return;
584 }
542 585
543 /* start DMA */ 586 /* start DMA */
544 tmp = readl(port_mmio + PORT_CMD); 587 tmp = readl(port_mmio + PORT_CMD);
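The ahci_start_engine() hunk above refuses to set PxCMD.ST unless the taskfile status shows neither BSY nor DRQ and PxSSTS.DET reads 3h (device present, phy established). A standalone predicate with the same checks; the ATA_BUSY/ATA_DRQ values match the kernel's status-bit definitions, and the register values passed in main() are sample inputs.

#include <stdbool.h>
#include <stdio.h>

#define ATA_BUSY  (1u << 7)   /* BSY in the taskfile status byte */
#define ATA_DRQ   (1u << 3)   /* DRQ in the taskfile status byte */

static bool may_start_engine(unsigned char tf_status, unsigned int ssts)
{
	if (tf_status & (ATA_BUSY | ATA_DRQ))
		return false;               /* device not ready for commands */
	return (ssts & 0xf) == 0x3;         /* PxSSTS.DET must be 3h        */
}

int main(void)
{
	printf("idle, link up   -> %d\n", may_start_engine(0x50, 0x123)); /* 1 */
	printf("busy device     -> %d\n", may_start_engine(0x80, 0x123)); /* 0 */
	printf("no device (DET) -> %d\n", may_start_engine(0x50, 0x000)); /* 0 */
	return 0;
}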
@@ -1897,7 +1940,17 @@ static void ahci_pmp_attach(struct ata_port *ap)
1897 ahci_enable_fbs(ap); 1940 ahci_enable_fbs(ap);
1898 1941
1899 pp->intr_mask |= PORT_IRQ_BAD_PMP; 1942 pp->intr_mask |= PORT_IRQ_BAD_PMP;
1900 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1943
1944 /*
1945 * We must not change the port interrupt mask register if the
1946 * port is marked frozen, the value in pp->intr_mask will be
1947 * restored later when the port is thawed.
1948 *
1949 * Note that during initialization, the port is marked as
1950 * frozen since the irq handler is not yet registered.
1951 */
1952 if (!(ap->pflags & ATA_PFLAG_FROZEN))
1953 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1901} 1954}
1902 1955
1903static void ahci_pmp_detach(struct ata_port *ap) 1956static void ahci_pmp_detach(struct ata_port *ap)
@@ -1913,7 +1966,10 @@ static void ahci_pmp_detach(struct ata_port *ap)
1913 writel(cmd, port_mmio + PORT_CMD); 1966 writel(cmd, port_mmio + PORT_CMD);
1914 1967
1915 pp->intr_mask &= ~PORT_IRQ_BAD_PMP; 1968 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
1916 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); 1969
1970 /* see comment above in ahci_pmp_attach() */
1971 if (!(ap->pflags & ATA_PFLAG_FROZEN))
1972 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1917} 1973}
1918 1974
1919int ahci_port_resume(struct ata_port *ap) 1975int ahci_port_resume(struct ata_port *ap)
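Both PMP hunks above keep the cached pp->intr_mask up to date but skip the PORT_IRQ_MASK write while the port is frozen; the cached value is written back when the port is thawed. A tiny standalone model of that "cache now, write on thaw" pattern, with the hardware write modeled as a printf; the names are illustrative, not the libata API.

#include <stdbool.h>
#include <stdio.h>

struct port {
	unsigned int intr_mask;   /* cached mask, always kept current        */
	bool frozen;              /* irq handler not (yet) allowed to run    */
};

static void hw_write_irq_mask(unsigned int mask)
{
	printf("PORT_IRQ_MASK <- 0x%x\n", mask);
}

static void set_intr_bit(struct port *p, unsigned int bit)
{
	p->intr_mask |= bit;
	if (!p->frozen)                  /* defer the MMIO write while frozen */
		hw_write_irq_mask(p->intr_mask);
}

static void thaw(struct port *p)
{
	p->frozen = false;
	hw_write_irq_mask(p->intr_mask); /* restore the cached value */
}

int main(void)
{
	struct port p = { .intr_mask = 0x1, .frozen = true };

	set_intr_bit(&p, 0x8);  /* no hardware write yet */
	thaw(&p);               /* single write with the final mask */
	return 0;
}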
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 423c0a6952b2..76c3c15cb1e6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4139,6 +4139,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4139 */ 4139 */
4140 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, 4140 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
4141 { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER }, 4141 { "PIONEER DVD-RW DVR-212D", "1.28", ATA_HORKAGE_NOSETXFER },
4142 { "PIONEER DVD-RW DVR-216D", "1.08", ATA_HORKAGE_NOSETXFER },
4142 4143
4143 /* End Marker */ 4144 /* End Marker */
4144 { } 4145 { }
@@ -5480,7 +5481,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
5480 if (!ap) 5481 if (!ap)
5481 return NULL; 5482 return NULL;
5482 5483
5483 ap->pflags |= ATA_PFLAG_INITIALIZING; 5484 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5484 ap->lock = &host->lock; 5485 ap->lock = &host->lock;
5485 ap->print_id = -1; 5486 ap->print_id = -1;
5486 ap->host = host; 5487 ap->host = host;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 88cd22fa65cd..f26f2fe3480a 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3316,6 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3316 struct ata_eh_context *ehc = &link->eh_context; 3316 struct ata_eh_context *ehc = &link->eh_context;
3317 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; 3317 struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
3318 enum ata_lpm_policy old_policy = link->lpm_policy; 3318 enum ata_lpm_policy old_policy = link->lpm_policy;
3319 bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM;
3319 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; 3320 unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
3320 unsigned int err_mask; 3321 unsigned int err_mask;
3321 int rc; 3322 int rc;
@@ -3332,7 +3333,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3332 */ 3333 */
3333 ata_for_each_dev(dev, link, ENABLED) { 3334 ata_for_each_dev(dev, link, ENABLED) {
3334 bool hipm = ata_id_has_hipm(dev->id); 3335 bool hipm = ata_id_has_hipm(dev->id);
3335 bool dipm = ata_id_has_dipm(dev->id); 3336 bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;
3336 3337
3337 /* find the first enabled and LPM enabled devices */ 3338 /* find the first enabled and LPM enabled devices */
3338 if (!link_dev) 3339 if (!link_dev)
@@ -3389,7 +3390,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
3389 3390
3390 /* host config updated, enable DIPM if transitioning to MIN_POWER */ 3391 /* host config updated, enable DIPM if transitioning to MIN_POWER */
3391 ata_for_each_dev(dev, link, ENABLED) { 3392 ata_for_each_dev(dev, link, ENABLED) {
3392 if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) { 3393 if (policy == ATA_LPM_MIN_POWER && !no_dipm &&
3394 ata_id_has_dipm(dev->id)) {
3393 err_mask = ata_dev_set_feature(dev, 3395 err_mask = ata_dev_set_feature(dev,
3394 SETFEATURES_SATA_ENABLE, SATA_DIPM); 3396 SETFEATURES_SATA_ENABLE, SATA_DIPM);
3395 if (err_mask && err_mask != AC_ERR_DEV) { 3397 if (err_mask && err_mask != AC_ERR_DEV) {
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 0da0dcc7dd08..a5fdbdcb0faf 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -33,11 +33,12 @@
33 33
34 34
35#define DRV_NAME "pata_at91" 35#define DRV_NAME "pata_at91"
36#define DRV_VERSION "0.1" 36#define DRV_VERSION "0.2"
37 37
38#define CF_IDE_OFFSET 0x00c00000 38#define CF_IDE_OFFSET 0x00c00000
39#define CF_ALT_IDE_OFFSET 0x00e00000 39#define CF_ALT_IDE_OFFSET 0x00e00000
40#define CF_IDE_RES_SIZE 0x08 40#define CF_IDE_RES_SIZE 0x08
41#define NCS_RD_PULSE_LIMIT 0x3f /* maximal value for pulse bitfields */
41 42
42struct at91_ide_info { 43struct at91_ide_info {
43 unsigned long mode; 44 unsigned long mode;
@@ -49,8 +50,18 @@ struct at91_ide_info {
49 void __iomem *alt_addr; 50 void __iomem *alt_addr;
50}; 51};
51 52
52static const struct ata_timing initial_timing = 53static const struct ata_timing initial_timing = {
53 {XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0}; 54 .mode = XFER_PIO_0,
55 .setup = 70,
56 .act8b = 290,
57 .rec8b = 240,
58 .cyc8b = 600,
59 .active = 165,
60 .recover = 150,
61 .dmack_hold = 0,
62 .cycle = 600,
63 .udma = 0
64};
54 65
55static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz) 66static unsigned long calc_mck_cycles(unsigned long ns, unsigned long mck_hz)
56{ 67{
@@ -109,6 +120,11 @@ static void set_smc_timing(struct device *dev,
109 /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */ 120 /* (CS0, CS1, DIR, OE) <= (CFCE1, CFCE2, CFRNW, NCSX) timings */
110 ncs_read_setup = 1; 121 ncs_read_setup = 1;
111 ncs_read_pulse = read_cycle - 2; 122 ncs_read_pulse = read_cycle - 2;
123 if (ncs_read_pulse > NCS_RD_PULSE_LIMIT) {
124 ncs_read_pulse = NCS_RD_PULSE_LIMIT;
125 dev_warn(dev, "ncs_read_pulse limited to maximal value %lu\n",
126 ncs_read_pulse);
127 }
112 128
113 /* Write timings same as read timings */ 129 /* Write timings same as read timings */
114 write_cycle = read_cycle; 130 write_cycle = read_cycle;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index fbc5b6e7c591..abe3ab709e87 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -63,6 +63,7 @@ void device_pm_init(struct device *dev)
63 dev->power.wakeup = NULL; 63 dev->power.wakeup = NULL;
64 spin_lock_init(&dev->power.lock); 64 spin_lock_init(&dev->power.lock);
65 pm_runtime_init(dev); 65 pm_runtime_init(dev);
66 INIT_LIST_HEAD(&dev->power.entry);
66} 67}
67 68
68/** 69/**
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 4573c83df6dd..abbbd33e8d8a 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -258,7 +258,7 @@ void device_set_wakeup_capable(struct device *dev, bool capable)
258 if (!!dev->power.can_wakeup == !!capable) 258 if (!!dev->power.can_wakeup == !!capable)
259 return; 259 return;
260 260
261 if (device_is_registered(dev)) { 261 if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
262 if (capable) { 262 if (capable) {
263 if (wakeup_sysfs_add(dev)) 263 if (wakeup_sysfs_add(dev))
264 return; 264 return;
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 90af2943f9e4..c126db3cb7d1 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -73,6 +73,7 @@ int syscore_suspend(void)
73 73
74 return ret; 74 return ret;
75} 75}
76EXPORT_SYMBOL_GPL(syscore_suspend);
76 77
77/** 78/**
78 * syscore_resume - Execute all the registered system core resume callbacks. 79 * syscore_resume - Execute all the registered system core resume callbacks.
@@ -95,6 +96,7 @@ void syscore_resume(void)
95 "Interrupts enabled after %pF\n", ops->resume); 96 "Interrupts enabled after %pF\n", ops->resume);
96 } 97 }
97} 98}
99EXPORT_SYMBOL_GPL(syscore_resume);
98#endif /* CONFIG_PM_SLEEP */ 100#endif /* CONFIG_PM_SLEEP */
99 101
100/** 102/**
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 012cba0d6d96..b072648dc3f6 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -115,6 +115,9 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
115 struct agp_memory *new; 115 struct agp_memory *new;
116 unsigned long alloc_size = num_agp_pages*sizeof(struct page *); 116 unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
117 117
118 if (INT_MAX/sizeof(struct page *) < num_agp_pages)
119 return NULL;
120
118 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL); 121 new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
119 if (new == NULL) 122 if (new == NULL)
120 return NULL; 123 return NULL;
@@ -234,11 +237,14 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
234 int scratch_pages; 237 int scratch_pages;
235 struct agp_memory *new; 238 struct agp_memory *new;
236 size_t i; 239 size_t i;
240 int cur_memory;
237 241
238 if (!bridge) 242 if (!bridge)
239 return NULL; 243 return NULL;
240 244
241 if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp) 245 cur_memory = atomic_read(&bridge->current_memory_agp);
246 if ((cur_memory + page_count > bridge->max_memory_agp) ||
247 (cur_memory + page_count < page_count))
242 return NULL; 248 return NULL;
243 249
244 if (type >= AGP_USER_TYPES) { 250 if (type >= AGP_USER_TYPES) {
@@ -1089,8 +1095,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
1089 return -EINVAL; 1095 return -EINVAL;
1090 } 1096 }
1091 1097
1092 /* AK: could wrap */ 1098 if (((pg_start + mem->page_count) > num_entries) ||
1093 if ((pg_start + mem->page_count) > num_entries) 1099 ((pg_start + mem->page_count) < pg_start))
1094 return -EINVAL; 1100 return -EINVAL;
1095 1101
1096 j = pg_start; 1102 j = pg_start;
@@ -1124,7 +1130,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1124{ 1130{
1125 size_t i; 1131 size_t i;
1126 struct agp_bridge_data *bridge; 1132 struct agp_bridge_data *bridge;
1127 int mask_type; 1133 int mask_type, num_entries;
1128 1134
1129 bridge = mem->bridge; 1135 bridge = mem->bridge;
1130 if (!bridge) 1136 if (!bridge)
@@ -1136,6 +1142,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
1136 if (type != mem->type) 1142 if (type != mem->type)
1137 return -EINVAL; 1143 return -EINVAL;
1138 1144
1145 num_entries = agp_num_entries();
1146 if (((pg_start + mem->page_count) > num_entries) ||
1147 ((pg_start + mem->page_count) < pg_start))
1148 return -EINVAL;
1149
1139 mask_type = bridge->driver->agp_type_to_mask_type(bridge, type); 1150 mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
1140 if (mask_type != 0) { 1151 if (mask_type != 0) {
1141 /* The generic routines know nothing of memory types */ 1152 /* The generic routines know nothing of memory types */
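The agp/generic.c hunks above all guard the same class of bug: a user-controlled page count can overflow either the page-array size multiplication or the pg_start + page_count sum, making an oversized request look small. A standalone sketch of both checks; the limits and values in main() are examples, not the driver's data.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct page;  /* opaque; only a pointer size is needed for the check */

/* Reject counts whose page-array size would overflow the multiplication. */
static bool alloc_size_ok(unsigned long num_pages)
{
	return num_pages <= INT_MAX / sizeof(struct page *);
}

/* Reject ranges where pg_start + count wraps or exceeds the table size. */
static bool range_ok(unsigned long pg_start, unsigned long count,
		     unsigned long num_entries)
{
	if (pg_start + count < pg_start)      /* wrapped around */
		return false;
	return pg_start + count <= num_entries;
}

int main(void)
{
	printf("sane alloc:    %d\n", alloc_size_ok(1024));
	printf("huge alloc:    %d\n", alloc_size_ok(ULONG_MAX / 2));
	printf("sane range:    %d\n", range_ok(10, 100, 4096));
	printf("wrapped range: %d\n", range_ok(10, ULONG_MAX - 5, 4096));
	return 0;
}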
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 84b164d1eb2b..838568a7dbf5 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1280,18 +1280,7 @@ static void unplug_port(struct port *port)
1280 spin_lock_irq(&pdrvdata_lock); 1280 spin_lock_irq(&pdrvdata_lock);
1281 list_del(&port->cons.list); 1281 list_del(&port->cons.list);
1282 spin_unlock_irq(&pdrvdata_lock); 1282 spin_unlock_irq(&pdrvdata_lock);
1283#if 0
1284 /*
1285 * hvc_remove() not called as removing one hvc port
1286 * results in other hvc ports getting frozen.
1287 *
1288 * Once this is resolved in hvc, this functionality
1289 * will be enabled. Till that is done, the -EPIPE
1290 * return from get_chars() above will help
1291 * hvc_console.c to clean up on ports we remove here.
1292 */
1293 hvc_remove(port->cons.hvc); 1283 hvc_remove(port->cons.hvc);
1294#endif
1295 } 1284 }
1296 1285
1297 /* Remove unused data this port might have received. */ 1286 /* Remove unused data this port might have received. */
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index d77005849af8..219d88a0eeae 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -142,6 +142,7 @@ static int cn_call_callback(struct sk_buff *skb)
142 cbq->callback(msg, nsp); 142 cbq->callback(msg, nsp);
143 kfree_skb(skb); 143 kfree_skb(skb);
144 cn_queue_release_callback(cbq); 144 cn_queue_release_callback(cbq);
145 err = 0;
145 } 146 }
146 147
147 return err; 148 return err;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 31e71c4fc831..9a8bebcf6b17 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -211,8 +211,6 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
211 211
212 scrubval = scrubval & 0x001F; 212 scrubval = scrubval & 0x001F;
213 213
214 amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);
215
216 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { 214 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
217 if (scrubrates[i].scrubval == scrubval) { 215 if (scrubrates[i].scrubval == scrubval) {
218 retval = scrubrates[i].bandwidth; 216 retval = scrubrates[i].bandwidth;
@@ -933,25 +931,74 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
933/* On F10h and later ErrAddr is MC4_ADDR[47:1] */ 931/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
934static u64 get_error_address(struct mce *m) 932static u64 get_error_address(struct mce *m)
935{ 933{
934 struct cpuinfo_x86 *c = &boot_cpu_data;
935 u64 addr;
936 u8 start_bit = 1; 936 u8 start_bit = 1;
937 u8 end_bit = 47; 937 u8 end_bit = 47;
938 938
939 if (boot_cpu_data.x86 == 0xf) { 939 if (c->x86 == 0xf) {
940 start_bit = 3; 940 start_bit = 3;
941 end_bit = 39; 941 end_bit = 39;
942 } 942 }
943 943
944 return m->addr & GENMASK(start_bit, end_bit); 944 addr = m->addr & GENMASK(start_bit, end_bit);
945
946 /*
947 * Erratum 637 workaround
948 */
949 if (c->x86 == 0x15) {
950 struct amd64_pvt *pvt;
951 u64 cc6_base, tmp_addr;
952 u32 tmp;
953 u8 mce_nid, intlv_en;
954
955 if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
956 return addr;
957
958 mce_nid = amd_get_nb_id(m->extcpu);
959 pvt = mcis[mce_nid]->pvt_info;
960
961 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
962 intlv_en = tmp >> 21 & 0x7;
963
964 /* add [47:27] + 3 trailing bits */
965 cc6_base = (tmp & GENMASK(0, 20)) << 3;
966
967 /* reverse and add DramIntlvEn */
968 cc6_base |= intlv_en ^ 0x7;
969
970 /* pin at [47:24] */
971 cc6_base <<= 24;
972
973 if (!intlv_en)
974 return cc6_base | (addr & GENMASK(0, 23));
975
976 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
977
978 /* faster log2 */
979 tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);
980
981 /* OR DramIntlvSel into bits [14:12] */
982 tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;
983
984 /* add remaining [11:0] bits from original MC4_ADDR */
985 tmp_addr |= addr & GENMASK(0, 11);
986
987 return cc6_base | tmp_addr;
988 }
989
990 return addr;
945} 991}
946 992
947static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) 993static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
948{ 994{
995 struct cpuinfo_x86 *c = &boot_cpu_data;
949 int off = range << 3; 996 int off = range << 3;
950 997
951 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); 998 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
952 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); 999 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
953 1000
954 if (boot_cpu_data.x86 == 0xf) 1001 if (c->x86 == 0xf)
955 return; 1002 return;
956 1003
957 if (!dram_rw(pvt, range)) 1004 if (!dram_rw(pvt, range))
@@ -959,6 +1006,31 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
959 1006
960 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); 1007 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
961 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); 1008 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1009
1010 /* Factor in CC6 save area by reading dst node's limit reg */
1011 if (c->x86 == 0x15) {
1012 struct pci_dev *f1 = NULL;
1013 u8 nid = dram_dst_node(pvt, range);
1014 u32 llim;
1015
1016 f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
1017 if (WARN_ON(!f1))
1018 return;
1019
1020 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1021
1022 pvt->ranges[range].lim.lo &= GENMASK(0, 15);
1023
1024 /* {[39:27],111b} */
1025 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1026
1027 pvt->ranges[range].lim.hi &= GENMASK(0, 7);
1028
1029 /* [47:40] */
1030 pvt->ranges[range].lim.hi |= llim >> 13;
1031
1032 pci_dev_put(f1);
1033 }
962} 1034}
963 1035
964static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, 1036static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
@@ -1403,12 +1475,8 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1403 return -EINVAL; 1475 return -EINVAL;
1404 } 1476 }
1405 1477
1406 if (intlv_en && 1478 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1407 (intlv_sel != ((sys_addr >> 12) & intlv_en))) {
1408 amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n",
1409 intlv_en, intlv_sel);
1410 return -EINVAL; 1479 return -EINVAL;
1411 }
1412 1480
1413 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr); 1481 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1414 1482
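The erratum 637 workaround above is mostly bit-field surgery on the 48-bit MC4 address: detect the CC6 save-area pattern in bits [47:24], then rebuild the address from the node's local-limit register. A standalone helper for the extraction step, written with explicit shifts and masks rather than the driver's GENMASK() macro (whose argument order is driver-local); the address in main() is made up.

#include <stdint.h>
#include <stdio.h>

/* Extract bits [hi:lo] of a 64-bit value, shifted down to bit 0. */
static uint64_t bits(uint64_t v, unsigned int lo, unsigned int hi)
{
	uint64_t mask = (hi - lo == 63) ? ~0ULL : ((1ULL << (hi - lo + 1)) - 1);
	return (v >> lo) & mask;
}

int main(void)
{
	/* A made-up MCA address whose bits [47:24] carry the CC6 pattern. */
	uint64_t addr = (0x00fdf7ULL << 24) | 0x123456;

	if (bits(addr, 24, 47) == 0x00fdf7)
		printf("CC6 save-area access, low bits: 0x%llx\n",
		       (unsigned long long)bits(addr, 0, 23));
	else
		printf("normal DRAM address\n");
	return 0;
}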
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 11be36a311eb..9a666cb985b2 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -196,6 +196,9 @@
196 196
197#define DCT_CFG_SEL 0x10C 197#define DCT_CFG_SEL 0x10C
198 198
199#define DRAM_LOCAL_NODE_BASE 0x120
200#define DRAM_LOCAL_NODE_LIM 0x124
201
199#define DRAM_BASE_HI 0x140 202#define DRAM_BASE_HI 0x140
200#define DRAM_LIMIT_HI 0x144 203#define DRAM_LIMIT_HI 0x144
201 204
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 26343fd46596..29ffa350bfbe 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -458,13 +458,13 @@ static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
458 return -EINVAL; 458 return -EINVAL;
459 459
460 new_bw = mci->set_sdram_scrub_rate(mci, bandwidth); 460 new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
461 if (new_bw >= 0) { 461 if (new_bw < 0) {
462 edac_printk(KERN_DEBUG, EDAC_MC, "Scrub rate set to %d\n", new_bw); 462 edac_printk(KERN_WARNING, EDAC_MC,
463 return count; 463 "Error setting scrub rate to: %lu\n", bandwidth);
464 return -EINVAL;
464 } 465 }
465 466
466 edac_printk(KERN_DEBUG, EDAC_MC, "Error setting scrub rate to: %lu\n", bandwidth); 467 return count;
467 return -EINVAL;
468} 468}
469 469
470/* 470/*
@@ -483,7 +483,6 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
483 return bandwidth; 483 return bandwidth;
484 } 484 }
485 485
486 edac_printk(KERN_DEBUG, EDAC_MC, "Read scrub rate: %d\n", bandwidth);
487 return sprintf(data, "%d\n", bandwidth); 486 return sprintf(data, "%d\n", bandwidth);
488} 487}
489 488
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 432fc04c6bff..e522c702b04e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3771,8 +3771,11 @@ static bool g4x_compute_wm0(struct drm_device *dev,
3771 int entries, tlb_miss; 3771 int entries, tlb_miss;
3772 3772
3773 crtc = intel_get_crtc_for_plane(dev, plane); 3773 crtc = intel_get_crtc_for_plane(dev, plane);
3774 if (crtc->fb == NULL || !crtc->enabled) 3774 if (crtc->fb == NULL || !crtc->enabled) {
3775 *cursor_wm = cursor->guard_size;
3776 *plane_wm = display->guard_size;
3775 return false; 3777 return false;
3778 }
3776 3779
3777 htotal = crtc->mode.htotal; 3780 htotal = crtc->mode.htotal;
3778 hdisplay = crtc->mode.hdisplay; 3781 hdisplay = crtc->mode.hdisplay;
@@ -6215,36 +6218,6 @@ cleanup_work:
6215 return ret; 6218 return ret;
6216} 6219}
6217 6220
6218static void intel_crtc_reset(struct drm_crtc *crtc)
6219{
6220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6221
6222 /* Reset flags back to the 'unknown' status so that they
6223 * will be correctly set on the initial modeset.
6224 */
6225 intel_crtc->dpms_mode = -1;
6226}
6227
6228static struct drm_crtc_helper_funcs intel_helper_funcs = {
6229 .dpms = intel_crtc_dpms,
6230 .mode_fixup = intel_crtc_mode_fixup,
6231 .mode_set = intel_crtc_mode_set,
6232 .mode_set_base = intel_pipe_set_base,
6233 .mode_set_base_atomic = intel_pipe_set_base_atomic,
6234 .load_lut = intel_crtc_load_lut,
6235 .disable = intel_crtc_disable,
6236};
6237
6238static const struct drm_crtc_funcs intel_crtc_funcs = {
6239 .reset = intel_crtc_reset,
6240 .cursor_set = intel_crtc_cursor_set,
6241 .cursor_move = intel_crtc_cursor_move,
6242 .gamma_set = intel_crtc_gamma_set,
6243 .set_config = drm_crtc_helper_set_config,
6244 .destroy = intel_crtc_destroy,
6245 .page_flip = intel_crtc_page_flip,
6246};
6247
6248static void intel_sanitize_modesetting(struct drm_device *dev, 6221static void intel_sanitize_modesetting(struct drm_device *dev,
6249 int pipe, int plane) 6222 int pipe, int plane)
6250{ 6223{
@@ -6281,6 +6254,42 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
6281 intel_disable_pipe(dev_priv, pipe); 6254 intel_disable_pipe(dev_priv, pipe);
6282} 6255}
6283 6256
6257static void intel_crtc_reset(struct drm_crtc *crtc)
6258{
6259 struct drm_device *dev = crtc->dev;
6260 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6261
6262 /* Reset flags back to the 'unknown' status so that they
6263 * will be correctly set on the initial modeset.
6264 */
6265 intel_crtc->dpms_mode = -1;
6266
6267 /* We need to fix up any BIOS configuration that conflicts with
6268 * our expectations.
6269 */
6270 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6271}
6272
6273static struct drm_crtc_helper_funcs intel_helper_funcs = {
6274 .dpms = intel_crtc_dpms,
6275 .mode_fixup = intel_crtc_mode_fixup,
6276 .mode_set = intel_crtc_mode_set,
6277 .mode_set_base = intel_pipe_set_base,
6278 .mode_set_base_atomic = intel_pipe_set_base_atomic,
6279 .load_lut = intel_crtc_load_lut,
6280 .disable = intel_crtc_disable,
6281};
6282
6283static const struct drm_crtc_funcs intel_crtc_funcs = {
6284 .reset = intel_crtc_reset,
6285 .cursor_set = intel_crtc_cursor_set,
6286 .cursor_move = intel_crtc_cursor_move,
6287 .gamma_set = intel_crtc_gamma_set,
6288 .set_config = drm_crtc_helper_set_config,
6289 .destroy = intel_crtc_destroy,
6290 .page_flip = intel_crtc_page_flip,
6291};
6292
6284static void intel_crtc_init(struct drm_device *dev, int pipe) 6293static void intel_crtc_init(struct drm_device *dev, int pipe)
6285{ 6294{
6286 drm_i915_private_t *dev_priv = dev->dev_private; 6295 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -6330,8 +6339,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
6330 6339
6331 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, 6340 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
6332 (unsigned long)intel_crtc); 6341 (unsigned long)intel_crtc);
6333
6334 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6335} 6342}
6336 6343
6337int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 6344int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 4256b8ef3947..6b22c1dcc015 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1151,10 +1151,10 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1151 (video_levels->blank << TV_BLANK_LEVEL_SHIFT))); 1151 (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
1152 { 1152 {
1153 int pipeconf_reg = PIPECONF(pipe); 1153 int pipeconf_reg = PIPECONF(pipe);
1154 int dspcntr_reg = DSPCNTR(pipe); 1154 int dspcntr_reg = DSPCNTR(intel_crtc->plane);
1155 int pipeconf = I915_READ(pipeconf_reg); 1155 int pipeconf = I915_READ(pipeconf_reg);
1156 int dspcntr = I915_READ(dspcntr_reg); 1156 int dspcntr = I915_READ(dspcntr_reg);
1157 int dspbase_reg = DSPADDR(pipe); 1157 int dspbase_reg = DSPADDR(intel_crtc->plane);
1158 int xpos = 0x0, ypos = 0x0; 1158 int xpos = 0x0, ypos = 0x0;
1159 unsigned int xsize, ysize; 1159 unsigned int xsize, ysize;
1160 /* Pipe must be off here */ 1160 /* Pipe must be off here */
@@ -1378,7 +1378,9 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1378 if (type < 0) 1378 if (type < 0)
1379 return connector_status_disconnected; 1379 return connector_status_disconnected;
1380 1380
1381 intel_tv->type = type;
1381 intel_tv_find_better_format(connector); 1382 intel_tv_find_better_format(connector);
1383
1382 return connector_status_connected; 1384 return connector_status_connected;
1383} 1385}
1384 1386
@@ -1670,8 +1672,7 @@ intel_tv_init(struct drm_device *dev)
1670 * 1672 *
1671 * More recent chipsets favour HDMI rather than integrated S-Video. 1673 * More recent chipsets favour HDMI rather than integrated S-Video.
1672 */ 1674 */
1673 connector->polled = 1675 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1674 DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
1675 1676
1676 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1677 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1677 DRM_MODE_CONNECTOR_SVIDEO); 1678 DRM_MODE_CONNECTOR_SVIDEO);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index ce38e97b9428..568caedd7216 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,7 @@ nouveau_dma_init(struct nouveau_channel *chan)
83 return ret; 83 return ret;
84 84
85 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ 85 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
86 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, 86 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
87 &chan->m2mf_ntfy); 87 &chan->m2mf_ntfy);
88 if (ret) 88 if (ret)
89 return ret; 89 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 856d56a98d1e..a76514a209b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -682,6 +682,9 @@ struct drm_nouveau_private {
682 /* For PFIFO and PGRAPH. */ 682 /* For PFIFO and PGRAPH. */
683 spinlock_t context_switch_lock; 683 spinlock_t context_switch_lock;
684 684
685 /* VM/PRAMIN flush, legacy PRAMIN aperture */
686 spinlock_t vm_lock;
687
685 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ 688 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
686 struct nouveau_ramht *ramht; 689 struct nouveau_ramht *ramht;
687 struct nouveau_gpuobj *ramfc; 690 struct nouveau_gpuobj *ramfc;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 889c4454682e..39aee6d4daf8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -181,13 +181,13 @@ nouveau_fbcon_sync(struct fb_info *info)
181 OUT_RING (chan, 0); 181 OUT_RING (chan, 0);
182 } 182 }
183 183
184 nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); 184 nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
185 FIRE_RING(chan); 185 FIRE_RING(chan);
186 mutex_unlock(&chan->mutex); 186 mutex_unlock(&chan->mutex);
187 187
188 ret = -EBUSY; 188 ret = -EBUSY;
189 for (i = 0; i < 100000; i++) { 189 for (i = 0; i < 100000; i++) {
190 if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) { 190 if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
191 ret = 0; 191 ret = 0;
192 break; 192 break;
193 } 193 }
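The fbcon fix above divides m2mf_ntfy by four because nouveau_bo_wr32()/nouveau_bo_rd32() index the notifier in 32-bit words while m2mf_ntfy is a byte offset. The distinction in plain C, with a local array standing in for the notifier buffer object.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bo[16] = { 0 };          /* notifier backing store, dwords */
	unsigned int m2mf_ntfy = 0x20;    /* byte offset of this notifier   */

	/* wr32(bo, index) works on dword indices, so convert first. */
	bo[m2mf_ntfy / 4 + 3] = 0xffffffff;

	/* Using the byte offset directly would poke dword 0x23 instead of
	 * 0x20/4 + 3 == 11, i.e. well past this notifier's slot. */
	printf("dword index used: %u, value 0x%x\n",
	       m2mf_ntfy / 4 + 3, bo[m2mf_ntfy / 4 + 3]);
	return 0;
}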
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 78f467fe30be..5045f8b921d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -398,7 +398,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
398 dma_bits = 40; 398 dma_bits = 40;
399 } else 399 } else
400 if (drm_pci_device_is_pcie(dev) && 400 if (drm_pci_device_is_pcie(dev) &&
401 dev_priv->chipset != 0x40 && 401 dev_priv->chipset > 0x40 &&
402 dev_priv->chipset != 0x45) { 402 dev_priv->chipset != 0x45) {
403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) 403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
404 dma_bits = 39; 404 dma_bits = 39;
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 7ba3fc0b30c1..5b39718ae1f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -35,19 +35,22 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
35{ 35{
36 struct drm_device *dev = chan->dev; 36 struct drm_device *dev = chan->dev;
37 struct nouveau_bo *ntfy = NULL; 37 struct nouveau_bo *ntfy = NULL;
38 uint32_t flags; 38 uint32_t flags, ttmpl;
39 int ret; 39 int ret;
40 40
41 if (nouveau_vram_notify) 41 if (nouveau_vram_notify) {
42 flags = NOUVEAU_GEM_DOMAIN_VRAM; 42 flags = NOUVEAU_GEM_DOMAIN_VRAM;
43 else 43 ttmpl = TTM_PL_FLAG_VRAM;
44 } else {
44 flags = NOUVEAU_GEM_DOMAIN_GART; 45 flags = NOUVEAU_GEM_DOMAIN_GART;
46 ttmpl = TTM_PL_FLAG_TT;
47 }
45 48
46 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy); 49 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
47 if (ret) 50 if (ret)
48 return ret; 51 return ret;
49 52
50 ret = nouveau_bo_pin(ntfy, flags); 53 ret = nouveau_bo_pin(ntfy, ttmpl);
51 if (ret) 54 if (ret)
52 goto out_err; 55 goto out_err;
53 56
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 4f00c87ed86e..67a16e01ffa6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -1039,19 +1039,20 @@ nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
1039{ 1039{
1040 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; 1040 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
1041 struct drm_device *dev = gpuobj->dev; 1041 struct drm_device *dev = gpuobj->dev;
1042 unsigned long flags;
1042 1043
1043 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { 1044 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
1044 u64 ptr = gpuobj->vinst + offset; 1045 u64 ptr = gpuobj->vinst + offset;
1045 u32 base = ptr >> 16; 1046 u32 base = ptr >> 16;
1046 u32 val; 1047 u32 val;
1047 1048
1048 spin_lock(&dev_priv->ramin_lock); 1049 spin_lock_irqsave(&dev_priv->vm_lock, flags);
1049 if (dev_priv->ramin_base != base) { 1050 if (dev_priv->ramin_base != base) {
1050 dev_priv->ramin_base = base; 1051 dev_priv->ramin_base = base;
1051 nv_wr32(dev, 0x001700, dev_priv->ramin_base); 1052 nv_wr32(dev, 0x001700, dev_priv->ramin_base);
1052 } 1053 }
1053 val = nv_rd32(dev, 0x700000 + (ptr & 0xffff)); 1054 val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
1054 spin_unlock(&dev_priv->ramin_lock); 1055 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
1055 return val; 1056 return val;
1056 } 1057 }
1057 1058
@@ -1063,18 +1064,19 @@ nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
1063{ 1064{
1064 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; 1065 struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
1065 struct drm_device *dev = gpuobj->dev; 1066 struct drm_device *dev = gpuobj->dev;
1067 unsigned long flags;
1066 1068
1067 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) { 1069 if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
1068 u64 ptr = gpuobj->vinst + offset; 1070 u64 ptr = gpuobj->vinst + offset;
1069 u32 base = ptr >> 16; 1071 u32 base = ptr >> 16;
1070 1072
1071 spin_lock(&dev_priv->ramin_lock); 1073 spin_lock_irqsave(&dev_priv->vm_lock, flags);
1072 if (dev_priv->ramin_base != base) { 1074 if (dev_priv->ramin_base != base) {
1073 dev_priv->ramin_base = base; 1075 dev_priv->ramin_base = base;
1074 nv_wr32(dev, 0x001700, dev_priv->ramin_base); 1076 nv_wr32(dev, 0x001700, dev_priv->ramin_base);
1075 } 1077 }
1076 nv_wr32(dev, 0x700000 + (ptr & 0xffff), val); 1078 nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
1077 spin_unlock(&dev_priv->ramin_lock); 1079 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
1078 return; 1080 return;
1079 } 1081 }
1080 1082
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a33fe4019286..4bce801bc588 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -55,6 +55,7 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
55 be->func->clear(be); 55 be->func->clear(be);
56 return -EFAULT; 56 return -EFAULT;
57 } 57 }
58 nvbe->ttm_alloced[nvbe->nr_pages] = false;
58 } 59 }
59 60
60 nvbe->nr_pages++; 61 nvbe->nr_pages++;
@@ -427,7 +428,7 @@ nouveau_sgdma_init(struct drm_device *dev)
427 u32 aper_size, align; 428 u32 aper_size, align;
428 int ret; 429 int ret;
429 430
430 if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev)) 431 if (dev_priv->card_type >= NV_40 && drm_pci_device_is_pcie(dev))
431 aper_size = 512 * 1024 * 1024; 432 aper_size = 512 * 1024 * 1024;
432 else 433 else
433 aper_size = 64 * 1024 * 1024; 434 aper_size = 64 * 1024 * 1024;
@@ -457,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
457 dev_priv->gart_info.func = &nv50_sgdma_backend; 458 dev_priv->gart_info.func = &nv50_sgdma_backend;
458 } else 459 } else
459 if (drm_pci_device_is_pcie(dev) && 460 if (drm_pci_device_is_pcie(dev) &&
460 dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) { 461 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
461 if (nv44_graph_class(dev)) { 462 if (nv44_graph_class(dev)) {
462 dev_priv->gart_info.func = &nv44_sgdma_backend; 463 dev_priv->gart_info.func = &nv44_sgdma_backend;
463 align = 512 * 1024; 464 align = 512 * 1024;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 6e2b1a6caa2d..a30adec5beaa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -608,6 +608,7 @@ nouveau_card_init(struct drm_device *dev)
608 spin_lock_init(&dev_priv->channels.lock); 608 spin_lock_init(&dev_priv->channels.lock);
609 spin_lock_init(&dev_priv->tile.lock); 609 spin_lock_init(&dev_priv->tile.lock);
610 spin_lock_init(&dev_priv->context_switch_lock); 610 spin_lock_init(&dev_priv->context_switch_lock);
611 spin_lock_init(&dev_priv->vm_lock);
611 612
612 /* Make the CRTCs and I2C buses accessible */ 613 /* Make the CRTCs and I2C buses accessible */
613 ret = engine->display.early_init(dev); 614 ret = engine->display.early_init(dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index a6f8aa651fc6..4f95a1e5822e 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -404,23 +404,25 @@ void
404nv50_instmem_flush(struct drm_device *dev) 404nv50_instmem_flush(struct drm_device *dev)
405{ 405{
406 struct drm_nouveau_private *dev_priv = dev->dev_private; 406 struct drm_nouveau_private *dev_priv = dev->dev_private;
407 unsigned long flags;
407 408
408 spin_lock(&dev_priv->ramin_lock); 409 spin_lock_irqsave(&dev_priv->vm_lock, flags);
409 nv_wr32(dev, 0x00330c, 0x00000001); 410 nv_wr32(dev, 0x00330c, 0x00000001);
410 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) 411 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
411 NV_ERROR(dev, "PRAMIN flush timeout\n"); 412 NV_ERROR(dev, "PRAMIN flush timeout\n");
412 spin_unlock(&dev_priv->ramin_lock); 413 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
413} 414}
414 415
415void 416void
416nv84_instmem_flush(struct drm_device *dev) 417nv84_instmem_flush(struct drm_device *dev)
417{ 418{
418 struct drm_nouveau_private *dev_priv = dev->dev_private; 419 struct drm_nouveau_private *dev_priv = dev->dev_private;
420 unsigned long flags;
419 421
420 spin_lock(&dev_priv->ramin_lock); 422 spin_lock_irqsave(&dev_priv->vm_lock, flags);
421 nv_wr32(dev, 0x070000, 0x00000001); 423 nv_wr32(dev, 0x070000, 0x00000001);
422 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) 424 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
423 NV_ERROR(dev, "PRAMIN flush timeout\n"); 425 NV_ERROR(dev, "PRAMIN flush timeout\n");
424 spin_unlock(&dev_priv->ramin_lock); 426 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
425} 427}
426 428
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 4fd3432b5b8d..6c2694490741 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -174,10 +174,11 @@ void
174nv50_vm_flush_engine(struct drm_device *dev, int engine) 174nv50_vm_flush_engine(struct drm_device *dev, int engine)
175{ 175{
176 struct drm_nouveau_private *dev_priv = dev->dev_private; 176 struct drm_nouveau_private *dev_priv = dev->dev_private;
177 unsigned long flags;
177 178
178 spin_lock(&dev_priv->ramin_lock); 179 spin_lock_irqsave(&dev_priv->vm_lock, flags);
179 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 180 nv_wr32(dev, 0x100c80, (engine << 16) | 1);
180 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) 181 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
181 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 182 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
182 spin_unlock(&dev_priv->ramin_lock); 183 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
183} 184}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index a0a2a0277f73..a179e6c55afb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -104,11 +104,12 @@ nvc0_vm_flush(struct nouveau_vm *vm)
104 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; 104 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
105 struct drm_device *dev = vm->dev; 105 struct drm_device *dev = vm->dev;
106 struct nouveau_vm_pgd *vpgd; 106 struct nouveau_vm_pgd *vpgd;
107 unsigned long flags;
107 u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5; 108 u32 engine = (dev_priv->chan_vm == vm) ? 1 : 5;
108 109
109 pinstmem->flush(vm->dev); 110 pinstmem->flush(vm->dev);
110 111
111 spin_lock(&dev_priv->ramin_lock); 112 spin_lock_irqsave(&dev_priv->vm_lock, flags);
112 list_for_each_entry(vpgd, &vm->pgd_list, head) { 113 list_for_each_entry(vpgd, &vm->pgd_list, head) {
113 /* looks like maybe a "free flush slots" counter, the 114 /* looks like maybe a "free flush slots" counter, the
114 * faster you write to 0x100cbc to more it decreases 115 * faster you write to 0x100cbc to more it decreases
@@ -125,5 +126,5 @@ nvc0_vm_flush(struct nouveau_vm *vm)
125 nv_rd32(dev, 0x100c80), engine); 126 nv_rd32(dev, 0x100c80), engine);
126 } 127 }
127 } 128 }
128 spin_unlock(&dev_priv->ramin_lock); 129 spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
129} 130}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d71d375149f8..7bd745689097 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -135,7 +135,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
135 case ATOM_IIO_MOVE_INDEX: 135 case ATOM_IIO_MOVE_INDEX:
136 temp &= 136 temp &=
137 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << 137 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
138 CU8(base + 2)); 138 CU8(base + 3));
139 temp |= 139 temp |=
140 ((index >> CU8(base + 2)) & 140 ((index >> CU8(base + 2)) &
141 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 141 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -145,7 +145,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
145 case ATOM_IIO_MOVE_DATA: 145 case ATOM_IIO_MOVE_DATA:
146 temp &= 146 temp &=
147 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << 147 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
148 CU8(base + 2)); 148 CU8(base + 3));
149 temp |= 149 temp |=
150 ((data >> CU8(base + 2)) & 150 ((data >> CU8(base + 2)) &
151 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 151 (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base +
@@ -155,7 +155,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
155 case ATOM_IIO_MOVE_ATTR: 155 case ATOM_IIO_MOVE_ATTR:
156 temp &= 156 temp &=
157 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << 157 ~((0xFFFFFFFF >> (32 - CU8(base + 1))) <<
158 CU8(base + 2)); 158 CU8(base + 3));
159 temp |= 159 temp |=
160 ((ctx-> 160 ((ctx->
161 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - 161 io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 -
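The three atom.c hunks above fix the same copy/paste slip: when an IIO MOVE op relocates a bit-field, the mask that clears the destination must be positioned with the destination shift (byte base + 3), not the source shift (byte base + 2). A standalone move_field() showing the corrected form; the field width and positions in main() are example values.

#include <stdint.h>
#include <stdio.h>

/* Copy a 'width'-bit field from bit 'src_pos' of src into bit 'dst_pos'
 * of dst, leaving the rest of dst untouched. */
static uint32_t move_field(uint32_t dst, uint32_t src,
			   unsigned int width, unsigned int src_pos,
			   unsigned int dst_pos)
{
	uint32_t mask = 0xFFFFFFFFu >> (32 - width);

	dst &= ~(mask << dst_pos);              /* clear at the destination */
	dst |= ((src >> src_pos) & mask) << dst_pos;
	return dst;
}

int main(void)
{
	/* Move a 4-bit field from bits [7:4] of src to bits [11:8] of dst. */
	uint32_t out = move_field(0xFFFFFFFFu, 0x000000A0u, 4, 4, 8);

	printf("0x%08X\n", out);   /* expect 0xFFFFFAFF */
	return 0;
}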
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9d516a8c4dfa..529a3a704731 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -532,10 +532,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
532 else 532 else
533 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 533 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
534 534
535 if ((rdev->family == CHIP_R600) || 535 if (rdev->family < CHIP_RV770)
536 (rdev->family == CHIP_RV610) ||
537 (rdev->family == CHIP_RV630) ||
538 (rdev->family == CHIP_RV670))
539 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 536 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
540 } else { 537 } else {
541 pll->flags |= RADEON_PLL_LEGACY; 538 pll->flags |= RADEON_PLL_LEGACY;
@@ -565,7 +562,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
565 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 562 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
566 if (ss_enabled) { 563 if (ss_enabled) {
567 if (ss->refdiv) { 564 if (ss->refdiv) {
568 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
569 pll->flags |= RADEON_PLL_USE_REF_DIV; 565 pll->flags |= RADEON_PLL_USE_REF_DIV;
570 pll->reference_div = ss->refdiv; 566 pll->reference_div = ss->refdiv;
571 if (ASIC_IS_AVIVO(rdev)) 567 if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3453910ee0f3..e9bc135d9189 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -353,7 +353,7 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
353 struct drm_display_mode *mode, 353 struct drm_display_mode *mode,
354 struct drm_display_mode *other_mode) 354 struct drm_display_mode *other_mode)
355{ 355{
356 u32 tmp = 0; 356 u32 tmp;
357 /* 357 /*
358 * Line Buffer Setup 358 * Line Buffer Setup
359 * There are 3 line buffers, each one shared by 2 display controllers. 359 * There are 3 line buffers, each one shared by 2 display controllers.
@@ -363,64 +363,63 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
      * first display controller
      *  0 - first half of lb (3840 * 2)
      *  1 - first 3/4 of lb (5760 * 2)
-     *  2 - whole lb (7680 * 2)
+     *  2 - whole lb (7680 * 2), other crtc must be disabled
      *  3 - first 1/4 of lb (1920 * 2)
      * second display controller
      *  4 - second half of lb (3840 * 2)
      *  5 - second 3/4 of lb (5760 * 2)
-     *  6 - whole lb (7680 * 2)
+     *  6 - whole lb (7680 * 2), other crtc must be disabled
      *  7 - last 1/4 of lb (1920 * 2)
      */
-    if (mode && other_mode) {
-        if (mode->hdisplay > other_mode->hdisplay) {
-            if (mode->hdisplay > 2560)
-                tmp = 1; /* 3/4 */
-            else
-                tmp = 0; /* 1/2 */
-        } else if (other_mode->hdisplay > mode->hdisplay) {
-            if (other_mode->hdisplay > 2560)
-                tmp = 3; /* 1/4 */
-            else
-                tmp = 0; /* 1/2 */
-        } else
-            tmp = 0; /* 1/2 */
-    } else if (mode)
-        tmp = 2; /* whole */
-    else if (other_mode)
-        tmp = 3; /* 1/4 */
+    /* this can get tricky if we have two large displays on a paired group
+     * of crtcs. Ideally for multiple large displays we'd assign them to
+     * non-linked crtcs for maximum line buffer allocation.
+     */
+    if (radeon_crtc->base.enabled && mode) {
+        if (other_mode)
+            tmp = 0; /* 1/2 */
+        else
+            tmp = 2; /* whole */
+    } else
+        tmp = 0;
 
     /* second controller of the pair uses second half of the lb */
     if (radeon_crtc->crtc_id % 2)
         tmp += 4;
     WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
 
-    switch (tmp) {
-    case 0:
-    case 4:
-    default:
-        if (ASIC_IS_DCE5(rdev))
-            return 4096 * 2;
-        else
-            return 3840 * 2;
-    case 1:
-    case 5:
-        if (ASIC_IS_DCE5(rdev))
-            return 6144 * 2;
-        else
-            return 5760 * 2;
-    case 2:
-    case 6:
-        if (ASIC_IS_DCE5(rdev))
-            return 8192 * 2;
-        else
-            return 7680 * 2;
-    case 3:
-    case 7:
-        if (ASIC_IS_DCE5(rdev))
-            return 2048 * 2;
-        else
-            return 1920 * 2;
+    if (radeon_crtc->base.enabled && mode) {
+        switch (tmp) {
+        case 0:
+        case 4:
+        default:
+            if (ASIC_IS_DCE5(rdev))
+                return 4096 * 2;
+            else
+                return 3840 * 2;
+        case 1:
+        case 5:
+            if (ASIC_IS_DCE5(rdev))
+                return 6144 * 2;
+            else
+                return 5760 * 2;
+        case 2:
+        case 6:
+            if (ASIC_IS_DCE5(rdev))
+                return 8192 * 2;
+            else
+                return 7680 * 2;
+        case 3:
+        case 7:
+            if (ASIC_IS_DCE5(rdev))
+                return 2048 * 2;
+            else
+                return 1920 * 2;
+        }
     }
+
+    /* controller not enabled, so no lb used */
+    return 0;
 }
 
 static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
@@ -2581,7 +2580,7 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
     u32 wptr, tmp;
 
     if (rdev->wb.enabled)
-        wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+        wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
     else
         wptr = RREG32(IH_RB_WPTR);
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 15d58292677a..6f27593901c7 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3231,7 +3231,7 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
     u32 wptr, tmp;
 
     if (rdev->wb.enabled)
-        wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+        wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
     else
         wptr = RREG32(IH_RB_WPTR);
 
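The two hunks above wrap the write-back ring pointer in le32_to_cpu() because the GPU DMAs that buffer in little-endian byte order regardless of host endianness. A minimal userspace sketch of the same conversion, using glibc's le32toh()/htole32() rather than the kernel helpers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* The device writes a little-endian 32-bit pointer into host memory;
 * convert explicitly before using it, so big-endian hosts read the
 * same value as little-endian ones. */
static uint32_t read_wptr(const uint32_t *wb, unsigned int idx)
{
    return le32toh(wb[idx]);
}

int main(void)
{
    uint32_t wb[8] = { [4] = htole32(0x1234) }; /* stand-in DMA buffer */
    printf("wptr = 0x%x\n", read_wptr(wb, 4));  /* 0x1234 on any host */
    return 0;
}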
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2ef6d5135064..5f45fa12bb8b 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1199,7 +1199,7 @@ radeon_add_atom_connector(struct drm_device *dev,
     if (router->ddc_valid || router->cd_valid) {
         radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
         if (!radeon_connector->router_bus)
-            goto failed;
+            DRM_ERROR("Failed to assign router i2c bus! Check dmesg for i2c errors.\n");
     }
     switch (connector_type) {
     case DRM_MODE_CONNECTOR_VGA:
@@ -1208,7 +1208,7 @@ radeon_add_atom_connector(struct drm_device *dev,
         if (i2c_bus->valid) {
             radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
             if (!radeon_connector->ddc_bus)
-                goto failed;
+                DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
         }
         radeon_connector->dac_load_detect = true;
         drm_connector_attach_property(&radeon_connector->base,
@@ -1226,7 +1226,7 @@ radeon_add_atom_connector(struct drm_device *dev,
         if (i2c_bus->valid) {
             radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
             if (!radeon_connector->ddc_bus)
-                goto failed;
+                DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
         }
         radeon_connector->dac_load_detect = true;
         drm_connector_attach_property(&radeon_connector->base,
@@ -1249,7 +1249,7 @@ radeon_add_atom_connector(struct drm_device *dev,
         if (i2c_bus->valid) {
             radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
             if (!radeon_connector->ddc_bus)
-                goto failed;
+                DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
         }
         subpixel_order = SubPixelHorizontalRGB;
         drm_connector_attach_property(&radeon_connector->base,
@@ -1290,7 +1290,7 @@ radeon_add_atom_connector(struct drm_device *dev,
         if (i2c_bus->valid) {
             radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
             if (!radeon_connector->ddc_bus)
-                goto failed;
+                DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
         }
         drm_connector_attach_property(&radeon_connector->base,
                           rdev->mode_info.coherent_mode_property,
@@ -1329,10 +1329,10 @@ radeon_add_atom_connector(struct drm_device *dev,
             else
                 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
             if (!radeon_dig_connector->dp_i2c_bus)
-                goto failed;
+                DRM_ERROR("DP: Failed to assign dp ddc bus! Check dmesg for i2c errors.\n");
             radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
             if (!radeon_connector->ddc_bus)
-                goto failed;
+                DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
         }
         subpixel_order = SubPixelHorizontalRGB;
         drm_connector_attach_property(&radeon_connector->base,
@@ -1381,7 +1381,7 @@ radeon_add_atom_connector(struct drm_device *dev,
         if (i2c_bus->valid) {
             radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
             if (!radeon_connector->ddc_bus)
-                goto failed;
+                DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
         }
         drm_connector_attach_property(&radeon_connector->base,
                           dev->mode_config.scaling_mode_property,
@@ -1457,7 +1457,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
         if (i2c_bus->valid) {
             radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
             if (!radeon_connector->ddc_bus)
-                goto failed;
+                DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
         }
         radeon_connector->dac_load_detect = true;
         drm_connector_attach_property(&radeon_connector->base,
@@ -1475,7 +1475,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
         if (i2c_bus->valid) {
             radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
             if (!radeon_connector->ddc_bus)
-                goto failed;
+                DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
         }
         radeon_connector->dac_load_detect = true;
         drm_connector_attach_property(&radeon_connector->base,
@@ -1493,7 +1493,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
         if (i2c_bus->valid) {
             radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
             if (!radeon_connector->ddc_bus)
-                goto failed;
+                DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
         }
         if (connector_type == DRM_MODE_CONNECTOR_DVII) {
             radeon_connector->dac_load_detect = true;
@@ -1538,7 +1538,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
         if (i2c_bus->valid) {
             radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
             if (!radeon_connector->ddc_bus)
-                goto failed;
+                DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
         }
         drm_connector_attach_property(&radeon_connector->base,
                           dev->mode_config.scaling_mode_property,
@@ -1567,9 +1567,4 @@ radeon_add_legacy_connector(struct drm_device *dev,
             radeon_legacy_backlight_init(radeon_encoder, connector);
         }
     }
-    return;
-
-failed:
-    drm_connector_cleanup(connector);
-    kfree(connector);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index ccbabf734a61..983cbac75af0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1096,6 +1096,9 @@ void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
     if (!radeon_connector->router.ddc_valid)
         return;
 
+    if (!radeon_connector->router_bus)
+        return;
+
     radeon_i2c_get_byte(radeon_connector->router_bus,
                 radeon_connector->router.i2c_addr,
                 0x3, &val);
@@ -1121,6 +1124,9 @@ void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
     if (!radeon_connector->router.cd_valid)
         return;
 
+    if (!radeon_connector->router_bus)
+        return;
+
     radeon_i2c_get_byte(radeon_connector->router_bus,
                 radeon_connector->router.i2c_addr,
                 0x3, &val);
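The connector hunks above stop destroying the whole connector when an i2c bus lookup fails, and the radeon_i2c.c hunks add the matching NULL guards before the router bus is used. A small self-contained sketch of that log-and-degrade pattern; lookup_bus() and the struct names here are illustrative, not radeon APIs:

#include <stdio.h>

struct bus { int id; };
struct connector { struct bus *ddc; };

/* Pretend the lookup can fail (returns NULL). */
static struct bus *lookup_bus(int id)
{
    (void)id;
    return NULL;
}

static void connector_init(struct connector *c, int bus_id)
{
    c->ddc = lookup_bus(bus_id);
    if (!c->ddc)    /* keep the connector usable, just report the problem */
        fprintf(stderr, "failed to assign ddc bus %d\n", bus_id);
}

static void connector_probe_edid(struct connector *c)
{
    if (!c->ddc)    /* every later user must tolerate the missing bus */
        return;
    /* ... talk to the bus here ... */
}

int main(void)
{
    struct connector c;

    connector_init(&c, 3);
    connector_probe_edid(&c);   /* no crash even though ddc is NULL */
    return 0;
}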
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index edfb92e41735..196ffafafd88 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -139,7 +139,6 @@ struct pmbus_data {
      * A single status register covers multiple attributes,
      * so we keep them all together.
      */
-    u8 status_bits;
     u8 status[PB_NUM_STATUS_REG];
 
     u8 currpage;
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 38319a69bd0a..d6d58684712b 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -232,9 +232,17 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
  * Sanity check for the adapter hardware - check the reaction of
  * the bus lines only if it seems to be idle.
  */
-static int test_bus(struct i2c_algo_bit_data *adap, char *name)
+static int test_bus(struct i2c_adapter *i2c_adap)
 {
-    int scl, sda;
+    struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+    const char *name = i2c_adap->name;
+    int scl, sda, ret;
+
+    if (adap->pre_xfer) {
+        ret = adap->pre_xfer(i2c_adap);
+        if (ret < 0)
+            return -ENODEV;
+    }
 
     if (adap->getscl == NULL)
         pr_info("%s: Testing SDA only, SCL is not readable\n", name);
@@ -297,11 +305,19 @@ static int test_bus(struct i2c_algo_bit_data *adap, char *name)
            "while pulling SCL high!\n", name);
         goto bailout;
     }
+
+    if (adap->post_xfer)
+        adap->post_xfer(i2c_adap);
+
     pr_info("%s: Test OK\n", name);
     return 0;
 bailout:
     sdahi(adap);
     sclhi(adap);
+
+    if (adap->post_xfer)
+        adap->post_xfer(i2c_adap);
+
     return -ENODEV;
 }
 
@@ -607,7 +623,7 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
     int ret;
 
     if (bit_test) {
-        ret = test_bus(bit_adap, adap->name);
+        ret = test_bus(adap);
         if (ret < 0)
             return -ENODEV;
     }
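test_bus() now takes the i2c_adapter itself so the bit-test can run the adapter's pre_xfer/post_xfer hooks; some boards must power up or route the bus before SDA/SCL can be sampled. A hedged sketch of an adapter that supplies such hooks — the myboard_* names and the hook bodies are made up for illustration, and the setsda/setscl/getsda/getscl callbacks are omitted:

#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

/* With bit_test enabled, test_bus() now brackets its idle-line check
 * with these hooks, just like a normal transfer would. */
static int myboard_i2c_pre_xfer(struct i2c_adapter *adap)
{
    /* e.g. enable a level shifter or select a mux channel */
    return 0;
}

static void myboard_i2c_post_xfer(struct i2c_adapter *adap)
{
    /* undo whatever pre_xfer switched on */
}

static struct i2c_algo_bit_data myboard_bit_data = {
    .pre_xfer  = myboard_i2c_pre_xfer,
    .post_xfer = myboard_i2c_post_xfer,
    /* .setsda/.setscl/.getsda/.getscl wired up to board GPIOs here */
    .udelay    = 5,
    .timeout   = 100,
};

static struct i2c_adapter myboard_adapter = {
    .owner     = THIS_MODULE,
    .name      = "myboard-gpio-i2c",
    .algo_data = &myboard_bit_data,
};

/* i2c_bit_add_bus(&myboard_adapter) then registers the adapter. */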
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 70c30e6bce0b..9a58994ff7ea 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -797,7 +797,8 @@ static int i2c_do_add_adapter(struct i2c_driver *driver,
 
     /* Let legacy drivers scan this bus for matching devices */
     if (driver->attach_adapter) {
-        dev_warn(&adap->dev, "attach_adapter method is deprecated\n");
+        dev_warn(&adap->dev, "%s: attach_adapter method is deprecated\n",
+             driver->driver.name);
         dev_warn(&adap->dev, "Please use another way to instantiate "
              "your i2c_client\n");
         /* We ignore the return code; if it fails, too bad */
@@ -984,7 +985,8 @@ static int i2c_do_del_adapter(struct i2c_driver *driver,
 
     if (!driver->detach_adapter)
         return 0;
-    dev_warn(&adapter->dev, "detach_adapter method is deprecated\n");
+    dev_warn(&adapter->dev, "%s: detach_adapter method is deprecated\n",
+         driver->driver.name);
     res = driver->detach_adapter(adapter);
     if (res)
         dev_err(&adapter->dev, "detach_adapter failed (%d) "
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index fd1e11799137..a5ec5a7cb381 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive)
     ide_cd_read_toc(drive, &sense);
     g->fops = &idecd_ops;
     g->flags |= GENHD_FL_REMOVABLE;
-    g->events = DISK_EVENT_MEDIA_CHANGE;
     add_disk(g);
     return 0;
 
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 2a6bc50e8a41..02caa7dd51c8 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -79,6 +79,12 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
     return CDS_DRIVE_NOT_READY;
 }
 
+/*
+ * ide-cd always generates media changed event if media is missing, which
+ * makes it impossible to use for proper event reporting, so disk->events
+ * is cleared to 0 and the following function is used only to trigger
+ * revalidation and never propagated to userland.
+ */
 unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
                      unsigned int clearing, int slot_nr)
 {
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index c4ffd4888939..70ea8763567d 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -298,6 +298,12 @@ static unsigned int ide_gd_check_events(struct gendisk *disk,
         return 0;
     }
 
+    /*
+     * The following is used to force revalidation on the first open on
+     * removeable devices, and never gets reported to userland as
+     * genhd->events is 0.  This is intended as removeable ide disk
+     * can't really detect MEDIA_CHANGE events.
+     */
     ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
     drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
 
@@ -413,7 +419,6 @@ static int ide_gd_probe(ide_drive_t *drive)
     if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
         g->flags = GENHD_FL_REMOVABLE;
     g->fops = &ide_gd_ops;
-    g->events = DISK_EVENT_MEDIA_CHANGE;
     add_disk(g);
     return 0;
 
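Both ide-cd and ide-gd stop advertising DISK_EVENT_MEDIA_CHANGE because the hardware cannot report it reliably; check_events() is kept only so the first open still triggers revalidation. A sketch of the general shape of such a callback — struct mydrv and its media_changed flag are hypothetical:

#include <linux/genhd.h>

struct mydrv {              /* hypothetical driver state */
    int media_changed;
};

/* If genhd->events stays 0, whatever this returns is only used internally
 * to force revalidation and never reaches userland event polling. */
static unsigned int mydrv_check_events(struct gendisk *disk,
                       unsigned int clearing)
{
    struct mydrv *drv = disk->private_data;
    unsigned int ret = 0;

    if (drv->media_changed) {
        drv->media_changed = 0;
        ret = DISK_EVENT_MEDIA_CHANGE;
    }
    return ret;
}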
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 7de4b7ebffc5..d8ca0a0b970d 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -1799,7 +1799,7 @@ static int qib_6120_setup_reset(struct qib_devdata *dd)
     /*
      * Keep chip from being accessed until we are ready. Use
      * writeq() directly, to allow the write even though QIB_PRESENT
-     * isn't' set.
+     * isn't set.
      */
     dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
     dd->int_counter = 0; /* so we check interrupts work again */
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 74fe0360bec7..c765a2eb04cf 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2111,7 +2111,7 @@ static int qib_setup_7220_reset(struct qib_devdata *dd)
     /*
      * Keep chip from being accessed until we are ready. Use
      * writeq() directly, to allow the write even though QIB_PRESENT
-     * isn't' set.
+     * isn't set.
     */
     dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
     dd->int_counter = 0; /* so we check interrupts work again */
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 55de3cf3441c..6bab3eaea70f 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -3299,7 +3299,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
     /*
      * Keep chip from being accessed until we are ready. Use
      * writeq() directly, to allow the write even though QIB_PRESENT
-     * isn't' set.
+     * isn't set.
      */
     dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
     dd->flags |= QIB_DOING_RESET;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 7f42d3a454d2..88d8e4cb419a 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -39,13 +39,13 @@ struct evdev {
 };
 
 struct evdev_client {
-    int head;
-    int tail;
+    unsigned int head;
+    unsigned int tail;
     spinlock_t buffer_lock; /* protects access to buffer, head and tail */
     struct fasync_struct *fasync;
     struct evdev *evdev;
     struct list_head node;
-    int bufsize;
+    unsigned int bufsize;
     struct input_event buffer[];
 };
 
@@ -55,16 +55,25 @@ static DEFINE_MUTEX(evdev_table_mutex);
 static void evdev_pass_event(struct evdev_client *client,
                  struct input_event *event)
 {
-    /*
-     * Interrupts are disabled, just acquire the lock.
-     * Make sure we don't leave with the client buffer
-     * "empty" by having client->head == client->tail.
-     */
+    /* Interrupts are disabled, just acquire the lock. */
     spin_lock(&client->buffer_lock);
-    do {
-        client->buffer[client->head++] = *event;
-        client->head &= client->bufsize - 1;
-    } while (client->head == client->tail);
+
+    client->buffer[client->head++] = *event;
+    client->head &= client->bufsize - 1;
+
+    if (unlikely(client->head == client->tail)) {
+        /*
+         * This effectively "drops" all unconsumed events, leaving
+         * EV_SYN/SYN_DROPPED plus the newest event in the queue.
+         */
+        client->tail = (client->head - 2) & (client->bufsize - 1);
+
+        client->buffer[client->tail].time = event->time;
+        client->buffer[client->tail].type = EV_SYN;
+        client->buffer[client->tail].code = SYN_DROPPED;
+        client->buffer[client->tail].value = 0;
+    }
+
     spin_unlock(&client->buffer_lock);
 
     if (event->type == EV_SYN)
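The evdev change above replaces the old "overwrite until head != tail" loop with an explicit overflow policy: when the queue wraps onto its own tail, everything unconsumed is dropped and an EV_SYN/SYN_DROPPED marker is left in front of the newest event, so readers know state was lost. A self-contained sketch of the same power-of-two ring arithmetic, with plain ints standing in for struct input_event:

#include <stdio.h>

#define BUFSIZE 8           /* must be a power of two */
#define SYN_DROPPED_MARK (-1)   /* stand-in for EV_SYN/SYN_DROPPED */

struct ring {
    int buf[BUFSIZE];
    unsigned int head, tail;    /* masked after every move, as in evdev */
};

static void ring_push(struct ring *r, int ev)
{
    r->buf[r->head++] = ev;
    r->head &= BUFSIZE - 1;

    if (r->head == r->tail) {
        /* Consumer fell behind: drop all unconsumed events and keep
         * a "dropped" marker followed by the newest event. */
        r->tail = (r->head - 2) & (BUFSIZE - 1);
        r->buf[r->tail] = SYN_DROPPED_MARK;
    }
}

int main(void)
{
    struct ring r = { .head = 0, .tail = 0 };

    for (int i = 1; i <= 20; i++)
        ring_push(&r, i);

    while (r.tail != r.head) {  /* prints "-1 20": marker, then newest event */
        printf("%d ", r.buf[r.tail++]);
        r.tail &= BUFSIZE - 1;
    }
    printf("\n");
    return 0;
}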
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d6e8bd8a851c..ebbceedc92f4 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1746,6 +1746,42 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
 }
 EXPORT_SYMBOL(input_set_capability);
 
+static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
+{
+    int mt_slots;
+    int i;
+    unsigned int events;
+
+    if (dev->mtsize) {
+        mt_slots = dev->mtsize;
+    } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
+        mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
+               dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
+               clamp(mt_slots, 2, 32);
+    } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+        mt_slots = 2;
+    } else {
+        mt_slots = 0;
+    }
+
+    events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
+
+    for (i = 0; i < ABS_CNT; i++) {
+        if (test_bit(i, dev->absbit)) {
+            if (input_is_mt_axis(i))
+                events += mt_slots;
+            else
+                events++;
+        }
+    }
+
+    for (i = 0; i < REL_CNT; i++)
+        if (test_bit(i, dev->relbit))
+            events++;
+
+    return events;
+}
+
 #define INPUT_CLEANSE_BITMASK(dev, type, bits)              \
     do {                                \
         if (!test_bit(EV_##type, dev->evbit))           \
@@ -1793,6 +1829,10 @@ int input_register_device(struct input_dev *dev)
     /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
     input_cleanse_bitmasks(dev);
 
+    if (!dev->hint_events_per_packet)
+        dev->hint_events_per_packet =
+            input_estimate_events_per_packet(dev);
+
     /*
      * If delay and period are pre-set by the driver, then autorepeating
      * is handled by the driver itself and we don't do it in input.c.
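For a sense of the estimate input_register_device() now applies when a driver supplies no hint: a hypothetical contact-tracking touchscreen with dev->mtsize = 10 whose only absolute axes are ABS_MT_POSITION_X, ABS_MT_POSITION_Y and ABS_PRESSURE gets mt_slots = 10, so events = 10 + 1 (SYN_MT_REPORT/SYN_REPORT) + 10 + 10 (one per slot for each MT axis) + 1 (ABS_PRESSURE) = 32 events per packet, a hint that event handlers such as evdev can then use when sizing their per-client buffers.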
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index 09bef79d9da1..a26922cf0e84 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -332,18 +332,20 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
 static int __devinit twl4030_kp_probe(struct platform_device *pdev)
 {
     struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
-    const struct matrix_keymap_data *keymap_data = pdata->keymap_data;
+    const struct matrix_keymap_data *keymap_data;
     struct twl4030_keypad *kp;
     struct input_dev *input;
     u8 reg;
     int error;
 
-    if (!pdata || !pdata->rows || !pdata->cols ||
+    if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data ||
         pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) {
         dev_err(&pdev->dev, "Invalid platform_data\n");
         return -EINVAL;
     }
 
+    keymap_data = pdata->keymap_data;
+
     kp = kzalloc(sizeof(*kp), GFP_KERNEL);
     input = input_allocate_device();
     if (!kp || !input) {
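The twl4030 fix moves the pdata->keymap_data dereference after the NULL checks and adds !pdata->keymap_data to them. A minimal sketch of the validate-before-dereference ordering; the struct and field names are illustrative only:

#include <stdio.h>

struct pdata { int rows, cols; const int *keymap; };

static int probe(const struct pdata *pdata)
{
    /* Don't touch pdata->keymap until pdata itself has been checked. */
    if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap) {
        fprintf(stderr, "invalid platform data\n");
        return -1;
    }

    const int *keymap = pdata->keymap;  /* now safe to read */
    printf("first key code: %d\n", keymap[0]);
    return 0;
}

int main(void)
{
    int map[] = { 42 };
    struct pdata good = { .rows = 4, .cols = 4, .keymap = map };

    probe(&good);
    probe(NULL);    /* would have crashed with the old ordering */
    return 0;
}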
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 7077f9bf5ead..62bae99424e6 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -303,7 +303,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
                    enum xenbus_state backend_state)
 {
     struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
-    int val;
+    int ret, val;
 
     switch (backend_state) {
     case XenbusStateInitialising:
@@ -316,6 +316,17 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
 
     case XenbusStateInitWait:
 InitWait:
+        ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+                   "feature-abs-pointer", "%d", &val);
+        if (ret < 0)
+            val = 0;
+        if (val) {
+            ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
+                        "request-abs-pointer", "1");
+            if (ret)
+                pr_warning("xenkbd: can't request abs-pointer");
+        }
+
         xenbus_switch_state(dev, XenbusStateConnected);
         break;
 
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index efa06882de00..45f93d0f5592 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -399,31 +399,34 @@ static int h3600ts_connect(struct serio *serio, struct serio_driver *drv)
             IRQF_SHARED | IRQF_DISABLED, "h3600_action", &ts->dev)) {
         printk(KERN_ERR "h3600ts.c: Could not allocate Action Button IRQ!\n");
         err = -EBUSY;
-        goto fail2;
+        goto fail1;
     }
 
     if (request_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, npower_button_handler,
             IRQF_SHARED | IRQF_DISABLED, "h3600_suspend", &ts->dev)) {
         printk(KERN_ERR "h3600ts.c: Could not allocate Power Button IRQ!\n");
         err = -EBUSY;
-        goto fail3;
+        goto fail2;
     }
 
     serio_set_drvdata(serio, ts);
 
     err = serio_open(serio, drv);
     if (err)
-        return err;
+        goto fail3;
 
     //h3600_flite_control(1, 25);     /* default brightness */
-    input_register_device(ts->dev);
+    err = input_register_device(ts->dev);
+    if (err)
+        goto fail4;
 
     return 0;
 
-fail3:  free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
+fail4:  serio_close(serio);
+fail3:  serio_set_drvdata(serio, NULL);
+    free_irq(IRQ_GPIO_BITSY_NPOWER_BUTTON, ts->dev);
 fail2:  free_irq(IRQ_GPIO_BITSY_ACTION_BUTTON, ts->dev);
-fail1:  serio_set_drvdata(serio, NULL);
-    input_free_device(input_dev);
+fail1:  input_free_device(input_dev);
     kfree(ts);
     return err;
 }
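The h3600 connect path now unwinds strictly in reverse order of acquisition, one label per resource, and finally checks the return value of input_register_device(). A generic, self-contained sketch of that ladder; the acquire_*/release_* pairs stand in for request_irq(), serio_open(), input_register_device() and their counterparts:

#include <stdlib.h>

static int  acquire_a(void) { return 0; }   static void release_a(void) { }
static int  acquire_b(void) { return 0; }   static void release_b(void) { }
static int  acquire_c(void) { return 0; }

static int connect(void)
{
    int err;

    err = acquire_a();
    if (err)
        goto fail_none;

    err = acquire_b();
    if (err)
        goto fail_a;        /* only undo what already succeeded */

    err = acquire_c();
    if (err)
        goto fail_b;

    return 0;

fail_b:
    release_b();
fail_a:
    release_a();
fail_none:
    return err;
}

int main(void) { return connect() ? EXIT_FAILURE : EXIT_SUCCESS; }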
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 5ef136cdba91..e5d8904fc8f6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -390,13 +390,6 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
     return md_raid5_congested(&rs->md, bits);
 }
 
-static void raid_unplug(struct dm_target_callbacks *cb)
-{
-    struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
-    md_raid5_kick_device(rs->md.private);
-}
-
 /*
  * Construct a RAID4/5/6 mapping:
  * Args:
@@ -487,7 +480,6 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
     }
 
     rs->callbacks.congested_fn = raid_is_congested;
-    rs->callbacks.unplug_fn = raid_unplug;
     dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
     return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b12b3776c0c0..7d6f7f18a920 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -447,48 +447,59 @@ EXPORT_SYMBOL(md_flush_request);
 
 /* Support for plugging.
  * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on.  This links tot the private plug_handle structure in the
+ * personality data where we keep a count of the number of outstanding
+ * plugs so other code can see if a plug is active.
  */
-static void plugger_work(struct work_struct *work)
-{
-    struct plug_handle *plug =
-        container_of(work, struct plug_handle, unplug_work);
-    plug->unplug_fn(plug);
-}
-static void plugger_timeout(unsigned long data)
-{
-    struct plug_handle *plug = (void *)data;
-    kblockd_schedule_work(NULL, &plug->unplug_work);
-}
-void plugger_init(struct plug_handle *plug,
-          void (*unplug_fn)(struct plug_handle *))
-{
-    plug->unplug_flag = 0;
-    plug->unplug_fn = unplug_fn;
-    init_timer(&plug->unplug_timer);
-    plug->unplug_timer.function = plugger_timeout;
-    plug->unplug_timer.data = (unsigned long)plug;
-    INIT_WORK(&plug->unplug_work, plugger_work);
-}
-EXPORT_SYMBOL_GPL(plugger_init);
+struct md_plug_cb {
+    struct blk_plug_cb cb;
+    mddev_t *mddev;
+};
 
-void plugger_set_plug(struct plug_handle *plug)
-{
-    if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
-        mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
-}
-EXPORT_SYMBOL_GPL(plugger_set_plug);
-
-int plugger_remove_plug(struct plug_handle *plug)
-{
-    if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
-        del_timer(&plug->unplug_timer);
-        return 1;
-    } else
-        return 0;
-}
-EXPORT_SYMBOL_GPL(plugger_remove_plug);
-
+static void plugger_unplug(struct blk_plug_cb *cb)
+{
+    struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+    if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+        md_wakeup_thread(mdcb->mddev->thread);
+    kfree(mdcb);
+}
+
+/* Check that an unplug wakeup will come shortly.
+ * If not, wakeup the md thread immediately
+ */
+int mddev_check_plugged(mddev_t *mddev)
+{
+    struct blk_plug *plug = current->plug;
+    struct md_plug_cb *mdcb;
+
+    if (!plug)
+        return 0;
+
+    list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+        if (mdcb->cb.callback == plugger_unplug &&
+            mdcb->mddev == mddev) {
+            /* Already on the list, move to top */
+            if (mdcb != list_first_entry(&plug->cb_list,
+                             struct md_plug_cb,
+                             cb.list))
+                list_move(&mdcb->cb.list, &plug->cb_list);
+            return 1;
+        }
+    }
+    /* Not currently on the callback list */
+    mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+    if (!mdcb)
+        return 0;
+
+    mdcb->mddev = mddev;
+    mdcb->cb.callback = plugger_unplug;
+    atomic_inc(&mddev->plug_cnt);
+    list_add(&mdcb->cb.list, &plug->cb_list);
+    return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
 
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -538,6 +549,7 @@ void mddev_init(mddev_t *mddev)
     atomic_set(&mddev->active, 1);
     atomic_set(&mddev->openers, 0);
     atomic_set(&mddev->active_io, 0);
+    atomic_set(&mddev->plug_cnt, 0);
     spin_lock_init(&mddev->write_lock);
     atomic_set(&mddev->flush_pending, 0);
     init_waitqueue_head(&mddev->sb_wait);
@@ -3158,6 +3170,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
     mddev->layout = mddev->new_layout;
     mddev->chunk_sectors = mddev->new_chunk_sectors;
     mddev->delta_disks = 0;
+    mddev->degraded = 0;
     if (mddev->pers->sync_request == NULL) {
         /* this is now an array without redundancy, so
          * it must always be in_sync
@@ -4723,7 +4736,6 @@ static void md_clean(mddev_t *mddev)
     mddev->bitmap_info.chunksize = 0;
     mddev->bitmap_info.daemon_sleep = 0;
     mddev->bitmap_info.max_write_behind = 0;
-    mddev->plug = NULL;
 }
 
 static void __md_stop_writes(mddev_t *mddev)
@@ -6688,12 +6700,6 @@ int md_allow_write(mddev_t *mddev)
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
-void md_unplug(mddev_t *mddev)
-{
-    if (mddev->plug)
-        mddev->plug->unplug_fn(mddev->plug);
-}
-
 #define SYNC_MARKS 10
 #define SYNC_MARK_STEP (3*HZ)
 void md_do_sync(mddev_t *mddev)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 52b407369e13..0b1fd3f1d85b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,26 +29,6 @@
 typedef struct mddev_s mddev_t;
 typedef struct mdk_rdev_s mdk_rdev_t;
 
-/* generic plugging support - like that provided with request_queue,
- * but does not require a request_queue
- */
-struct plug_handle {
-    void (*unplug_fn)(struct plug_handle *);
-    struct timer_list unplug_timer;
-    struct work_struct unplug_work;
-    unsigned long unplug_flag;
-};
-#define PLUGGED_FLAG 1
-void plugger_init(struct plug_handle *plug,
-          void (*unplug_fn)(struct plug_handle *));
-void plugger_set_plug(struct plug_handle *plug);
-int plugger_remove_plug(struct plug_handle *plug);
-static inline void plugger_flush(struct plug_handle *plug)
-{
-    del_timer_sync(&plug->unplug_timer);
-    cancel_work_sync(&plug->unplug_work);
-}
-
 /*
  * MD's 'extended' device
  */
@@ -199,6 +179,9 @@ struct mddev_s
     int delta_disks, new_level, new_layout;
     int new_chunk_sectors;
 
+    atomic_t plug_cnt;  /* If device is expecting
+                 * more bios soon.
+                 */
     struct mdk_thread_s *thread;    /* management thread */
     struct mdk_thread_s *sync_thread;   /* doing resync or reconstruct */
     sector_t curr_resync;   /* last block scheduled */
@@ -336,7 +319,6 @@ struct mddev_s
     struct list_head all_mddevs;
 
     struct attribute_group *to_remove;
-    struct plug_handle *plug; /* if used by personality */
 
     struct bio_set *bio_set;
 
@@ -516,7 +498,6 @@ extern int md_integrity_register(mddev_t *mddev);
 extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
 extern void restore_bitmap_write_access(struct file *file);
-extern void md_unplug(mddev_t *mddev);
 
 extern void mddev_init(mddev_t *mddev);
 extern int md_run(mddev_t *mddev);
@@ -530,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
                    mddev_t *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                    mddev_t *mddev);
+extern int mddev_check_plugged(mddev_t *mddev);
 #endif /* _MD_MD_H */
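The md plugging rework rides on the caller's struct blk_plug: mddev_check_plugged() hooks plugger_unplug() into the current plug and bumps mddev->plug_cnt, and the raid threads postpone flushing until that count drops to zero. A hedged sketch of the resulting submit/daemon split, using only the helpers declared in this patch plus blk_start_plug()/blk_finish_plug(); it is an outline built next to md.h, not a drop-in:

#include <linux/blkdev.h>
#include "md.h"

/* The submitting thread batches requests under a blk_plug; the
 * personality thread only flushes shared state once no plug is active. */
static void submit_many(mddev_t *mddev, struct bio **bios, int n)
{
    struct blk_plug plug;
    int i;

    blk_start_plug(&plug);
    for (i = 0; i < n; i++) {
        /* mddev_check_plugged() registers plugger_unplug() on the
         * current plug and increments mddev->plug_cnt. */
        mddev_check_plugged(mddev);
        /* ... queue bios[i] onto the personality's pending lists ... */
    }
    blk_finish_plug(&plug); /* runs the callback, which drops plug_cnt
                 * and wakes the md thread */
}

static void daemon_step(mddev_t *mddev)
{
    if (atomic_read(&mddev->plug_cnt) == 0) {
        /* safe to flush pending writes / delayed stripes now */
    }
}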
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index c2a21ae56d97..2b7a7ff401dc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -565,12 +565,6 @@ static void flush_pending_writes(conf_t *conf)
     spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-    blk_flush_plug(current);
-    md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -600,7 +594,7 @@ static void raise_barrier(conf_t *conf)
 
     /* Wait until no block IO is waiting */
     wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-                conf->resync_lock, md_kick_device(conf->mddev));
+                conf->resync_lock, );
 
     /* block any new IO from starting */
     conf->barrier++;
@@ -608,7 +602,7 @@ static void raise_barrier(conf_t *conf)
     /* Now wait for all pending IO to complete */
     wait_event_lock_irq(conf->wait_barrier,
                 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                conf->resync_lock, md_kick_device(conf->mddev));
+                conf->resync_lock, );
 
     spin_unlock_irq(&conf->resync_lock);
 }
@@ -630,7 +624,7 @@ static void wait_barrier(conf_t *conf)
         conf->nr_waiting++;
         wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                     conf->resync_lock,
-                    md_kick_device(conf->mddev));
+                    );
         conf->nr_waiting--;
     }
     conf->nr_pending++;
@@ -666,8 +660,7 @@ static void freeze_array(conf_t *conf)
     wait_event_lock_irq(conf->wait_barrier,
                 conf->nr_pending == conf->nr_queued+1,
                 conf->resync_lock,
-                ({ flush_pending_writes(conf);
-                   md_kick_device(conf->mddev); }));
+                flush_pending_writes(conf));
     spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)
@@ -729,6 +722,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
     const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
     const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
     mdk_rdev_t *blocked_rdev;
+    int plugged;
 
     /*
      * Register the new request and wait if the reconstruction
@@ -820,6 +814,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
      * inc refcount on their rdev.  Record them by setting
      * bios[x] to bio
      */
+    plugged = mddev_check_plugged(mddev);
+
     disks = conf->raid_disks;
  retry_write:
     blocked_rdev = NULL;
@@ -925,7 +921,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
     /* In case raid1d snuck in to freeze_array */
     wake_up(&conf->wait_barrier);
 
-    if (do_sync || !bitmap)
+    if (do_sync || !bitmap || !plugged)
         md_wakeup_thread(mddev->thread);
 
     return 0;
@@ -1516,13 +1512,16 @@ static void raid1d(mddev_t *mddev)
     conf_t *conf = mddev->private;
     struct list_head *head = &conf->retry_list;
     mdk_rdev_t *rdev;
+    struct blk_plug plug;
 
     md_check_recovery(mddev);
 
+    blk_start_plug(&plug);
     for (;;) {
         char b[BDEVNAME_SIZE];
 
-        flush_pending_writes(conf);
+        if (atomic_read(&mddev->plug_cnt) == 0)
+            flush_pending_writes(conf);
 
         spin_lock_irqsave(&conf->device_lock, flags);
         if (list_empty(head)) {
@@ -1593,6 +1592,7 @@ static void raid1d(mddev_t *mddev)
         }
         cond_resched();
     }
+    blk_finish_plug(&plug);
 }
 
 
@@ -2039,7 +2039,6 @@ static int stop(mddev_t *mddev)
 
     md_unregister_thread(mddev->thread);
     mddev->thread = NULL;
-    blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
     if (conf->r1bio_pool)
         mempool_destroy(conf->r1bio_pool);
     kfree(conf->mirrors);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 2da83d566592..8e9462626ec5 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -634,12 +634,6 @@ static void flush_pending_writes(conf_t *conf)
     spin_unlock_irq(&conf->device_lock);
 }
 
-static void md_kick_device(mddev_t *mddev)
-{
-    blk_flush_plug(current);
-    md_wakeup_thread(mddev->thread);
-}
-
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -669,15 +663,15 @@ static void raise_barrier(conf_t *conf, int force)
 
     /* Wait until no block IO is waiting (unless 'force') */
     wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-                conf->resync_lock, md_kick_device(conf->mddev));
+                conf->resync_lock, );
 
     /* block any new IO from starting */
     conf->barrier++;
 
-    /* No wait for all pending IO to complete */
+    /* Now wait for all pending IO to complete */
     wait_event_lock_irq(conf->wait_barrier,
                 !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                conf->resync_lock, md_kick_device(conf->mddev));
+                conf->resync_lock, );
 
     spin_unlock_irq(&conf->resync_lock);
 }
@@ -698,7 +692,7 @@ static void wait_barrier(conf_t *conf)
         conf->nr_waiting++;
         wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                     conf->resync_lock,
-                    md_kick_device(conf->mddev));
+                    );
         conf->nr_waiting--;
     }
     conf->nr_pending++;
@@ -734,8 +728,8 @@ static void freeze_array(conf_t *conf)
     wait_event_lock_irq(conf->wait_barrier,
                 conf->nr_pending == conf->nr_queued+1,
                 conf->resync_lock,
-                ({ flush_pending_writes(conf);
-                   md_kick_device(conf->mddev); }));
+                flush_pending_writes(conf));
+
     spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -762,6 +756,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
     const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
     unsigned long flags;
     mdk_rdev_t *blocked_rdev;
+    int plugged;
 
     if (unlikely(bio->bi_rw & REQ_FLUSH)) {
         md_flush_request(mddev, bio);
@@ -870,6 +865,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
      * inc refcount on their rdev.  Record them by setting
      * bios[x] to bio
      */
+    plugged = mddev_check_plugged(mddev);
+
     raid10_find_phys(conf, r10_bio);
  retry_write:
     blocked_rdev = NULL;
@@ -946,9 +943,8 @@ static int make_request(mddev_t *mddev, struct bio * bio)
     /* In case raid10d snuck in to freeze_array */
     wake_up(&conf->wait_barrier);
 
-    if (do_sync || !mddev->bitmap)
+    if (do_sync || !mddev->bitmap || !plugged)
         md_wakeup_thread(mddev->thread);
-
     return 0;
 }
 
@@ -1640,9 +1636,11 @@ static void raid10d(mddev_t *mddev)
     conf_t *conf = mddev->private;
     struct list_head *head = &conf->retry_list;
     mdk_rdev_t *rdev;
+    struct blk_plug plug;
 
     md_check_recovery(mddev);
 
+    blk_start_plug(&plug);
     for (;;) {
         char b[BDEVNAME_SIZE];
 
@@ -1716,6 +1714,7 @@ static void raid10d(mddev_t *mddev)
         }
         cond_resched();
     }
+    blk_finish_plug(&plug);
 }
 
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e867ee42b152..49bf5f891435 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -27,12 +27,12 @@
  *
  * We group bitmap updates into batches.  Each batch has a number.
  * We may write out several batches at once, but that isn't very important.
- * conf->bm_write is the number of the last batch successfully written.
- * conf->bm_flush is the number of the last batch that was closed to
+ * conf->seq_write is the number of the last batch successfully written.
+ * conf->seq_flush is the number of the last batch that was closed to
  *    new additions.
  * When we discover that we will need to write to any block in a stripe
  * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
- * the number of the batch it will be in. This is bm_flush+1.
+ * the number of the batch it will be in. This is seq_flush+1.
  * When we are ready to do a write, if that batch hasn't been written yet,
  *   we plug the array and queue the stripe for later.
  * When an unplug happens, we increment bm_flush, thus closing the current
@@ -199,14 +199,12 @@ static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
     BUG_ON(!list_empty(&sh->lru));
     BUG_ON(atomic_read(&conf->active_stripes)==0);
     if (test_bit(STRIPE_HANDLE, &sh->state)) {
-        if (test_bit(STRIPE_DELAYED, &sh->state)) {
+        if (test_bit(STRIPE_DELAYED, &sh->state))
             list_add_tail(&sh->lru, &conf->delayed_list);
-            plugger_set_plug(&conf->plug);
-        } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
-               sh->bm_seq - conf->seq_write > 0) {
+        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+             sh->bm_seq - conf->seq_write > 0)
             list_add_tail(&sh->lru, &conf->bitmap_list);
-            plugger_set_plug(&conf->plug);
-        } else {
+        else {
             clear_bit(STRIPE_BIT_DELAY, &sh->state);
             list_add_tail(&sh->lru, &conf->handle_list);
         }
@@ -461,7 +459,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
                      < (conf->max_nr_stripes *3/4)
                     || !conf->inactive_blocked),
                     conf->device_lock,
-                    md_raid5_kick_device(conf));
+                    );
             conf->inactive_blocked = 0;
         } else
             init_stripe(sh, sector, previous);
@@ -1470,7 +1468,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
         wait_event_lock_irq(conf->wait_for_stripe,
                     !list_empty(&conf->inactive_list),
                     conf->device_lock,
-                    blk_flush_plug(current));
+                    );
         osh = get_free_stripe(conf);
         spin_unlock_irq(&conf->device_lock);
         atomic_set(&nsh->count, 1);
@@ -3623,8 +3621,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
             atomic_inc(&conf->preread_active_stripes);
             list_add_tail(&sh->lru, &conf->hold_list);
         }
-    } else
-        plugger_set_plug(&conf->plug);
+    }
 }
 
 static void activate_bit_delay(raid5_conf_t *conf)
@@ -3641,21 +3638,6 @@ static void activate_bit_delay(raid5_conf_t *conf)
     }
 }
 
-void md_raid5_kick_device(raid5_conf_t *conf)
-{
-    blk_flush_plug(current);
-    raid5_activate_delayed(conf);
-    md_wakeup_thread(conf->mddev->thread);
-}
-EXPORT_SYMBOL_GPL(md_raid5_kick_device);
-
-static void raid5_unplug(struct plug_handle *plug)
-{
-    raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
-
-    md_raid5_kick_device(conf);
-}
-
 int md_raid5_congested(mddev_t *mddev, int bits)
 {
     raid5_conf_t *conf = mddev->private;
@@ -3945,6 +3927,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
     struct stripe_head *sh;
     const int rw = bio_data_dir(bi);
     int remaining;
+    int plugged;
 
     if (unlikely(bi->bi_rw & REQ_FLUSH)) {
         md_flush_request(mddev, bi);
@@ -3963,6 +3946,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
     bi->bi_next = NULL;
     bi->bi_phys_segments = 1;   /* over-loaded to count active stripes */
 
+    plugged = mddev_check_plugged(mddev);
     for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
         DEFINE_WAIT(w);
         int disks, data_disks;
@@ -4057,7 +4041,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
                  * add failed due to overlap.  Flush everything
                  * and wait a while
                  */
-                md_raid5_kick_device(conf);
+                md_wakeup_thread(mddev->thread);
                 release_stripe(sh);
                 schedule();
                 goto retry;
@@ -4077,6 +4061,9 @@ static int make_request(mddev_t *mddev, struct bio * bi)
         }
 
     }
+    if (!plugged)
+        md_wakeup_thread(mddev->thread);
+
     spin_lock_irq(&conf->device_lock);
     remaining = raid5_dec_bi_phys_segments(bi);
     spin_unlock_irq(&conf->device_lock);
@@ -4478,24 +4465,30 @@ static void raid5d(mddev_t *mddev)
     struct stripe_head *sh;
     raid5_conf_t *conf = mddev->private;
     int handled;
+    struct blk_plug plug;
 
     pr_debug("+++ raid5d active\n");
 
     md_check_recovery(mddev);
 
+    blk_start_plug(&plug);
     handled = 0;
     spin_lock_irq(&conf->device_lock);
     while (1) {
         struct bio *bio;
 
-        if (conf->seq_flush != conf->seq_write) {
-            int seq = conf->seq_flush;
+        if (atomic_read(&mddev->plug_cnt) == 0 &&
+            !list_empty(&conf->bitmap_list)) {
+            /* Now is a good time to flush some bitmap updates */
+            conf->seq_flush++;
             spin_unlock_irq(&conf->device_lock);
             bitmap_unplug(mddev->bitmap);
             spin_lock_irq(&conf->device_lock);
-            conf->seq_write = seq;
+            conf->seq_write = conf->seq_flush;
             activate_bit_delay(conf);
         }
+        if (atomic_read(&mddev->plug_cnt) == 0)
+            raid5_activate_delayed(conf);
 
         while ((bio = remove_bio_from_retry(conf))) {
             int ok;
@@ -4525,6 +4518,7 @@ static void raid5d(mddev_t *mddev)
4525 spin_unlock_irq(&conf->device_lock); 4518 spin_unlock_irq(&conf->device_lock);
4526 4519
4527 async_tx_issue_pending_all(); 4520 async_tx_issue_pending_all();
4521 blk_finish_plug(&plug);
4528 4522
4529 pr_debug("--- raid5d inactive\n"); 4523 pr_debug("--- raid5d inactive\n");
4530} 4524}
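
The raid5 hunks above drop the driver-private plug_handle (md_raid5_kick_device()/raid5_unplug(), removed further up) in favour of the generic per-task plugging API: raid5d() brackets its main loop with blk_start_plug()/blk_finish_plug(), and make_request() records mddev_check_plugged() so it only wakes the raid5 thread when no plug is outstanding. A minimal sketch of the per-task pattern, assuming only the block-layer calls visible in this hunk (the submission loop itself is a hypothetical placeholder):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch of the per-task plugging pattern adopted by raid5d() above:
 * bios submitted while the plug is active may be held back and issued
 * as a batch when blk_finish_plug() runs (or when the task schedules).
 */
static void submit_batch(struct bio **bios, int count)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);                  /* start batching this task's I/O */
        for (i = 0; i < count; i++)
                generic_make_request(bios[i]);  /* hypothetical submission loop */
        blk_finish_plug(&plug);                 /* flush whatever was plugged */
}
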
@@ -5141,8 +5135,6 @@ static int run(mddev_t *mddev)
5141 mdname(mddev)); 5135 mdname(mddev));
5142 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5136 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
5143 5137
5144 plugger_init(&conf->plug, raid5_unplug);
5145 mddev->plug = &conf->plug;
5146 if (mddev->queue) { 5138 if (mddev->queue) {
5147 int chunk_size; 5139 int chunk_size;
5148 /* read-ahead size must cover two whole stripes, which 5140 /* read-ahead size must cover two whole stripes, which
@@ -5159,7 +5151,6 @@ static int run(mddev_t *mddev)
5159 5151
5160 mddev->queue->backing_dev_info.congested_data = mddev; 5152 mddev->queue->backing_dev_info.congested_data = mddev;
5161 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 5153 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
5162 mddev->queue->queue_lock = &conf->device_lock;
5163 5154
5164 chunk_size = mddev->chunk_sectors << 9; 5155 chunk_size = mddev->chunk_sectors << 9;
5165 blk_queue_io_min(mddev->queue, chunk_size); 5156 blk_queue_io_min(mddev->queue, chunk_size);
@@ -5192,7 +5183,6 @@ static int stop(mddev_t *mddev)
5192 mddev->thread = NULL; 5183 mddev->thread = NULL;
5193 if (mddev->queue) 5184 if (mddev->queue)
5194 mddev->queue->backing_dev_info.congested_fn = NULL; 5185 mddev->queue->backing_dev_info.congested_fn = NULL;
5195 plugger_flush(&conf->plug); /* the unplug fn references 'conf'*/
5196 free_conf(conf); 5186 free_conf(conf);
5197 mddev->private = NULL; 5187 mddev->private = NULL;
5198 mddev->to_remove = &raid5_attrs_group; 5188 mddev->to_remove = &raid5_attrs_group;
@@ -5688,6 +5678,7 @@ static void raid5_quiesce(mddev_t *mddev, int state)
5688static void *raid45_takeover_raid0(mddev_t *mddev, int level) 5678static void *raid45_takeover_raid0(mddev_t *mddev, int level)
5689{ 5679{
5690 struct raid0_private_data *raid0_priv = mddev->private; 5680 struct raid0_private_data *raid0_priv = mddev->private;
5681 sector_t sectors;
5691 5682
5692 /* for raid0 takeover only one zone is supported */ 5683 /* for raid0 takeover only one zone is supported */
5693 if (raid0_priv->nr_strip_zones > 1) { 5684 if (raid0_priv->nr_strip_zones > 1) {
@@ -5696,6 +5687,9 @@ static void *raid45_takeover_raid0(mddev_t *mddev, int level)
5696 return ERR_PTR(-EINVAL); 5687 return ERR_PTR(-EINVAL);
5697 } 5688 }
5698 5689
5690 sectors = raid0_priv->strip_zone[0].zone_end;
5691 sector_div(sectors, raid0_priv->strip_zone[0].nb_dev);
5692 mddev->dev_sectors = sectors;
5699 mddev->new_level = level; 5693 mddev->new_level = level;
5700 mddev->new_layout = ALGORITHM_PARITY_N; 5694 mddev->new_layout = ALGORITHM_PARITY_N;
5701 mddev->new_chunk_sectors = mddev->chunk_sectors; 5695 mddev->new_chunk_sectors = mddev->chunk_sectors;
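
In the raid45_takeover_raid0() hunk above, the per-device size is derived from the single raid0 zone before the level change: zone_end accumulates sectors across all member devices, so dividing it by nb_dev yields mddev->dev_sectors. A small sketch of that calculation, assuming the usual kernel 64-bit division helper (the header placement is approximate):

#include <linux/kernel.h>       /* sector_div(), assumed location */
#include <linux/types.h>        /* sector_t */

/* Illustrative helper: zone_end is the zone size summed over all member
 * devices, so zone_end / nb_dev is the contribution of one device.
 * sector_div() divides the sector_t in place, which stays correct for
 * 64-bit sector counts on 32-bit hosts where a plain '/' would not build.
 */
static sector_t zone_sectors_per_device(sector_t zone_end, unsigned int nb_dev)
{
        sector_t sectors = zone_end;

        sector_div(sectors, nb_dev);
        return sectors;
}
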
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 8d563a4f022a..3ca77a2613ba 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -400,8 +400,6 @@ struct raid5_private_data {
400 * Cleared when a sync completes. 400 * Cleared when a sync completes.
401 */ 401 */
402 402
403 struct plug_handle plug;
404
405 /* per cpu variables */ 403 /* per cpu variables */
406 struct raid5_percpu { 404 struct raid5_percpu {
407 struct page *spare_page; /* Used when checking P/Q in raid6 */ 405 struct page *spare_page; /* Used when checking P/Q in raid6 */
diff --git a/drivers/media/common/tuners/tda18271-common.c b/drivers/media/common/tuners/tda18271-common.c
index 5466d47db899..aae40e52af5b 100644
--- a/drivers/media/common/tuners/tda18271-common.c
+++ b/drivers/media/common/tuners/tda18271-common.c
@@ -533,16 +533,7 @@ int tda18271_calc_main_pll(struct dvb_frontend *fe, u32 freq)
533 if (tda_fail(ret)) 533 if (tda_fail(ret))
534 goto fail; 534 goto fail;
535 535
536 regs[R_MPD] = (0x77 & pd); 536 regs[R_MPD] = (0x7f & pd);
537
538 switch (priv->mode) {
539 case TDA18271_ANALOG:
540 regs[R_MPD] &= ~0x08;
541 break;
542 case TDA18271_DIGITAL:
543 regs[R_MPD] |= 0x08;
544 break;
545 }
546 537
547 div = ((d * (freq / 1000)) << 7) / 125; 538 div = ((d * (freq / 1000)) << 7) / 125;
548 539
diff --git a/drivers/media/common/tuners/tda18271-fe.c b/drivers/media/common/tuners/tda18271-fe.c
index 9ad4454a148d..d884f5eee73c 100644
--- a/drivers/media/common/tuners/tda18271-fe.c
+++ b/drivers/media/common/tuners/tda18271-fe.c
@@ -579,8 +579,8 @@ static int tda18271_rf_tracking_filters_init(struct dvb_frontend *fe, u32 freq)
579#define RF3 2 579#define RF3 2
580 u32 rf_default[3]; 580 u32 rf_default[3];
581 u32 rf_freq[3]; 581 u32 rf_freq[3];
582 u8 prog_cal[3]; 582 s32 prog_cal[3];
583 u8 prog_tab[3]; 583 s32 prog_tab[3];
584 584
585 i = tda18271_lookup_rf_band(fe, &freq, NULL); 585 i = tda18271_lookup_rf_band(fe, &freq, NULL);
586 586
@@ -602,32 +602,33 @@ static int tda18271_rf_tracking_filters_init(struct dvb_frontend *fe, u32 freq)
602 return bcal; 602 return bcal;
603 603
604 tda18271_calc_rf_cal(fe, &rf_freq[rf]); 604 tda18271_calc_rf_cal(fe, &rf_freq[rf]);
605 prog_tab[rf] = regs[R_EB14]; 605 prog_tab[rf] = (s32)regs[R_EB14];
606 606
607 if (1 == bcal) 607 if (1 == bcal)
608 prog_cal[rf] = tda18271_calibrate_rf(fe, rf_freq[rf]); 608 prog_cal[rf] =
609 (s32)tda18271_calibrate_rf(fe, rf_freq[rf]);
609 else 610 else
610 prog_cal[rf] = prog_tab[rf]; 611 prog_cal[rf] = prog_tab[rf];
611 612
612 switch (rf) { 613 switch (rf) {
613 case RF1: 614 case RF1:
614 map[i].rf_a1 = 0; 615 map[i].rf_a1 = 0;
615 map[i].rf_b1 = (s32)(prog_cal[RF1] - prog_tab[RF1]); 616 map[i].rf_b1 = (prog_cal[RF1] - prog_tab[RF1]);
616 map[i].rf1 = rf_freq[RF1] / 1000; 617 map[i].rf1 = rf_freq[RF1] / 1000;
617 break; 618 break;
618 case RF2: 619 case RF2:
619 dividend = (s32)(prog_cal[RF2] - prog_tab[RF2]) - 620 dividend = (prog_cal[RF2] - prog_tab[RF2] -
620 (s32)(prog_cal[RF1] + prog_tab[RF1]); 621 prog_cal[RF1] + prog_tab[RF1]);
621 divisor = (s32)(rf_freq[RF2] - rf_freq[RF1]) / 1000; 622 divisor = (s32)(rf_freq[RF2] - rf_freq[RF1]) / 1000;
622 map[i].rf_a1 = (dividend / divisor); 623 map[i].rf_a1 = (dividend / divisor);
623 map[i].rf2 = rf_freq[RF2] / 1000; 624 map[i].rf2 = rf_freq[RF2] / 1000;
624 break; 625 break;
625 case RF3: 626 case RF3:
626 dividend = (s32)(prog_cal[RF3] - prog_tab[RF3]) - 627 dividend = (prog_cal[RF3] - prog_tab[RF3] -
627 (s32)(prog_cal[RF2] + prog_tab[RF2]); 628 prog_cal[RF2] + prog_tab[RF2]);
628 divisor = (s32)(rf_freq[RF3] - rf_freq[RF2]) / 1000; 629 divisor = (s32)(rf_freq[RF3] - rf_freq[RF2]) / 1000;
629 map[i].rf_a2 = (dividend / divisor); 630 map[i].rf_a2 = (dividend / divisor);
630 map[i].rf_b2 = (s32)(prog_cal[RF2] - prog_tab[RF2]); 631 map[i].rf_b2 = (prog_cal[RF2] - prog_tab[RF2]);
631 map[i].rf3 = rf_freq[RF3] / 1000; 632 map[i].rf3 = rf_freq[RF3] / 1000;
632 break; 633 break;
633 default: 634 default:
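
The tda18271-fe.c hunk above widens prog_cal[] and prog_tab[] from u8 to s32 and rebalances the RF2/RF3 dividends: the slope numerator is the difference of the two (calibration - table) deltas, which can be negative, and the old expression added prog_tab of the lower point where it should have been subtracted. A hedged sketch of the per-step slope computation (helper name and parameters are illustrative, not the driver's own):

#include <linux/types.h>

/* Slope between two RF calibration points, mirroring the dividend/divisor
 * expressions in the hunk. Assumes freq_hi and freq_lo are far enough
 * apart that the scaled divisor is non-zero, which the driver's band
 * tables appear to guarantee.
 */
static s32 rf_cal_slope(s32 cal_lo, s32 tab_lo, u32 freq_lo,
                        s32 cal_hi, s32 tab_hi, u32 freq_hi)
{
        s32 dividend = (cal_hi - tab_hi) - (cal_lo - tab_lo);
        s32 divisor = (s32)(freq_hi - freq_lo) / 1000;

        return dividend / divisor;
}
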
diff --git a/drivers/media/common/tuners/tda18271-maps.c b/drivers/media/common/tuners/tda18271-maps.c
index e7f84c705da8..3d5b6ab7e332 100644
--- a/drivers/media/common/tuners/tda18271-maps.c
+++ b/drivers/media/common/tuners/tda18271-maps.c
@@ -229,8 +229,7 @@ static struct tda18271_map tda18271c2_km[] = {
229static struct tda18271_map tda18271_rf_band[] = { 229static struct tda18271_map tda18271_rf_band[] = {
230 { .rfmax = 47900, .val = 0x00 }, 230 { .rfmax = 47900, .val = 0x00 },
231 { .rfmax = 61100, .val = 0x01 }, 231 { .rfmax = 61100, .val = 0x01 },
232/* { .rfmax = 152600, .val = 0x02 }, */ 232 { .rfmax = 152600, .val = 0x02 },
233 { .rfmax = 121200, .val = 0x02 },
234 { .rfmax = 164700, .val = 0x03 }, 233 { .rfmax = 164700, .val = 0x03 },
235 { .rfmax = 203500, .val = 0x04 }, 234 { .rfmax = 203500, .val = 0x04 },
236 { .rfmax = 457800, .val = 0x05 }, 235 { .rfmax = 457800, .val = 0x05 },
@@ -448,7 +447,7 @@ static struct tda18271_map tda18271c2_rf_cal[] = {
448 { .rfmax = 150000, .val = 0xb0 }, 447 { .rfmax = 150000, .val = 0xb0 },
449 { .rfmax = 151000, .val = 0xb1 }, 448 { .rfmax = 151000, .val = 0xb1 },
450 { .rfmax = 152000, .val = 0xb7 }, 449 { .rfmax = 152000, .val = 0xb7 },
451 { .rfmax = 153000, .val = 0xbd }, 450 { .rfmax = 152600, .val = 0xbd },
452 { .rfmax = 154000, .val = 0x20 }, 451 { .rfmax = 154000, .val = 0x20 },
453 { .rfmax = 155000, .val = 0x22 }, 452 { .rfmax = 155000, .val = 0x22 },
454 { .rfmax = 156000, .val = 0x24 }, 453 { .rfmax = 156000, .val = 0x24 },
@@ -459,7 +458,7 @@ static struct tda18271_map tda18271c2_rf_cal[] = {
459 { .rfmax = 161000, .val = 0x2d }, 458 { .rfmax = 161000, .val = 0x2d },
460 { .rfmax = 163000, .val = 0x2e }, 459 { .rfmax = 163000, .val = 0x2e },
461 { .rfmax = 164000, .val = 0x2f }, 460 { .rfmax = 164000, .val = 0x2f },
462 { .rfmax = 165000, .val = 0x30 }, 461 { .rfmax = 164700, .val = 0x30 },
463 { .rfmax = 166000, .val = 0x11 }, 462 { .rfmax = 166000, .val = 0x11 },
464 { .rfmax = 167000, .val = 0x12 }, 463 { .rfmax = 167000, .val = 0x12 },
465 { .rfmax = 168000, .val = 0x13 }, 464 { .rfmax = 168000, .val = 0x13 },
@@ -510,7 +509,8 @@ static struct tda18271_map tda18271c2_rf_cal[] = {
510 { .rfmax = 236000, .val = 0x1b }, 509 { .rfmax = 236000, .val = 0x1b },
511 { .rfmax = 237000, .val = 0x1c }, 510 { .rfmax = 237000, .val = 0x1c },
512 { .rfmax = 240000, .val = 0x1d }, 511 { .rfmax = 240000, .val = 0x1d },
513 { .rfmax = 242000, .val = 0x1f }, 512 { .rfmax = 242000, .val = 0x1e },
513 { .rfmax = 244000, .val = 0x1f },
514 { .rfmax = 247000, .val = 0x20 }, 514 { .rfmax = 247000, .val = 0x20 },
515 { .rfmax = 249000, .val = 0x21 }, 515 { .rfmax = 249000, .val = 0x21 },
516 { .rfmax = 252000, .val = 0x22 }, 516 { .rfmax = 252000, .val = 0x22 },
@@ -624,7 +624,7 @@ static struct tda18271_map tda18271c2_rf_cal[] = {
624 { .rfmax = 453000, .val = 0x93 }, 624 { .rfmax = 453000, .val = 0x93 },
625 { .rfmax = 454000, .val = 0x94 }, 625 { .rfmax = 454000, .val = 0x94 },
626 { .rfmax = 456000, .val = 0x96 }, 626 { .rfmax = 456000, .val = 0x96 },
627 { .rfmax = 457000, .val = 0x98 }, 627 { .rfmax = 457800, .val = 0x98 },
628 { .rfmax = 461000, .val = 0x11 }, 628 { .rfmax = 461000, .val = 0x11 },
629 { .rfmax = 468000, .val = 0x12 }, 629 { .rfmax = 468000, .val = 0x12 },
630 { .rfmax = 472000, .val = 0x13 }, 630 { .rfmax = 472000, .val = 0x13 },
diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c
index 955254090a0e..03f96d6ca894 100644
--- a/drivers/media/dvb/b2c2/flexcop-pci.c
+++ b/drivers/media/dvb/b2c2/flexcop-pci.c
@@ -38,7 +38,7 @@ MODULE_PARM_DESC(debug,
38 DEBSTATUS); 38 DEBSTATUS);
39 39
40#define DRIVER_VERSION "0.1" 40#define DRIVER_VERSION "0.1"
41#define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV PCI Driver" 41#define DRIVER_NAME "flexcop-pci"
42#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>" 42#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"
43 43
44struct flexcop_pci { 44struct flexcop_pci {
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index fe4f894183ff..ccbd39a38c46 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -362,7 +362,7 @@ config DVB_USB_LME2510
362config DVB_USB_TECHNISAT_USB2 362config DVB_USB_TECHNISAT_USB2
363 tristate "Technisat DVB-S/S2 USB2.0 support" 363 tristate "Technisat DVB-S/S2 USB2.0 support"
364 depends on DVB_USB 364 depends on DVB_USB
365 select DVB_STB0899 if !DVB_FE_CUSTOMISE 365 select DVB_STV090x if !DVB_FE_CUSTOMISE
366 select DVB_STB6100 if !DVB_FE_CUSTOMISE 366 select DVB_STV6110x if !DVB_FE_CUSTOMISE
367 help 367 help
368 Say Y here to support the Technisat USB2 DVB-S/S2 device 368 Say Y here to support the Technisat USB2 DVB-S/S2 device
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index 97af266d7f1d..65214af5cd74 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -2162,7 +2162,7 @@ struct dibx000_agc_config dib7090_agc_config[2] = {
2162 .agc1_pt3 = 98, 2162 .agc1_pt3 = 98,
2163 .agc1_slope1 = 0, 2163 .agc1_slope1 = 0,
2164 .agc1_slope2 = 167, 2164 .agc1_slope2 = 167,
2165 .agc1_pt1 = 98, 2165 .agc2_pt1 = 98,
2166 .agc2_pt2 = 255, 2166 .agc2_pt2 = 255,
2167 .agc2_slope1 = 104, 2167 .agc2_slope1 = 104,
2168 .agc2_slope2 = 0, 2168 .agc2_slope2 = 0,
@@ -2440,11 +2440,11 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap)
2440 dib0700_set_i2c_speed(adap->dev, 340); 2440 dib0700_set_i2c_speed(adap->dev, 340);
2441 adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x90, &tfe7090pvr_dib7000p_config[0]); 2441 adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x90, &tfe7090pvr_dib7000p_config[0]);
2442 2442
2443 dib7090_slave_reset(adap->fe);
2444
2445 if (adap->fe == NULL) 2443 if (adap->fe == NULL)
2446 return -ENODEV; 2444 return -ENODEV;
2447 2445
2446 dib7090_slave_reset(adap->fe);
2447
2448 return 0; 2448 return 0;
2449} 2449}
2450 2450
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index 23640ed44d85..056138f63c7d 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -378,7 +378,6 @@ EXPORT_SYMBOL_GPL(media_entity_create_link);
378 378
379static int __media_entity_setup_link_notify(struct media_link *link, u32 flags) 379static int __media_entity_setup_link_notify(struct media_link *link, u32 flags)
380{ 380{
381 const u32 mask = MEDIA_LNK_FL_ENABLED;
382 int ret; 381 int ret;
383 382
384 /* Notify both entities. */ 383 /* Notify both entities. */
@@ -395,7 +394,7 @@ static int __media_entity_setup_link_notify(struct media_link *link, u32 flags)
395 return ret; 394 return ret;
396 } 395 }
397 396
398 link->flags = (link->flags & ~mask) | (flags & mask); 397 link->flags = flags;
399 link->reverse->flags = link->flags; 398 link->reverse->flags = link->flags;
400 399
401 return 0; 400 return 0;
@@ -417,6 +416,7 @@ static int __media_entity_setup_link_notify(struct media_link *link, u32 flags)
417 */ 416 */
418int __media_entity_setup_link(struct media_link *link, u32 flags) 417int __media_entity_setup_link(struct media_link *link, u32 flags)
419{ 418{
419 const u32 mask = MEDIA_LNK_FL_ENABLED;
420 struct media_device *mdev; 420 struct media_device *mdev;
421 struct media_entity *source, *sink; 421 struct media_entity *source, *sink;
422 int ret = -EBUSY; 422 int ret = -EBUSY;
@@ -424,6 +424,10 @@ int __media_entity_setup_link(struct media_link *link, u32 flags)
424 if (link == NULL) 424 if (link == NULL)
425 return -EINVAL; 425 return -EINVAL;
426 426
427 /* The non-modifiable link flags must not be modified. */
428 if ((link->flags & ~mask) != (flags & ~mask))
429 return -EINVAL;
430
427 if (link->flags & MEDIA_LNK_FL_IMMUTABLE) 431 if (link->flags & MEDIA_LNK_FL_IMMUTABLE)
428 return link->flags == flags ? 0 : -EINVAL; 432 return link->flags == flags ? 0 : -EINVAL;
429 433
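
The media-entity change above moves the MEDIA_LNK_FL_ENABLED mask from the notify helper into __media_entity_setup_link() and turns it into a hard check: a setup request that touches any bit other than ENABLED is now rejected with -EINVAL instead of having those bits silently masked off. A minimal sketch of that invariant, assuming only the flag definition from the media controller header:

#include <linux/errno.h>
#include <linux/media.h>        /* MEDIA_LNK_FL_ENABLED */
#include <linux/types.h>

/* Illustration of the policy added above: callers may only toggle the
 * ENABLED bit; every other bit of the requested flags must match the
 * link's current flags exactly.
 */
static int check_requested_link_flags(u32 current_flags, u32 requested_flags)
{
        const u32 mask = MEDIA_LNK_FL_ENABLED;

        if ((current_flags & ~mask) != (requested_flags & ~mask))
                return -EINVAL; /* non-modifiable bits were changed */

        return 0;
}
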
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index dc3f04c52d5e..87bad7678d92 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -170,7 +170,7 @@ static int fmr2_setfreq(struct fmr2 *dev)
170 return 0; 170 return 0;
171} 171}
172 172
173/* !!! not tested, in my card this does't work !!! */ 173/* !!! not tested, in my card this doesn't work !!! */
174static int fmr2_setvolume(struct fmr2 *dev) 174static int fmr2_setvolume(struct fmr2 *dev)
175{ 175{
176 int vol[16] = { 0x021, 0x084, 0x090, 0x104, 176 int vol[16] = { 0x021, 0x084, 0x090, 0x104,
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 4498b944dec8..00f51dd121f3 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -875,7 +875,7 @@ config MX3_VIDEO
875config VIDEO_MX3 875config VIDEO_MX3
876 tristate "i.MX3x Camera Sensor Interface driver" 876 tristate "i.MX3x Camera Sensor Interface driver"
877 depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA 877 depends on VIDEO_DEV && MX3_IPU && SOC_CAMERA
878 select VIDEOBUF_DMA_CONTIG 878 select VIDEOBUF2_DMA_CONTIG
879 select MX3_VIDEO 879 select MX3_VIDEO
880 ---help--- 880 ---help---
881 This is a v4l2 driver for the i.MX3x Camera Sensor Interface 881 This is a v4l2 driver for the i.MX3x Camera Sensor Interface
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index c6e2ca3b1149..6fbc356113c1 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -350,9 +350,17 @@ void cx18_streams_cleanup(struct cx18 *cx, int unregister)
350 350
351 /* No struct video_device, but can have buffers allocated */ 351 /* No struct video_device, but can have buffers allocated */
352 if (type == CX18_ENC_STREAM_TYPE_IDX) { 352 if (type == CX18_ENC_STREAM_TYPE_IDX) {
353 /* If the module params didn't inhibit IDX ... */
353 if (cx->stream_buffers[type] != 0) { 354 if (cx->stream_buffers[type] != 0) {
354 cx->stream_buffers[type] = 0; 355 cx->stream_buffers[type] = 0;
355 cx18_stream_free(&cx->streams[type]); 356 /*
357 * Before calling cx18_stream_free(),
358 * check if the IDX stream was actually set up.
359 * Needed, since the cx18_probe() error path
360 * exits through here as well as normal clean up
361 */
362 if (cx->streams[type].buffers != 0)
363 cx18_stream_free(&cx->streams[type]);
356 } 364 }
357 continue; 365 continue;
358 } 366 }
diff --git a/drivers/media/video/cx23885/Kconfig b/drivers/media/video/cx23885/Kconfig
index 3b6e7f28568e..caab1bfb79e2 100644
--- a/drivers/media/video/cx23885/Kconfig
+++ b/drivers/media/video/cx23885/Kconfig
@@ -22,6 +22,7 @@ config VIDEO_CX23885
22 select DVB_CX24116 if !DVB_FE_CUSTOMISE 22 select DVB_CX24116 if !DVB_FE_CUSTOMISE
23 select DVB_STV0900 if !DVB_FE_CUSTOMISE 23 select DVB_STV0900 if !DVB_FE_CUSTOMISE
24 select DVB_DS3000 if !DVB_FE_CUSTOMISE 24 select DVB_DS3000 if !DVB_FE_CUSTOMISE
25 select DVB_STV0367 if !DVB_FE_CUSTOMISE
25 select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE 26 select MEDIA_TUNER_MT2131 if !MEDIA_TUNER_CUSTOMISE
26 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE 27 select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
27 select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE 28 select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE
diff --git a/drivers/media/video/imx074.c b/drivers/media/video/imx074.c
index 1a1169115716..0382ea752e6f 100644
--- a/drivers/media/video/imx074.c
+++ b/drivers/media/video/imx074.c
@@ -298,7 +298,7 @@ static unsigned long imx074_query_bus_param(struct soc_camera_device *icd)
298static int imx074_set_bus_param(struct soc_camera_device *icd, 298static int imx074_set_bus_param(struct soc_camera_device *icd,
299 unsigned long flags) 299 unsigned long flags)
300{ 300{
301 return -1; 301 return -EINVAL;
302} 302}
303 303
304static struct soc_camera_ops imx074_ops = { 304static struct soc_camera_ops imx074_ops = {
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 503bd7922bd6..472a69359e60 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -215,20 +215,21 @@ static u32 isp_set_xclk(struct isp_device *isp, u32 xclk, u8 xclksel)
215 } 215 }
216 216
217 switch (xclksel) { 217 switch (xclksel) {
218 case 0: 218 case ISP_XCLK_A:
219 isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 219 isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
220 ISPTCTRL_CTRL_DIVA_MASK, 220 ISPTCTRL_CTRL_DIVA_MASK,
221 divisor << ISPTCTRL_CTRL_DIVA_SHIFT); 221 divisor << ISPTCTRL_CTRL_DIVA_SHIFT);
222 dev_dbg(isp->dev, "isp_set_xclk(): cam_xclka set to %d Hz\n", 222 dev_dbg(isp->dev, "isp_set_xclk(): cam_xclka set to %d Hz\n",
223 currentxclk); 223 currentxclk);
224 break; 224 break;
225 case 1: 225 case ISP_XCLK_B:
226 isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 226 isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
227 ISPTCTRL_CTRL_DIVB_MASK, 227 ISPTCTRL_CTRL_DIVB_MASK,
228 divisor << ISPTCTRL_CTRL_DIVB_SHIFT); 228 divisor << ISPTCTRL_CTRL_DIVB_SHIFT);
229 dev_dbg(isp->dev, "isp_set_xclk(): cam_xclkb set to %d Hz\n", 229 dev_dbg(isp->dev, "isp_set_xclk(): cam_xclkb set to %d Hz\n",
230 currentxclk); 230 currentxclk);
231 break; 231 break;
232 case ISP_XCLK_NONE:
232 default: 233 default:
233 omap3isp_put(isp); 234 omap3isp_put(isp);
234 dev_dbg(isp->dev, "ISP_ERR: isp_set_xclk(): Invalid requested " 235 dev_dbg(isp->dev, "ISP_ERR: isp_set_xclk(): Invalid requested "
@@ -237,13 +238,13 @@ static u32 isp_set_xclk(struct isp_device *isp, u32 xclk, u8 xclksel)
237 } 238 }
238 239
239 /* Do we go from stable whatever to clock? */ 240 /* Do we go from stable whatever to clock? */
240 if (divisor >= 2 && isp->xclk_divisor[xclksel] < 2) 241 if (divisor >= 2 && isp->xclk_divisor[xclksel - 1] < 2)
241 omap3isp_get(isp); 242 omap3isp_get(isp);
242 /* Stopping the clock. */ 243 /* Stopping the clock. */
243 else if (divisor < 2 && isp->xclk_divisor[xclksel] >= 2) 244 else if (divisor < 2 && isp->xclk_divisor[xclksel - 1] >= 2)
244 omap3isp_put(isp); 245 omap3isp_put(isp);
245 246
246 isp->xclk_divisor[xclksel] = divisor; 247 isp->xclk_divisor[xclksel - 1] = divisor;
247 248
248 omap3isp_put(isp); 249 omap3isp_put(isp);
249 250
@@ -285,7 +286,8 @@ static void isp_power_settings(struct isp_device *isp, int idle)
285 */ 286 */
286void omap3isp_configure_bridge(struct isp_device *isp, 287void omap3isp_configure_bridge(struct isp_device *isp,
287 enum ccdc_input_entity input, 288 enum ccdc_input_entity input,
288 const struct isp_parallel_platform_data *pdata) 289 const struct isp_parallel_platform_data *pdata,
290 unsigned int shift)
289{ 291{
290 u32 ispctrl_val; 292 u32 ispctrl_val;
291 293
@@ -298,9 +300,9 @@ void omap3isp_configure_bridge(struct isp_device *isp,
298 switch (input) { 300 switch (input) {
299 case CCDC_INPUT_PARALLEL: 301 case CCDC_INPUT_PARALLEL:
300 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL; 302 ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
301 ispctrl_val |= pdata->data_lane_shift << ISPCTRL_SHIFT_SHIFT;
302 ispctrl_val |= pdata->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT; 303 ispctrl_val |= pdata->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
303 ispctrl_val |= pdata->bridge << ISPCTRL_PAR_BRIDGE_SHIFT; 304 ispctrl_val |= pdata->bridge << ISPCTRL_PAR_BRIDGE_SHIFT;
305 shift += pdata->data_lane_shift * 2;
304 break; 306 break;
305 307
306 case CCDC_INPUT_CSI2A: 308 case CCDC_INPUT_CSI2A:
@@ -319,6 +321,8 @@ void omap3isp_configure_bridge(struct isp_device *isp,
319 return; 321 return;
320 } 322 }
321 323
324 ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK;
325
322 ispctrl_val &= ~ISPCTRL_SYNC_DETECT_MASK; 326 ispctrl_val &= ~ISPCTRL_SYNC_DETECT_MASK;
323 ispctrl_val |= ISPCTRL_SYNC_DETECT_VSRISE; 327 ispctrl_val |= ISPCTRL_SYNC_DETECT_VSRISE;
324 328
@@ -658,6 +662,8 @@ int omap3isp_pipeline_pm_use(struct media_entity *entity, int use)
658 662
659 /* Apply power change to connected non-nodes. */ 663 /* Apply power change to connected non-nodes. */
660 ret = isp_pipeline_pm_power(entity, change); 664 ret = isp_pipeline_pm_power(entity, change);
665 if (ret < 0)
666 entity->use_count -= change;
661 667
662 mutex_unlock(&entity->parent->graph_mutex); 668 mutex_unlock(&entity->parent->graph_mutex);
663 669
@@ -872,6 +878,9 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
872 } 878 }
873 } 879 }
874 880
881 if (failure < 0)
882 isp->needs_reset = true;
883
875 return failure; 884 return failure;
876} 885}
877 886
@@ -884,7 +893,8 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
884 * single-shot or continuous mode. 893 * single-shot or continuous mode.
885 * 894 *
886 * Return 0 if successful, or the return value of the failed video::s_stream 895 * Return 0 if successful, or the return value of the failed video::s_stream
887 * operation otherwise. 896 * operation otherwise. The pipeline state is not updated when the operation
897 * fails, except when stopping the pipeline.
888 */ 898 */
889int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe, 899int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
890 enum isp_pipeline_stream_state state) 900 enum isp_pipeline_stream_state state)
@@ -895,7 +905,9 @@ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
895 ret = isp_pipeline_disable(pipe); 905 ret = isp_pipeline_disable(pipe);
896 else 906 else
897 ret = isp_pipeline_enable(pipe, state); 907 ret = isp_pipeline_enable(pipe, state);
898 pipe->stream_state = state; 908
909 if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED)
910 pipe->stream_state = state;
899 911
900 return ret; 912 return ret;
901} 913}
@@ -1481,6 +1493,10 @@ void omap3isp_put(struct isp_device *isp)
1481 if (--isp->ref_count == 0) { 1493 if (--isp->ref_count == 0) {
1482 isp_disable_interrupts(isp); 1494 isp_disable_interrupts(isp);
1483 isp_save_ctx(isp); 1495 isp_save_ctx(isp);
1496 if (isp->needs_reset) {
1497 isp_reset(isp);
1498 isp->needs_reset = false;
1499 }
1484 isp_disable_clocks(isp); 1500 isp_disable_clocks(isp);
1485 } 1501 }
1486 mutex_unlock(&isp->isp_mutex); 1502 mutex_unlock(&isp->isp_mutex);
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index cf5214e95a92..2620c405f5e4 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -132,7 +132,6 @@ struct isp_reg {
132 132
133/** 133/**
134 * struct isp_parallel_platform_data - Parallel interface platform data 134 * struct isp_parallel_platform_data - Parallel interface platform data
135 * @width: Parallel bus width in bits (8, 10, 11 or 12)
136 * @data_lane_shift: Data lane shifter 135 * @data_lane_shift: Data lane shifter
137 * 0 - CAMEXT[13:0] -> CAM[13:0] 136 * 0 - CAMEXT[13:0] -> CAM[13:0]
138 * 1 - CAMEXT[13:2] -> CAM[11:0] 137 * 1 - CAMEXT[13:2] -> CAM[11:0]
@@ -146,7 +145,6 @@ struct isp_reg {
146 * ISPCTRL_PAR_BRIDGE_BENDIAN - Big endian 145 * ISPCTRL_PAR_BRIDGE_BENDIAN - Big endian
147 */ 146 */
148struct isp_parallel_platform_data { 147struct isp_parallel_platform_data {
149 unsigned int width;
150 unsigned int data_lane_shift:2; 148 unsigned int data_lane_shift:2;
151 unsigned int clk_pol:1; 149 unsigned int clk_pol:1;
152 unsigned int bridge:4; 150 unsigned int bridge:4;
@@ -262,6 +260,7 @@ struct isp_device {
262 /* ISP Obj */ 260 /* ISP Obj */
263 spinlock_t stat_lock; /* common lock for statistic drivers */ 261 spinlock_t stat_lock; /* common lock for statistic drivers */
264 struct mutex isp_mutex; /* For handling ref_count field */ 262 struct mutex isp_mutex; /* For handling ref_count field */
263 bool needs_reset;
265 int has_context; 264 int has_context;
266 int ref_count; 265 int ref_count;
267 unsigned int autoidle; 266 unsigned int autoidle;
@@ -311,11 +310,12 @@ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe,
311 enum isp_pipeline_stream_state state); 310 enum isp_pipeline_stream_state state);
312void omap3isp_configure_bridge(struct isp_device *isp, 311void omap3isp_configure_bridge(struct isp_device *isp,
313 enum ccdc_input_entity input, 312 enum ccdc_input_entity input,
314 const struct isp_parallel_platform_data *pdata); 313 const struct isp_parallel_platform_data *pdata,
314 unsigned int shift);
315 315
316#define ISP_XCLK_NONE -1 316#define ISP_XCLK_NONE 0
317#define ISP_XCLK_A 0 317#define ISP_XCLK_A 1
318#define ISP_XCLK_B 1 318#define ISP_XCLK_B 2
319 319
320struct isp_device *omap3isp_get(struct isp_device *isp); 320struct isp_device *omap3isp_get(struct isp_device *isp);
321void omap3isp_put(struct isp_device *isp); 321void omap3isp_put(struct isp_device *isp);
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 5ff9d14ce710..39d501bda636 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -43,6 +43,12 @@ __ccdc_get_format(struct isp_ccdc_device *ccdc, struct v4l2_subdev_fh *fh,
43 43
44static const unsigned int ccdc_fmts[] = { 44static const unsigned int ccdc_fmts[] = {
45 V4L2_MBUS_FMT_Y8_1X8, 45 V4L2_MBUS_FMT_Y8_1X8,
46 V4L2_MBUS_FMT_Y10_1X10,
47 V4L2_MBUS_FMT_Y12_1X12,
48 V4L2_MBUS_FMT_SGRBG8_1X8,
49 V4L2_MBUS_FMT_SRGGB8_1X8,
50 V4L2_MBUS_FMT_SBGGR8_1X8,
51 V4L2_MBUS_FMT_SGBRG8_1X8,
46 V4L2_MBUS_FMT_SGRBG10_1X10, 52 V4L2_MBUS_FMT_SGRBG10_1X10,
47 V4L2_MBUS_FMT_SRGGB10_1X10, 53 V4L2_MBUS_FMT_SRGGB10_1X10,
48 V4L2_MBUS_FMT_SBGGR10_1X10, 54 V4L2_MBUS_FMT_SBGGR10_1X10,
@@ -1110,21 +1116,38 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
1110 struct isp_parallel_platform_data *pdata = NULL; 1116 struct isp_parallel_platform_data *pdata = NULL;
1111 struct v4l2_subdev *sensor; 1117 struct v4l2_subdev *sensor;
1112 struct v4l2_mbus_framefmt *format; 1118 struct v4l2_mbus_framefmt *format;
1119 const struct isp_format_info *fmt_info;
1120 struct v4l2_subdev_format fmt_src;
1121 unsigned int depth_out;
1122 unsigned int depth_in = 0;
1113 struct media_pad *pad; 1123 struct media_pad *pad;
1114 unsigned long flags; 1124 unsigned long flags;
1125 unsigned int shift;
1115 u32 syn_mode; 1126 u32 syn_mode;
1116 u32 ccdc_pattern; 1127 u32 ccdc_pattern;
1117 1128
1118 if (ccdc->input == CCDC_INPUT_PARALLEL) { 1129 pad = media_entity_remote_source(&ccdc->pads[CCDC_PAD_SINK]);
1119 pad = media_entity_remote_source(&ccdc->pads[CCDC_PAD_SINK]); 1130 sensor = media_entity_to_v4l2_subdev(pad->entity);
1120 sensor = media_entity_to_v4l2_subdev(pad->entity); 1131 if (ccdc->input == CCDC_INPUT_PARALLEL)
1121 pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv) 1132 pdata = &((struct isp_v4l2_subdevs_group *)sensor->host_priv)
1122 ->bus.parallel; 1133 ->bus.parallel;
1134
1135 /* Compute shift value for lane shifter to configure the bridge. */
1136 fmt_src.pad = pad->index;
1137 fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1138 if (!v4l2_subdev_call(sensor, pad, get_fmt, NULL, &fmt_src)) {
1139 fmt_info = omap3isp_video_format_info(fmt_src.format.code);
1140 depth_in = fmt_info->bpp;
1123 } 1141 }
1124 1142
1125 omap3isp_configure_bridge(isp, ccdc->input, pdata); 1143 fmt_info = omap3isp_video_format_info
1144 (isp->isp_ccdc.formats[CCDC_PAD_SINK].code);
1145 depth_out = fmt_info->bpp;
1146
1147 shift = depth_in - depth_out;
1148 omap3isp_configure_bridge(isp, ccdc->input, pdata, shift);
1126 1149
1127 ccdc->syncif.datsz = pdata ? pdata->width : 10; 1150 ccdc->syncif.datsz = depth_out;
1128 ccdc_config_sync_if(ccdc, &ccdc->syncif); 1151 ccdc_config_sync_if(ccdc, &ccdc->syncif);
1129 1152
1130 /* CCDC_PAD_SINK */ 1153 /* CCDC_PAD_SINK */
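
In the ccdc_configure() hunk above, the lane-shifter value no longer comes from platform data alone: the driver reads the remote sensor's active format for its bits per pixel (depth_in), takes the CCDC sink format's depth as depth_out, and hands shift = depth_in - depth_out to omap3isp_configure_bridge(); parallel buses add data_lane_shift * 2 before the register is written as shift / 2, which suggests the SHIFT field counts in 2-bit steps. A short illustrative sketch with hypothetical depths:

/* Hypothetical example of the shift computation above: a 12-bit sensor
 * format feeding a 10-bit CCDC sink format needs 2 bits dropped by the
 * lane shifter; a matching 10-bit pair needs none.
 */
static unsigned int ccdc_lane_shift(unsigned int sensor_bpp,
                                    unsigned int ccdc_sink_bpp)
{
        return sensor_bpp - ccdc_sink_bpp;      /* e.g. 12 - 10 = 2 */
}
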
@@ -1338,7 +1361,7 @@ static int ccdc_sbl_wait_idle(struct isp_ccdc_device *ccdc,
1338 * @ccdc: Pointer to ISP CCDC device. 1361 * @ccdc: Pointer to ISP CCDC device.
1339 * @event: Pointing which event trigger handler 1362 * @event: Pointing which event trigger handler
1340 * 1363 *
1341 * Return 1 when the event and stopping request combination is satisfyied, 1364 * Return 1 when the event and stopping request combination is satisfied,
1342 * zero otherwise. 1365 * zero otherwise.
1343 */ 1366 */
1344static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event) 1367static int __ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
@@ -1618,7 +1641,7 @@ static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
1618 1641
1619 ccdc_set_outaddr(ccdc, buffer->isp_addr); 1642 ccdc_set_outaddr(ccdc, buffer->isp_addr);
1620 1643
1621 /* We now have a buffer queued on the output, restart the pipeline in 1644 /* We now have a buffer queued on the output, restart the pipeline
1622 * on the next CCDC interrupt if running in continuous mode (or when 1645 * on the next CCDC interrupt if running in continuous mode (or when
1623 * starting the stream). 1646 * starting the stream).
1624 */ 1647 */
diff --git a/drivers/media/video/omap3isp/isppreview.c b/drivers/media/video/omap3isp/isppreview.c
index 2b16988a501d..aba537af87e4 100644
--- a/drivers/media/video/omap3isp/isppreview.c
+++ b/drivers/media/video/omap3isp/isppreview.c
@@ -755,7 +755,7 @@ static struct preview_update update_attrs[] = {
755 * @configs - pointer to update config structure. 755 * @configs - pointer to update config structure.
756 * @config - return pointer to appropriate structure field. 756 * @config - return pointer to appropriate structure field.
757 * @bit - for which feature to return pointers. 757 * @bit - for which feature to return pointers.
758 * Return size of coresponding prev_params member 758 * Return size of corresponding prev_params member
759 */ 759 */
760static u32 760static u32
761__preview_get_ptrs(struct prev_params *params, void **param, 761__preview_get_ptrs(struct prev_params *params, void **param,
diff --git a/drivers/media/video/omap3isp/ispqueue.c b/drivers/media/video/omap3isp/ispqueue.c
index 8fddc5806b0d..9c317148205f 100644
--- a/drivers/media/video/omap3isp/ispqueue.c
+++ b/drivers/media/video/omap3isp/ispqueue.c
@@ -339,7 +339,7 @@ static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
339 up_read(&current->mm->mmap_sem); 339 up_read(&current->mm->mmap_sem);
340 340
341 if (ret != buf->npages) { 341 if (ret != buf->npages) {
342 buf->npages = ret; 342 buf->npages = ret < 0 ? 0 : ret;
343 isp_video_buffer_cleanup(buf); 343 isp_video_buffer_cleanup(buf);
344 return -EFAULT; 344 return -EFAULT;
345 } 345 }
@@ -408,8 +408,8 @@ done:
408 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address 408 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
409 * 409 *
410 * This function locates the VMAs for the buffer's userspace address and checks 410 * This function locates the VMAs for the buffer's userspace address and checks
411 * that their flags match. The onlflag that we need to care for at the moment is 411 * that their flags match. The only flag that we need to care for at the moment
412 * VM_PFNMAP. 412 * is VM_PFNMAP.
413 * 413 *
414 * The buffer vm_flags field is set to the first VMA flags. 414 * The buffer vm_flags field is set to the first VMA flags.
415 * 415 *
diff --git a/drivers/media/video/omap3isp/ispresizer.c b/drivers/media/video/omap3isp/ispresizer.c
index 653f88ba56db..0bb0f8cd36f5 100644
--- a/drivers/media/video/omap3isp/ispresizer.c
+++ b/drivers/media/video/omap3isp/ispresizer.c
@@ -714,19 +714,50 @@ static void resizer_print_status(struct isp_res_device *res)
714 * iw and ih are the input width and height after cropping. Those equations need 714 * iw and ih are the input width and height after cropping. Those equations need
715 * to be satisfied exactly for the resizer to work correctly. 715 * to be satisfied exactly for the resizer to work correctly.
716 * 716 *
717 * Reverting the equations, we can compute the resizing ratios with 717 * The equations can't be easily reverted, as the >> 8 operation is not linear.
718 * In addition, not all input sizes can be achieved for a given output size. To
719 * get the highest input size lower than or equal to the requested input size,
720 * we need to compute the highest resizing ratio that satisfies the following
721 * inequality (taking the 4-tap mode width equation as an example)
722 *
723 * iw >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 - 7
724 *
725 * (where iw is the requested input width) which can be rewritten as
726 *
727 * iw - 7 >= (32 * sph + (ow - 1) * hrsz + 16) >> 8
728 * (iw - 7) << 8 >= 32 * sph + (ow - 1) * hrsz + 16 - b
729 * ((iw - 7) << 8) + b >= 32 * sph + (ow - 1) * hrsz + 16
730 *
731 * where b is the value of the 8 least significant bits of the right hand side
732 * expression of the last inequality. The highest resizing ratio value will be
733 * achieved when b is equal to its maximum value of 255. That resizing ratio
734 * value will still satisfy the original inequality, as b will disappear when
735 * the expression will be shifted right by 8.
736 *
737 * The reverted the equations thus become
718 * 738 *
719 * - 8-phase, 4-tap mode 739 * - 8-phase, 4-tap mode
720 * hrsz = ((iw - 7) * 256 - 16 - 32 * sph) / (ow - 1) 740 * hrsz = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / (ow - 1)
721 * vrsz = ((ih - 4) * 256 - 16 - 32 * spv) / (oh - 1) 741 * vrsz = ((ih - 4) * 256 + 255 - 16 - 32 * spv) / (oh - 1)
722 * - 4-phase, 7-tap mode 742 * - 4-phase, 7-tap mode
723 * hrsz = ((iw - 7) * 256 - 32 - 64 * sph) / (ow - 1) 743 * hrsz = ((iw - 7) * 256 + 255 - 32 - 64 * sph) / (ow - 1)
724 * vrsz = ((ih - 7) * 256 - 32 - 64 * spv) / (oh - 1) 744 * vrsz = ((ih - 7) * 256 + 255 - 32 - 64 * spv) / (oh - 1)
725 * 745 *
726 * The ratios are integer values, and must be rounded down to ensure that the 746 * The ratios are integer values, and are rounded down to ensure that the
727 * cropped input size is not bigger than the uncropped input size. As the ratio 747 * cropped input size is not bigger than the uncropped input size.
728 * in 7-tap mode is always smaller than the ratio in 4-tap mode, we can use the 748 *
729 * 7-tap mode equations to compute a ratio approximation. 749 * As the number of phases/taps, used to select the correct equations to compute
750 * the ratio, depends on the ratio, we start with the 4-tap mode equations to
751 * compute an approximation of the ratio, and switch to the 7-tap mode equations
752 * if the approximation is higher than the ratio threshold.
753 *
754 * As the 7-tap mode equations will return a ratio smaller than or equal to the
755 * 4-tap mode equations, the resulting ratio could become lower than or equal to
756 * the ratio threshold. This 'equations loop' isn't an issue as long as the
757 * correct equations are used to compute the final input size. Starting with the
758 * 4-tap mode equations ensure that, in case of values resulting in a 'ratio
759 * loop', the smallest of the ratio values will be used, never exceeding the
760 * requested input size.
730 * 761 *
731 * We first clamp the output size according to the hardware capabilities to avoid 762 * We first clamp the output size according to the hardware capabilities to avoid
732 * auto-cropping the input more than required to satisfy the TRM equations. The 763 * auto-cropping the input more than required to satisfy the TRM equations. The
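
The reworked comment above describes how the resizer ratios are now derived: compute the 4-tap ratio first (with the "+ 255" rounding term), then fall back to the 7-tap equation when the result exceeds the mid-range threshold. A condensed sketch of the horizontal half of that logic, leaving out the clamping to MIN/MAX_RESIZE_VALUE that the driver also performs:

/* iw/ow: input and output widths, sph: horizontal phase start value.
 * mid_resize_value mirrors the driver's MID_RESIZE_VALUE threshold.
 */
static unsigned int resizer_calc_hrsz(unsigned int iw, unsigned int ow,
                                      unsigned int sph,
                                      unsigned int mid_resize_value)
{
        unsigned int hrsz;

        /* 8-phase, 4-tap equation, rounded up via the "+ 255" term */
        hrsz = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / (ow - 1);
        if (hrsz > mid_resize_value)
                /* 4-phase, 7-tap equation */
                hrsz = ((iw - 7) * 256 + 255 - 32 - 64 * sph) / (ow - 1);

        return hrsz;
}
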
@@ -775,6 +806,8 @@ static void resizer_calc_ratios(struct isp_res_device *res,
775 unsigned int max_width; 806 unsigned int max_width;
776 unsigned int max_height; 807 unsigned int max_height;
777 unsigned int width_alignment; 808 unsigned int width_alignment;
809 unsigned int width;
810 unsigned int height;
778 811
779 /* 812 /*
780 * Clamp the output height based on the hardware capabilities and 813 * Clamp the output height based on the hardware capabilities and
@@ -786,19 +819,22 @@ static void resizer_calc_ratios(struct isp_res_device *res,
786 max_height = min_t(unsigned int, max_height, MAX_OUT_HEIGHT); 819 max_height = min_t(unsigned int, max_height, MAX_OUT_HEIGHT);
787 output->height = clamp(output->height, min_height, max_height); 820 output->height = clamp(output->height, min_height, max_height);
788 821
789 ratio->vert = ((input->height - 7) * 256 - 32 - 64 * spv) 822 ratio->vert = ((input->height - 4) * 256 + 255 - 16 - 32 * spv)
790 / (output->height - 1); 823 / (output->height - 1);
824 if (ratio->vert > MID_RESIZE_VALUE)
825 ratio->vert = ((input->height - 7) * 256 + 255 - 32 - 64 * spv)
826 / (output->height - 1);
791 ratio->vert = clamp_t(unsigned int, ratio->vert, 827 ratio->vert = clamp_t(unsigned int, ratio->vert,
792 MIN_RESIZE_VALUE, MAX_RESIZE_VALUE); 828 MIN_RESIZE_VALUE, MAX_RESIZE_VALUE);
793 829
794 if (ratio->vert <= MID_RESIZE_VALUE) { 830 if (ratio->vert <= MID_RESIZE_VALUE) {
795 upscaled_height = (output->height - 1) * ratio->vert 831 upscaled_height = (output->height - 1) * ratio->vert
796 + 32 * spv + 16; 832 + 32 * spv + 16;
797 input->height = (upscaled_height >> 8) + 4; 833 height = (upscaled_height >> 8) + 4;
798 } else { 834 } else {
799 upscaled_height = (output->height - 1) * ratio->vert 835 upscaled_height = (output->height - 1) * ratio->vert
800 + 64 * spv + 32; 836 + 64 * spv + 32;
801 input->height = (upscaled_height >> 8) + 7; 837 height = (upscaled_height >> 8) + 7;
802 } 838 }
803 839
804 /* 840 /*
@@ -854,20 +890,29 @@ static void resizer_calc_ratios(struct isp_res_device *res,
854 max_width & ~(width_alignment - 1)); 890 max_width & ~(width_alignment - 1));
855 output->width = ALIGN(output->width, width_alignment); 891 output->width = ALIGN(output->width, width_alignment);
856 892
857 ratio->horz = ((input->width - 7) * 256 - 32 - 64 * sph) 893 ratio->horz = ((input->width - 7) * 256 + 255 - 16 - 32 * sph)
858 / (output->width - 1); 894 / (output->width - 1);
895 if (ratio->horz > MID_RESIZE_VALUE)
896 ratio->horz = ((input->width - 7) * 256 + 255 - 32 - 64 * sph)
897 / (output->width - 1);
859 ratio->horz = clamp_t(unsigned int, ratio->horz, 898 ratio->horz = clamp_t(unsigned int, ratio->horz,
860 MIN_RESIZE_VALUE, MAX_RESIZE_VALUE); 899 MIN_RESIZE_VALUE, MAX_RESIZE_VALUE);
861 900
862 if (ratio->horz <= MID_RESIZE_VALUE) { 901 if (ratio->horz <= MID_RESIZE_VALUE) {
863 upscaled_width = (output->width - 1) * ratio->horz 902 upscaled_width = (output->width - 1) * ratio->horz
864 + 32 * sph + 16; 903 + 32 * sph + 16;
865 input->width = (upscaled_width >> 8) + 7; 904 width = (upscaled_width >> 8) + 7;
866 } else { 905 } else {
867 upscaled_width = (output->width - 1) * ratio->horz 906 upscaled_width = (output->width - 1) * ratio->horz
868 + 64 * sph + 32; 907 + 64 * sph + 32;
869 input->width = (upscaled_width >> 8) + 7; 908 width = (upscaled_width >> 8) + 7;
870 } 909 }
910
911 /* Center the new crop rectangle. */
912 input->left += (input->width - width) / 2;
913 input->top += (input->height - height) / 2;
914 input->width = width;
915 input->height = height;
871} 916}
872 917
873/* 918/*
diff --git a/drivers/media/video/omap3isp/ispstat.h b/drivers/media/video/omap3isp/ispstat.h
index 820950c9ef46..d86da94fa50d 100644
--- a/drivers/media/video/omap3isp/ispstat.h
+++ b/drivers/media/video/omap3isp/ispstat.h
@@ -131,9 +131,9 @@ struct ispstat {
131struct ispstat_generic_config { 131struct ispstat_generic_config {
132 /* 132 /*
133 * Fields must be in the same order as in: 133 * Fields must be in the same order as in:
134 * - isph3a_aewb_config 134 * - omap3isp_h3a_aewb_config
135 * - isph3a_af_config 135 * - omap3isp_h3a_af_config
136 * - isphist_config 136 * - omap3isp_hist_config
137 */ 137 */
138 u32 buf_size; 138 u32 buf_size;
139 u16 config_counter; 139 u16 config_counter;
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index 208a7ec739d7..9cd8f1aa567b 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -47,29 +47,59 @@
47 47
48static struct isp_format_info formats[] = { 48static struct isp_format_info formats[] = {
49 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8, 49 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
50 V4L2_MBUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY, 8, }, 50 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
51 V4L2_PIX_FMT_GREY, 8, },
52 { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
53 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
54 V4L2_PIX_FMT_Y10, 10, },
55 { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
56 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
57 V4L2_PIX_FMT_Y12, 12, },
58 { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
59 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
60 V4L2_PIX_FMT_SBGGR8, 8, },
61 { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
62 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
63 V4L2_PIX_FMT_SGBRG8, 8, },
64 { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
65 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
66 V4L2_PIX_FMT_SGRBG8, 8, },
67 { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
68 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
69 V4L2_PIX_FMT_SRGGB8, 8, },
51 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, 70 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
52 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10DPCM8, 8, }, 71 V4L2_MBUS_FMT_SGRBG10_1X10, 0,
72 V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
53 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10, 73 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
54 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10, 10, }, 74 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
75 V4L2_PIX_FMT_SBGGR10, 10, },
55 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10, 76 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
56 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10, 10, }, 77 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
78 V4L2_PIX_FMT_SGBRG10, 10, },
57 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10, 79 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
58 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10, 10, }, 80 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
81 V4L2_PIX_FMT_SGRBG10, 10, },
59 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10, 82 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
60 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10, 10, }, 83 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
84 V4L2_PIX_FMT_SRGGB10, 10, },
61 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10, 85 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
62 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12, 12, }, 86 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
87 V4L2_PIX_FMT_SBGGR12, 12, },
63 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10, 88 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
64 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12, 12, }, 89 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
90 V4L2_PIX_FMT_SGBRG12, 12, },
65 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10, 91 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
66 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12, 12, }, 92 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
93 V4L2_PIX_FMT_SGRBG12, 12, },
67 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10, 94 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
68 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12, 12, }, 95 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
96 V4L2_PIX_FMT_SRGGB12, 12, },
69 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16, 97 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
70 V4L2_MBUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 16, }, 98 V4L2_MBUS_FMT_UYVY8_1X16, 0,
99 V4L2_PIX_FMT_UYVY, 16, },
71 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16, 100 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
72 V4L2_MBUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 16, }, 101 V4L2_MBUS_FMT_YUYV8_1X16, 0,
102 V4L2_PIX_FMT_YUYV, 16, },
73}; 103};
74 104
75const struct isp_format_info * 105const struct isp_format_info *
@@ -86,6 +116,37 @@ omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
86} 116}
87 117
88/* 118/*
119 * Decide whether desired output pixel code can be obtained with
120 * the lane shifter by shifting the input pixel code.
121 * @in: input pixelcode to shifter
122 * @out: output pixelcode from shifter
123 * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
124 *
125 * return true if the combination is possible
126 * return false otherwise
127 */
128static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in,
129 enum v4l2_mbus_pixelcode out,
130 unsigned int additional_shift)
131{
132 const struct isp_format_info *in_info, *out_info;
133
134 if (in == out)
135 return true;
136
137 in_info = omap3isp_video_format_info(in);
138 out_info = omap3isp_video_format_info(out);
139
140 if ((in_info->flavor == 0) || (out_info->flavor == 0))
141 return false;
142
143 if (in_info->flavor != out_info->flavor)
144 return false;
145
146 return in_info->bpp - out_info->bpp + additional_shift <= 6;
147}
148
149/*
89 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format 150 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
90 * @video: ISP video instance 151 * @video: ISP video instance
91 * @mbus: v4l2_mbus_framefmt format (input) 152 * @mbus: v4l2_mbus_framefmt format (input)
@@ -235,6 +296,7 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
235 return -EPIPE; 296 return -EPIPE;
236 297
237 while (1) { 298 while (1) {
299 unsigned int shifter_link;
238 /* Retrieve the sink format */ 300 /* Retrieve the sink format */
239 pad = &subdev->entity.pads[0]; 301 pad = &subdev->entity.pads[0];
240 if (!(pad->flags & MEDIA_PAD_FL_SINK)) 302 if (!(pad->flags & MEDIA_PAD_FL_SINK))
@@ -263,6 +325,10 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
263 return -ENOSPC; 325 return -ENOSPC;
264 } 326 }
265 327
328 /* If sink pad is on CCDC, the link has the lane shifter
329 * in the middle of it. */
330 shifter_link = subdev == &isp->isp_ccdc.subdev;
331
266 /* Retrieve the source format */ 332 /* Retrieve the source format */
267 pad = media_entity_remote_source(pad); 333 pad = media_entity_remote_source(pad);
268 if (pad == NULL || 334 if (pad == NULL ||
@@ -278,10 +344,24 @@ static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
278 return -EPIPE; 344 return -EPIPE;
279 345
280 /* Check if the two ends match */ 346 /* Check if the two ends match */
281 if (fmt_source.format.code != fmt_sink.format.code || 347 if (fmt_source.format.width != fmt_sink.format.width ||
282 fmt_source.format.width != fmt_sink.format.width ||
283 fmt_source.format.height != fmt_sink.format.height) 348 fmt_source.format.height != fmt_sink.format.height)
284 return -EPIPE; 349 return -EPIPE;
350
351 if (shifter_link) {
352 unsigned int parallel_shift = 0;
353 if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
354 struct isp_parallel_platform_data *pdata =
355 &((struct isp_v4l2_subdevs_group *)
356 subdev->host_priv)->bus.parallel;
357 parallel_shift = pdata->data_lane_shift * 2;
358 }
359 if (!isp_video_is_shiftable(fmt_source.format.code,
360 fmt_sink.format.code,
361 parallel_shift))
362 return -EPIPE;
363 } else if (fmt_source.format.code != fmt_sink.format.code)
364 return -EPIPE;
285 } 365 }
286 366
287 return 0; 367 return 0;
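
isp_video_validate_pipeline() above now treats the CCDC sink link specially: both ends must still agree on width and height, but their media bus codes may differ as long as isp_video_is_shiftable() accepts the pair. As a worked example using the table entries added in this patch, SGRBG12 and SGRBG10 share the SGRBG8 flavor and 12 - 10 + 0 extra parallel shift = 2 <= 6, so a 12-bit sensor code may feed the 10-bit CCDC code, while a 12-bit to 8-bit pair combined with a 4-bit parallel lane shift (12 - 8 + 4 = 8) would be rejected. A tiny hard-coded check of the first case:

/* Worked check for the example above, with the bpp values taken from the
 * format table added in this patch.
 */
static bool sgrbg12_to_sgrbg10_shiftable(void)
{
        unsigned int in_bpp = 12, out_bpp = 10, parallel_shift = 0;

        return in_bpp - out_bpp + parallel_shift <= 6;  /* true */
}
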
diff --git a/drivers/media/video/omap3isp/ispvideo.h b/drivers/media/video/omap3isp/ispvideo.h
index 524a1acd0906..911bea64e78a 100644
--- a/drivers/media/video/omap3isp/ispvideo.h
+++ b/drivers/media/video/omap3isp/ispvideo.h
@@ -49,6 +49,8 @@ struct v4l2_pix_format;
49 * bits. Identical to @code if the format is 10 bits wide or less. 49 * bits. Identical to @code if the format is 10 bits wide or less.
50 * @uncompressed: V4L2 media bus format code for the corresponding uncompressed 50 * @uncompressed: V4L2 media bus format code for the corresponding uncompressed
51 * format. Identical to @code if the format is not DPCM compressed. 51 * format. Identical to @code if the format is not DPCM compressed.
52 * @flavor: V4L2 media bus format code for the same pixel layout but
53 * shifted to be 8 bits per pixel. =0 if format is not shiftable.
52 * @pixelformat: V4L2 pixel format FCC identifier 54 * @pixelformat: V4L2 pixel format FCC identifier
53 * @bpp: Bits per pixel 55 * @bpp: Bits per pixel
54 */ 56 */
@@ -56,6 +58,7 @@ struct isp_format_info {
56 enum v4l2_mbus_pixelcode code; 58 enum v4l2_mbus_pixelcode code;
57 enum v4l2_mbus_pixelcode truncated; 59 enum v4l2_mbus_pixelcode truncated;
58 enum v4l2_mbus_pixelcode uncompressed; 60 enum v4l2_mbus_pixelcode uncompressed;
61 enum v4l2_mbus_pixelcode flavor;
59 u32 pixelformat; 62 u32 pixelformat;
60 unsigned int bpp; 63 unsigned int bpp;
61}; 64};
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 95f8b4e11e46..d142b40ea64e 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -527,7 +527,7 @@ static int fimc_cap_s_fmt_mplane(struct file *file, void *priv,
527 if (ret) 527 if (ret)
528 return ret; 528 return ret;
529 529
530 if (vb2_is_streaming(&fimc->vid_cap.vbq) || fimc_capture_active(fimc)) 530 if (vb2_is_busy(&fimc->vid_cap.vbq) || fimc_capture_active(fimc))
531 return -EBUSY; 531 return -EBUSY;
532 532
533 frame = &ctx->d_frame; 533 frame = &ctx->d_frame;
@@ -539,8 +539,10 @@ static int fimc_cap_s_fmt_mplane(struct file *file, void *priv,
539 return -EINVAL; 539 return -EINVAL;
540 } 540 }
541 541
542 for (i = 0; i < frame->fmt->colplanes; i++) 542 for (i = 0; i < frame->fmt->colplanes; i++) {
543 frame->payload[i] = pix->plane_fmt[i].bytesperline * pix->height; 543 frame->payload[i] =
544 (pix->width * pix->height * frame->fmt->depth[i]) >> 3;
545 }
544 546
545 /* Output DMA frame pixel size and offsets. */ 547 /* Output DMA frame pixel size and offsets. */
546 frame->f_width = pix->plane_fmt[0].bytesperline * 8 548 frame->f_width = pix->plane_fmt[0].bytesperline * 8
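
The fimc-capture.c hunk above stops deriving the captured plane sizes from bytesperline and instead multiplies the pixel count by the per-plane depth, which the expression implies is given in bits. A small sketch with hypothetical numbers: a 640x480 frame whose two planes carry 8 and 4 bits per pixel yields payloads of 307200 and 153600 bytes.

#include <linux/types.h>

/* Per-plane payload as computed in the hunk above: width * height pixels,
 * depth_bits bits contributed by this plane per pixel, shifted down to bytes.
 */
static unsigned long plane_payload(u32 width, u32 height, u32 depth_bits)
{
        return ((unsigned long)width * height * depth_bits) >> 3;
}
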
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 6c919b38a3d8..dc91a8511af6 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -361,10 +361,20 @@ static void fimc_capture_irq_handler(struct fimc_dev *fimc)
361{ 361{
362 struct fimc_vid_cap *cap = &fimc->vid_cap; 362 struct fimc_vid_cap *cap = &fimc->vid_cap;
363 struct fimc_vid_buffer *v_buf; 363 struct fimc_vid_buffer *v_buf;
364 struct timeval *tv;
365 struct timespec ts;
364 366
365 if (!list_empty(&cap->active_buf_q) && 367 if (!list_empty(&cap->active_buf_q) &&
366 test_bit(ST_CAPT_RUN, &fimc->state)) { 368 test_bit(ST_CAPT_RUN, &fimc->state)) {
369 ktime_get_real_ts(&ts);
370
367 v_buf = active_queue_pop(cap); 371 v_buf = active_queue_pop(cap);
372
373 tv = &v_buf->vb.v4l2_buf.timestamp;
374 tv->tv_sec = ts.tv_sec;
375 tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
376 v_buf->vb.v4l2_buf.sequence = cap->frame_count++;
377
368 vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE); 378 vb2_buffer_done(&v_buf->vb, VB2_BUF_STATE_DONE);
369 } 379 }
370 380
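
The capture interrupt handler above now stamps each completed buffer: ktime_get_real_ts() supplies a struct timespec, which is folded into the struct timeval carried by the v4l2 buffer, and a per-device frame counter fills the sequence number. A minimal sketch of the conversion (the include set is approximate):

#include <linux/time.h>

/* timespec -> timeval conversion as done for the v4l2 buffer timestamp:
 * seconds copy straight across, nanoseconds are divided down to microseconds.
 */
static void fill_capture_timestamp(struct timeval *tv)
{
        struct timespec ts;

        ktime_get_real_ts(&ts);
        tv->tv_sec = ts.tv_sec;
        tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}
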
@@ -758,7 +768,7 @@ static void fimc_unlock(struct vb2_queue *vq)
758 mutex_unlock(&ctx->fimc_dev->lock); 768 mutex_unlock(&ctx->fimc_dev->lock);
759} 769}
760 770
761struct vb2_ops fimc_qops = { 771static struct vb2_ops fimc_qops = {
762 .queue_setup = fimc_queue_setup, 772 .queue_setup = fimc_queue_setup,
763 .buf_prepare = fimc_buf_prepare, 773 .buf_prepare = fimc_buf_prepare,
764 .buf_queue = fimc_buf_queue, 774 .buf_queue = fimc_buf_queue,
@@ -927,23 +937,23 @@ int fimc_vidioc_try_fmt_mplane(struct file *file, void *priv,
927 pix->num_planes = fmt->memplanes; 937 pix->num_planes = fmt->memplanes;
928 pix->colorspace = V4L2_COLORSPACE_JPEG; 938 pix->colorspace = V4L2_COLORSPACE_JPEG;
929 939
930 for (i = 0; i < pix->num_planes; ++i) {
931 int bpl = pix->plane_fmt[i].bytesperline;
932 940
933 dbg("[%d] bpl: %d, depth: %d, w: %d, h: %d", 941 for (i = 0; i < pix->num_planes; ++i) {
934 i, bpl, fmt->depth[i], pix->width, pix->height); 942 u32 bpl = pix->plane_fmt[i].bytesperline;
943 u32 *sizeimage = &pix->plane_fmt[i].sizeimage;
935 944
936 if (!bpl || (bpl * 8 / fmt->depth[i]) > pix->width) 945 if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
937 bpl = (pix->width * fmt->depth[0]) >> 3; 946 bpl = pix->width; /* Planar */
938 947
939 if (!pix->plane_fmt[i].sizeimage) 948 if (fmt->colplanes == 1 && /* Packed */
940 pix->plane_fmt[i].sizeimage = pix->height * bpl; 949 (bpl == 0 || ((bpl * 8) / fmt->depth[i]) < pix->width))
950 bpl = (pix->width * fmt->depth[0]) / 8;
941 951
942 pix->plane_fmt[i].bytesperline = bpl; 952 if (i == 0) /* Same bytesperline for each plane. */
953 mod_x = bpl;
943 954
944 dbg("[%d]: bpl: %d, sizeimage: %d", 955 pix->plane_fmt[i].bytesperline = mod_x;
945 i, pix->plane_fmt[i].bytesperline, 956 *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8;
946 pix->plane_fmt[i].sizeimage);
947 } 957 }
948 958
949 return 0; 959 return 0;
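
[Editor's note] The rewritten try_fmt loop distinguishes planar formats (bytesperline is clamped to the image width) from packed ones (bytesperline carries the full per-pixel depth) and forces the first plane's bytesperline onto every plane. A compact, self-contained sketch of that validation; the depth values are illustrative stand-ins, not the driver's tables:

#include <stdio.h>

struct fmt_info {
	unsigned int colplanes;
	unsigned int depth[3];	/* bits per pixel contributed by each plane */
};

static unsigned int fix_bpl(const struct fmt_info *fmt, unsigned int plane,
			    unsigned int bpl, unsigned int width)
{
	if (fmt->colplanes > 1 && (bpl == 0 || bpl < width))
		bpl = width;				/* planar */
	if (fmt->colplanes == 1 &&
	    (bpl == 0 || (bpl * 8) / fmt->depth[plane] < width))
		bpl = (width * fmt->depth[0]) / 8;	/* packed */
	return bpl;
}

int main(void)
{
	struct fmt_info yuyv = { .colplanes = 1, .depth = { 16 } };	/* packed */
	struct fmt_info nv12 = { .colplanes = 2, .depth = { 8, 4 } };	/* planar */

	printf("%u %u\n", fix_bpl(&yuyv, 0, 0, 640), fix_bpl(&nv12, 0, 100, 640));
	return 0;
}
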
@@ -965,7 +975,7 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *priv,
965 975
966 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); 976 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
967 977
968 if (vb2_is_streaming(vq)) { 978 if (vb2_is_busy(vq)) {
969 v4l2_err(&fimc->m2m.v4l2_dev, "queue (%d) busy\n", f->type); 979 v4l2_err(&fimc->m2m.v4l2_dev, "queue (%d) busy\n", f->type);
970 return -EBUSY; 980 return -EBUSY;
971 } 981 }
@@ -985,8 +995,10 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *priv,
985 if (!frame->fmt) 995 if (!frame->fmt)
986 return -EINVAL; 996 return -EINVAL;
987 997
988 for (i = 0; i < frame->fmt->colplanes; i++) 998 for (i = 0; i < frame->fmt->colplanes; i++) {
989 frame->payload[i] = pix->plane_fmt[i].bytesperline * pix->height; 999 frame->payload[i] =
1000 (pix->width * pix->height * frame->fmt->depth[i]) / 8;
1001 }
990 1002
991 frame->f_width = pix->plane_fmt[0].bytesperline * 8 / 1003 frame->f_width = pix->plane_fmt[0].bytesperline * 8 /
992 frame->fmt->depth[0]; 1004 frame->fmt->depth[0];
@@ -1750,7 +1762,7 @@ static int __devexit fimc_remove(struct platform_device *pdev)
1750} 1762}
1751 1763
1752/* Image pixel limits, similar across several FIMC HW revisions. */ 1764/* Image pixel limits, similar across several FIMC HW revisions. */
1753static struct fimc_pix_limit s5p_pix_limit[3] = { 1765static struct fimc_pix_limit s5p_pix_limit[4] = {
1754 [0] = { 1766 [0] = {
1755 .scaler_en_w = 3264, 1767 .scaler_en_w = 3264,
1756 .scaler_dis_w = 8192, 1768 .scaler_dis_w = 8192,
@@ -1775,6 +1787,14 @@ static struct fimc_pix_limit s5p_pix_limit[3] = {
1775 .out_rot_en_w = 1280, 1787 .out_rot_en_w = 1280,
1776 .out_rot_dis_w = 1920, 1788 .out_rot_dis_w = 1920,
1777 }, 1789 },
1790 [3] = {
1791 .scaler_en_w = 1920,
1792 .scaler_dis_w = 8192,
1793 .in_rot_en_h = 1366,
1794 .in_rot_dis_w = 8192,
1795 .out_rot_en_w = 1366,
1796 .out_rot_dis_w = 1920,
1797 },
1778}; 1798};
1779 1799
1780static struct samsung_fimc_variant fimc0_variant_s5p = { 1800static struct samsung_fimc_variant fimc0_variant_s5p = {
@@ -1827,7 +1847,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
1827 .pix_limit = &s5p_pix_limit[2], 1847 .pix_limit = &s5p_pix_limit[2],
1828}; 1848};
1829 1849
1830static struct samsung_fimc_variant fimc0_variant_s5pv310 = { 1850static struct samsung_fimc_variant fimc0_variant_exynos4 = {
1831 .pix_hoff = 1, 1851 .pix_hoff = 1,
1832 .has_inp_rot = 1, 1852 .has_inp_rot = 1,
1833 .has_out_rot = 1, 1853 .has_out_rot = 1,
@@ -1840,7 +1860,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv310 = {
1840 .pix_limit = &s5p_pix_limit[1], 1860 .pix_limit = &s5p_pix_limit[1],
1841}; 1861};
1842 1862
1843static struct samsung_fimc_variant fimc2_variant_s5pv310 = { 1863static struct samsung_fimc_variant fimc2_variant_exynos4 = {
1844 .pix_hoff = 1, 1864 .pix_hoff = 1,
1845 .has_cistatus2 = 1, 1865 .has_cistatus2 = 1,
1846 .has_mainscaler_ext = 1, 1866 .has_mainscaler_ext = 1,
@@ -1848,7 +1868,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv310 = {
1848 .min_out_pixsize = 16, 1868 .min_out_pixsize = 16,
1849 .hor_offs_align = 1, 1869 .hor_offs_align = 1,
1850 .out_buf_count = 32, 1870 .out_buf_count = 32,
1851 .pix_limit = &s5p_pix_limit[2], 1871 .pix_limit = &s5p_pix_limit[3],
1852}; 1872};
1853 1873
1854/* S5PC100 */ 1874/* S5PC100 */
@@ -1874,12 +1894,12 @@ static struct samsung_fimc_driverdata fimc_drvdata_s5pv210 = {
1874}; 1894};
1875 1895
1876/* S5PV310, S5PC210 */ 1896/* S5PV310, S5PC210 */
1877static struct samsung_fimc_driverdata fimc_drvdata_s5pv310 = { 1897static struct samsung_fimc_driverdata fimc_drvdata_exynos4 = {
1878 .variant = { 1898 .variant = {
1879 [0] = &fimc0_variant_s5pv310, 1899 [0] = &fimc0_variant_exynos4,
1880 [1] = &fimc0_variant_s5pv310, 1900 [1] = &fimc0_variant_exynos4,
1881 [2] = &fimc0_variant_s5pv310, 1901 [2] = &fimc0_variant_exynos4,
1882 [3] = &fimc2_variant_s5pv310, 1902 [3] = &fimc2_variant_exynos4,
1883 }, 1903 },
1884 .num_entities = 4, 1904 .num_entities = 4,
1885 .lclk_frequency = 166000000UL, 1905 .lclk_frequency = 166000000UL,
@@ -1893,8 +1913,8 @@ static struct platform_device_id fimc_driver_ids[] = {
1893 .name = "s5pv210-fimc", 1913 .name = "s5pv210-fimc",
1894 .driver_data = (unsigned long)&fimc_drvdata_s5pv210, 1914 .driver_data = (unsigned long)&fimc_drvdata_s5pv210,
1895 }, { 1915 }, {
1896 .name = "s5pv310-fimc", 1916 .name = "exynos4-fimc",
1897 .driver_data = (unsigned long)&fimc_drvdata_s5pv310, 1917 .driver_data = (unsigned long)&fimc_drvdata_exynos4,
1898 }, 1918 },
1899 {}, 1919 {},
1900}; 1920};
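
[Editor's note] The s5pv310-to-exynos4 rename only touches the platform device id strings and variant names; the matching mechanics are unchanged. For reference, a stripped-down model of how an id table maps a device name to per-SoC driver data; the structures are simplified stand-ins, not the kernel's struct platform_device_id:

#include <stdio.h>
#include <string.h>

struct drvdata { unsigned int num_entities; unsigned long lclk_hz; };

static const struct drvdata drvdata_s5pv210 = { 3, 166000000UL };
static const struct drvdata drvdata_exynos4 = { 4, 166000000UL };

struct id_entry { const char *name; const struct drvdata *data; };

static const struct id_entry ids[] = {
	{ "s5pv210-fimc", &drvdata_s5pv210 },
	{ "exynos4-fimc", &drvdata_exynos4 },	/* was "s5pv310-fimc" */
	{ NULL, NULL },
};

static const struct drvdata *match(const char *devname)
{
	const struct id_entry *id;

	for (id = ids; id->name; id++)
		if (!strcmp(id->name, devname))
			return id->data;
	return NULL;
}

int main(void)
{
	const struct drvdata *d = match("exynos4-fimc");

	if (d)
		printf("%u entities\n", d->num_entities);
	return 0;
}
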
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 3fe54bf41142..134e86bf6d97 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -922,7 +922,7 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
922 /* Try 2560x1920, 1280x960, 640x480, 320x240 */ 922 /* Try 2560x1920, 1280x960, 640x480, 320x240 */
923 mf.width = 2560 >> shift; 923 mf.width = 2560 >> shift;
924 mf.height = 1920 >> shift; 924 mf.height = 1920 >> shift;
925 ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, 925 ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
926 s_mbus_fmt, &mf); 926 s_mbus_fmt, &mf);
927 if (ret < 0) 927 if (ret < 0)
928 return ret; 928 return ret;
@@ -1224,7 +1224,7 @@ static int client_s_fmt(struct soc_camera_device *icd,
1224 struct v4l2_cropcap cap; 1224 struct v4l2_cropcap cap;
1225 int ret; 1225 int ret;
1226 1226
1227 ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, 1227 ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
1228 s_mbus_fmt, mf); 1228 s_mbus_fmt, mf);
1229 if (ret < 0) 1229 if (ret < 0)
1230 return ret; 1230 return ret;
@@ -1254,7 +1254,7 @@ static int client_s_fmt(struct soc_camera_device *icd,
1254 tmp_h = min(2 * tmp_h, max_height); 1254 tmp_h = min(2 * tmp_h, max_height);
1255 mf->width = tmp_w; 1255 mf->width = tmp_w;
1256 mf->height = tmp_h; 1256 mf->height = tmp_h;
1257 ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, 1257 ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
1258 s_mbus_fmt, mf); 1258 s_mbus_fmt, mf);
1259 dev_geo(dev, "Camera scaled to %ux%u\n", 1259 dev_geo(dev, "Camera scaled to %ux%u\n",
1260 mf->width, mf->height); 1260 mf->width, mf->height);
@@ -1658,7 +1658,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
1658 mf.code = xlate->code; 1658 mf.code = xlate->code;
1659 mf.colorspace = pix->colorspace; 1659 mf.colorspace = pix->colorspace;
1660 1660
1661 ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, try_mbus_fmt, &mf); 1661 ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
1662 if (ret < 0) 1662 if (ret < 0)
1663 return ret; 1663 return ret;
1664 1664
@@ -1682,7 +1682,7 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
1682 */ 1682 */
1683 mf.width = 2560; 1683 mf.width = 2560;
1684 mf.height = 1920; 1684 mf.height = 1920;
1685 ret = v4l2_device_call_until_err(sd->v4l2_dev, 0, video, 1685 ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
1686 try_mbus_fmt, &mf); 1686 try_mbus_fmt, &mf);
1687 if (ret < 0) { 1687 if (ret < 0) {
1688 /* Shouldn't actually happen... */ 1688 /* Shouldn't actually happen... */
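
[Editor's note] Passing (long)icd instead of 0 as the group id means v4l2_device_call_until_err() only reaches subdevices whose grp_id was set to the same soc_camera device (see the soc_camera.c and sh_mobile_csi2.c hunks below) instead of broadcasting to every subdev on the v4l2_device. A standalone model of that filtering; the structures are simplified stand-ins for the real v4l2 core:

#include <stdio.h>

struct subdev { const char *name; long grp_id; };

/* Call op on every subdev whose grp_id matches (0 matches all). */
static int call_until_err(struct subdev *sd, int n, long grp_id,
			  int (*op)(struct subdev *))
{
	int i, ret;

	for (i = 0; i < n; i++) {
		if (grp_id && sd[i].grp_id != grp_id)
			continue;
		ret = op(&sd[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static int s_fmt(struct subdev *sd)
{
	printf("s_fmt on %s\n", sd->name);
	return 0;
}

int main(void)
{
	long icd0 = 0x1000, icd1 = 0x2000;	/* stand-ins for (long)icd */
	struct subdev sds[] = {
		{ "sensor0", icd0 }, { "csi2.0", icd0 }, { "sensor1", icd1 },
	};

	return call_until_err(sds, 3, icd0, s_fmt);	/* skips sensor1 */
}
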
diff --git a/drivers/media/video/sh_mobile_csi2.c b/drivers/media/video/sh_mobile_csi2.c
index dd1b81b1442b..98b87481fa94 100644
--- a/drivers/media/video/sh_mobile_csi2.c
+++ b/drivers/media/video/sh_mobile_csi2.c
@@ -38,6 +38,8 @@ struct sh_csi2 {
38 void __iomem *base; 38 void __iomem *base;
39 struct platform_device *pdev; 39 struct platform_device *pdev;
40 struct sh_csi2_client_config *client; 40 struct sh_csi2_client_config *client;
41 unsigned long (*query_bus_param)(struct soc_camera_device *);
42 int (*set_bus_param)(struct soc_camera_device *, unsigned long);
41}; 43};
42 44
43static int sh_csi2_try_fmt(struct v4l2_subdev *sd, 45static int sh_csi2_try_fmt(struct v4l2_subdev *sd,
@@ -208,6 +210,7 @@ static int sh_csi2_notify(struct notifier_block *nb,
208 case BUS_NOTIFY_BOUND_DRIVER: 210 case BUS_NOTIFY_BOUND_DRIVER:
209 snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s%s", 211 snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s%s",
210 dev_name(v4l2_dev->dev), ".mipi-csi"); 212 dev_name(v4l2_dev->dev), ".mipi-csi");
213 priv->subdev.grp_id = (long)icd;
211 ret = v4l2_device_register_subdev(v4l2_dev, &priv->subdev); 214 ret = v4l2_device_register_subdev(v4l2_dev, &priv->subdev);
212 dev_dbg(dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret); 215 dev_dbg(dev, "%s(%p): ret(register_subdev) = %d\n", __func__, priv, ret);
213 if (ret < 0) 216 if (ret < 0)
@@ -215,6 +218,8 @@ static int sh_csi2_notify(struct notifier_block *nb,
215 218
216 priv->client = pdata->clients + i; 219 priv->client = pdata->clients + i;
217 220
221 priv->set_bus_param = icd->ops->set_bus_param;
222 priv->query_bus_param = icd->ops->query_bus_param;
218 icd->ops->set_bus_param = sh_csi2_set_bus_param; 223 icd->ops->set_bus_param = sh_csi2_set_bus_param;
219 icd->ops->query_bus_param = sh_csi2_query_bus_param; 224 icd->ops->query_bus_param = sh_csi2_query_bus_param;
220 225
@@ -226,8 +231,10 @@ static int sh_csi2_notify(struct notifier_block *nb,
226 priv->client = NULL; 231 priv->client = NULL;
227 232
228 /* Driver is about to be unbound */ 233 /* Driver is about to be unbound */
229 icd->ops->set_bus_param = NULL; 234 icd->ops->set_bus_param = priv->set_bus_param;
230 icd->ops->query_bus_param = NULL; 235 icd->ops->query_bus_param = priv->query_bus_param;
236 priv->set_bus_param = NULL;
237 priv->query_bus_param = NULL;
231 238
232 v4l2_device_unregister_subdev(&priv->subdev); 239 v4l2_device_unregister_subdev(&priv->subdev);
233 240
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 46284489e4eb..3973f9a94753 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -996,10 +996,11 @@ static void soc_camera_free_i2c(struct soc_camera_device *icd)
996{ 996{
997 struct i2c_client *client = 997 struct i2c_client *client =
998 to_i2c_client(to_soc_camera_control(icd)); 998 to_i2c_client(to_soc_camera_control(icd));
999 struct i2c_adapter *adap = client->adapter;
999 dev_set_drvdata(&icd->dev, NULL); 1000 dev_set_drvdata(&icd->dev, NULL);
1000 v4l2_device_unregister_subdev(i2c_get_clientdata(client)); 1001 v4l2_device_unregister_subdev(i2c_get_clientdata(client));
1001 i2c_unregister_device(client); 1002 i2c_unregister_device(client);
1002 i2c_put_adapter(client->adapter); 1003 i2c_put_adapter(adap);
1003} 1004}
1004#else 1005#else
1005#define soc_camera_init_i2c(icd, icl) (-ENODEV) 1006#define soc_camera_init_i2c(icd, icl) (-ENODEV)
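
[Editor's note] The soc_camera_free_i2c() change above is a use-after-free fix: i2c_unregister_device() frees the client, so the adapter pointer must be read out beforehand rather than dereferenced afterwards. A tiny userspace model of that ordering with hypothetical stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct adapter { int users; };
struct client { struct adapter *adapter; };

static void put_adapter(struct adapter *adap) { adap->users--; }

int main(void)
{
	struct adapter adap = { .users = 1 };
	struct client *client = malloc(sizeof(*client));
	struct adapter *a;

	client->adapter = &adap;

	/* Wrong order would be: free(client); put_adapter(client->adapter); */
	a = client->adapter;	/* save before freeing */
	free(client);		/* models i2c_unregister_device() */
	put_adapter(a);		/* still valid */

	printf("users=%d\n", adap.users);
	return 0;
}
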
@@ -1071,6 +1072,9 @@ static int soc_camera_probe(struct device *dev)
1071 } 1072 }
1072 } 1073 }
1073 1074
1075 sd = soc_camera_to_subdev(icd);
1076 sd->grp_id = (long)icd;
1077
1074 /* At this point client .probe() should have run already */ 1078 /* At this point client .probe() should have run already */
1075 ret = soc_camera_init_user_formats(icd); 1079 ret = soc_camera_init_user_formats(icd);
1076 if (ret < 0) 1080 if (ret < 0)
@@ -1092,7 +1096,6 @@ static int soc_camera_probe(struct device *dev)
1092 goto evidstart; 1096 goto evidstart;
1093 1097
1094 /* Try to improve our guess of a reasonable window format */ 1098 /* Try to improve our guess of a reasonable window format */
1095 sd = soc_camera_to_subdev(icd);
1096 if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) { 1099 if (!v4l2_subdev_call(sd, video, g_mbus_fmt, &mf)) {
1097 icd->user_width = mf.width; 1100 icd->user_width = mf.width;
1098 icd->user_height = mf.height; 1101 icd->user_height = mf.height;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 498e6742579e..6dc7196296b3 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -389,7 +389,8 @@ static int v4l2_open(struct inode *inode, struct file *filp)
389 video_get(vdev); 389 video_get(vdev);
390 mutex_unlock(&videodev_lock); 390 mutex_unlock(&videodev_lock);
391#if defined(CONFIG_MEDIA_CONTROLLER) 391#if defined(CONFIG_MEDIA_CONTROLLER)
392 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) { 392 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
393 vdev->vfl_type != VFL_TYPE_SUBDEV) {
393 entity = media_entity_get(&vdev->entity); 394 entity = media_entity_get(&vdev->entity);
394 if (!entity) { 395 if (!entity) {
395 ret = -EBUSY; 396 ret = -EBUSY;
@@ -415,7 +416,8 @@ err:
415 /* decrease the refcount in case of an error */ 416 /* decrease the refcount in case of an error */
416 if (ret) { 417 if (ret) {
417#if defined(CONFIG_MEDIA_CONTROLLER) 418#if defined(CONFIG_MEDIA_CONTROLLER)
418 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) 419 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
420 vdev->vfl_type != VFL_TYPE_SUBDEV)
419 media_entity_put(entity); 421 media_entity_put(entity);
420#endif 422#endif
421 video_put(vdev); 423 video_put(vdev);
@@ -437,7 +439,8 @@ static int v4l2_release(struct inode *inode, struct file *filp)
437 mutex_unlock(vdev->lock); 439 mutex_unlock(vdev->lock);
438 } 440 }
439#if defined(CONFIG_MEDIA_CONTROLLER) 441#if defined(CONFIG_MEDIA_CONTROLLER)
440 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) 442 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
443 vdev->vfl_type != VFL_TYPE_SUBDEV)
441 media_entity_put(&vdev->entity); 444 media_entity_put(&vdev->entity);
442#endif 445#endif
443 /* decrease the refcount unconditionally since the release() 446 /* decrease the refcount unconditionally since the release()
@@ -686,7 +689,8 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
686 689
687#if defined(CONFIG_MEDIA_CONTROLLER) 690#if defined(CONFIG_MEDIA_CONTROLLER)
688 /* Part 5: Register the entity. */ 691 /* Part 5: Register the entity. */
689 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) { 692 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
693 vdev->vfl_type != VFL_TYPE_SUBDEV) {
690 vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L; 694 vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
691 vdev->entity.name = vdev->name; 695 vdev->entity.name = vdev->name;
692 vdev->entity.v4l.major = VIDEO_MAJOR; 696 vdev->entity.v4l.major = VIDEO_MAJOR;
@@ -733,7 +737,8 @@ void video_unregister_device(struct video_device *vdev)
733 return; 737 return;
734 738
735#if defined(CONFIG_MEDIA_CONTROLLER) 739#if defined(CONFIG_MEDIA_CONTROLLER)
736 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev) 740 if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
741 vdev->vfl_type != VFL_TYPE_SUBDEV)
737 media_device_unregister_entity(&vdev->entity); 742 media_device_unregister_entity(&vdev->entity);
738#endif 743#endif
739 744
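
[Editor's note] The four v4l2-dev.c hunks add the same guard, so the condition is worth stating once: a video_device only owns a media entity when it is not a subdev node. A hypothetical predicate (not in the patch) expressing the repeated test, with stand-in types and enum values:

#include <stdbool.h>
#include <stdio.h>

enum vfl_type { VFL_TYPE_GRABBER, VFL_TYPE_VBI, VFL_TYPE_RADIO, VFL_TYPE_SUBDEV };

struct media_device { int dummy; };
struct v4l2_device { struct media_device *mdev; };
struct video_device { struct v4l2_device *v4l2_dev; enum vfl_type vfl_type; };

/* Hypothetical helper equivalent to the condition repeated in the hunks. */
static bool vdev_owns_entity(const struct video_device *vdev)
{
	return vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
	       vdev->vfl_type != VFL_TYPE_SUBDEV;
}

int main(void)
{
	struct media_device md;
	struct v4l2_device v4l2 = { .mdev = &md };
	struct video_device cap = { &v4l2, VFL_TYPE_GRABBER };
	struct video_device sub = { &v4l2, VFL_TYPE_SUBDEV };

	printf("%d %d\n", vdev_owns_entity(&cap), vdev_owns_entity(&sub));
	return 0;
}
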
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index c4742fc15529..c9691115f2d2 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -300,7 +300,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
300 300
301 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 301 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
302 retval = remap_pfn_range(vma, vma->vm_start, 302 retval = remap_pfn_range(vma, vma->vm_start,
303 PFN_DOWN(virt_to_phys(mem->vaddr)), 303 mem->dma_handle >> PAGE_SHIFT,
304 size, vma->vm_page_prot); 304 size, vma->vm_page_prot);
305 if (retval) { 305 if (retval) {
306 dev_err(q->dev, "mmap: remap failed with error %d. ", retval); 306 dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
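
[Editor's note] The virtual address returned by dma_alloc_coherent() is not, in general, something virt_to_phys() can translate, so the pfn handed to remap_pfn_range() has to come from the DMA handle instead. A trivial sketch of the arithmetic only; the page size and bus address are assumptions of the example:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages for the illustration */

int main(void)
{
	unsigned long dma_handle = 0x2f600000UL;	/* hypothetical bus address */

	/* The mmap fix: derive the pfn from the DMA handle, not from
	 * virt_to_phys() on the coherent kernel virtual address. */
	unsigned long pfn = dma_handle >> PAGE_SHIFT;

	printf("pfn = 0x%lx\n", pfn);
	return 0;
}
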
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 6698c77e0f64..6ba1461d51ef 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -37,6 +37,9 @@ module_param(debug, int, 0644);
37#define call_qop(q, op, args...) \ 37#define call_qop(q, op, args...) \
38 (((q)->ops->op) ? ((q)->ops->op(args)) : 0) 38 (((q)->ops->op) ? ((q)->ops->op(args)) : 0)
39 39
40#define V4L2_BUFFER_STATE_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
41 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR)
42
40/** 43/**
41 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer 44 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
42 */ 45 */
@@ -51,7 +54,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb,
51 for (plane = 0; plane < vb->num_planes; ++plane) { 54 for (plane = 0; plane < vb->num_planes; ++plane) {
52 mem_priv = call_memop(q, plane, alloc, q->alloc_ctx[plane], 55 mem_priv = call_memop(q, plane, alloc, q->alloc_ctx[plane],
53 plane_sizes[plane]); 56 plane_sizes[plane]);
54 if (!mem_priv) 57 if (IS_ERR_OR_NULL(mem_priv))
55 goto free; 58 goto free;
56 59
57 /* Associate allocator private data with this plane */ 60 /* Associate allocator private data with this plane */
@@ -284,7 +287,7 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
284 struct vb2_queue *q = vb->vb2_queue; 287 struct vb2_queue *q = vb->vb2_queue;
285 int ret = 0; 288 int ret = 0;
286 289
287 /* Copy back data such as timestamp, input, etc. */ 290 /* Copy back data such as timestamp, flags, input, etc. */
288 memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); 291 memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
289 b->input = vb->v4l2_buf.input; 292 b->input = vb->v4l2_buf.input;
290 b->reserved = vb->v4l2_buf.reserved; 293 b->reserved = vb->v4l2_buf.reserved;
@@ -313,7 +316,10 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
313 b->m.userptr = vb->v4l2_planes[0].m.userptr; 316 b->m.userptr = vb->v4l2_planes[0].m.userptr;
314 } 317 }
315 318
316 b->flags = 0; 319 /*
320 * Clear any buffer state related flags.
321 */
322 b->flags &= ~V4L2_BUFFER_STATE_FLAGS;
317 323
318 switch (vb->state) { 324 switch (vb->state) {
319 case VB2_BUF_STATE_QUEUED: 325 case VB2_BUF_STATE_QUEUED:
@@ -519,6 +525,7 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
519 num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME); 525 num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
520 memset(plane_sizes, 0, sizeof(plane_sizes)); 526 memset(plane_sizes, 0, sizeof(plane_sizes));
521 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); 527 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
528 q->memory = req->memory;
522 529
523 /* 530 /*
524 * Ask the driver how many buffers and planes per buffer it requires. 531 * Ask the driver how many buffers and planes per buffer it requires.
@@ -560,8 +567,6 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
560 ret = num_buffers; 567 ret = num_buffers;
561 } 568 }
562 569
563 q->memory = req->memory;
564
565 /* 570 /*
566 * Return the number of successfully allocated buffers 571 * Return the number of successfully allocated buffers
567 * to the userspace. 572 * to the userspace.
@@ -715,6 +720,8 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b,
715 720
716 vb->v4l2_buf.field = b->field; 721 vb->v4l2_buf.field = b->field;
717 vb->v4l2_buf.timestamp = b->timestamp; 722 vb->v4l2_buf.timestamp = b->timestamp;
723 vb->v4l2_buf.input = b->input;
724 vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;
718 725
719 return 0; 726 return 0;
720} 727}
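
[Editor's note] With the new V4L2_BUFFER_STATE_FLAGS mask, user-supplied flags survive QBUF/DQBUF while the state bits (MAPPED/QUEUED/DONE/ERROR) stay under core control: __fill_vb2_buffer() strips them on the way in and __fill_v4l2_buffer() clears them before setting them from vb->state on the way out. A standalone sketch of the two masking steps; the flag values are stand-ins, not the real V4L2 bit positions:

#include <stdio.h>

/* Stand-in flag values; the real V4L2_BUF_FLAG_* bits differ. */
#define BUF_FLAG_MAPPED   0x01
#define BUF_FLAG_QUEUED   0x02
#define BUF_FLAG_DONE     0x04
#define BUF_FLAG_ERROR    0x08
#define BUF_FLAG_KEYFRAME 0x10		/* example of a user-owned flag */

#define BUFFER_STATE_FLAGS \
	(BUF_FLAG_MAPPED | BUF_FLAG_QUEUED | BUF_FLAG_DONE | BUF_FLAG_ERROR)

int main(void)
{
	unsigned int user_flags = BUF_FLAG_KEYFRAME | BUF_FLAG_DONE; /* QBUF input */
	unsigned int stored, reported;

	stored = user_flags & ~BUFFER_STATE_FLAGS;	/* __fill_vb2_buffer() */
	reported = stored & ~BUFFER_STATE_FLAGS;	/* clear state bits ... */
	reported |= BUF_FLAG_QUEUED;			/* ... then set from vb->state */

	printf("stored=0x%x reported=0x%x\n", stored, reported);
	return 0;
}
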
diff --git a/drivers/media/video/videobuf2-dma-contig.c b/drivers/media/video/videobuf2-dma-contig.c
index 58205d596138..a790a5f8c06f 100644
--- a/drivers/media/video/videobuf2-dma-contig.c
+++ b/drivers/media/video/videobuf2-dma-contig.c
@@ -46,7 +46,7 @@ static void *vb2_dma_contig_alloc(void *alloc_ctx, unsigned long size)
46 GFP_KERNEL); 46 GFP_KERNEL);
47 if (!buf->vaddr) { 47 if (!buf->vaddr) {
48 dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n", 48 dev_err(conf->dev, "dma_alloc_coherent of size %ld failed\n",
49 buf->size); 49 size);
50 kfree(buf); 50 kfree(buf);
51 return ERR_PTR(-ENOMEM); 51 return ERR_PTR(-ENOMEM);
52 } 52 }
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 96c0b34ba8db..657b9f4b6f9b 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -400,7 +400,7 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
400 doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE); 400 doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
401 doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); 401 doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
402 402
403 /* We can't' use dev_ready here, but at least we wait for the 403 /* We can't use dev_ready here, but at least we wait for the
404 * command to complete 404 * command to complete
405 */ 405 */
406 udelay(50); 406 udelay(50);
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index e3de0b8625cd..7581518ecfa2 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -38,6 +38,8 @@
38#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 38#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
39#define bfa_ioc_notify_fail(__ioc) \ 39#define bfa_ioc_notify_fail(__ioc) \
40 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) 40 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
41#define bfa_ioc_sync_start(__ioc) \
42 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
41#define bfa_ioc_sync_join(__ioc) \ 43#define bfa_ioc_sync_join(__ioc) \
42 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) 44 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
43#define bfa_ioc_sync_leave(__ioc) \ 45#define bfa_ioc_sync_leave(__ioc) \
@@ -602,7 +604,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
602 switch (event) { 604 switch (event) {
603 case IOCPF_E_SEMLOCKED: 605 case IOCPF_E_SEMLOCKED:
604 if (bfa_ioc_firmware_lock(ioc)) { 606 if (bfa_ioc_firmware_lock(ioc)) {
605 if (bfa_ioc_sync_complete(ioc)) { 607 if (bfa_ioc_sync_start(ioc)) {
606 iocpf->retry_count = 0; 608 iocpf->retry_count = 0;
607 bfa_ioc_sync_join(ioc); 609 bfa_ioc_sync_join(ioc);
608 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); 610 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
@@ -1314,7 +1316,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1314 * execution context (driver/bios) must match. 1316 * execution context (driver/bios) must match.
1315 */ 1317 */
1316static bool 1318static bool
1317bfa_ioc_fwver_valid(struct bfa_ioc *ioc) 1319bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1318{ 1320{
1319 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; 1321 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
1320 1322
@@ -1325,7 +1327,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
1325 if (fwhdr.signature != drv_fwhdr->signature) 1327 if (fwhdr.signature != drv_fwhdr->signature)
1326 return false; 1328 return false;
1327 1329
1328 if (fwhdr.exec != drv_fwhdr->exec) 1330 if (swab32(fwhdr.param) != boot_env)
1329 return false; 1331 return false;
1330 1332
1331 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); 1333 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1352,9 +1354,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1352{ 1354{
1353 enum bfi_ioc_state ioc_fwstate; 1355 enum bfi_ioc_state ioc_fwstate;
1354 bool fwvalid; 1356 bool fwvalid;
1357 u32 boot_env;
1355 1358
1356 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); 1359 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1357 1360
1361 boot_env = BFI_BOOT_LOADER_OS;
1362
1358 if (force) 1363 if (force)
1359 ioc_fwstate = BFI_IOC_UNINIT; 1364 ioc_fwstate = BFI_IOC_UNINIT;
1360 1365
@@ -1362,10 +1367,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1362 * check if firmware is valid 1367 * check if firmware is valid
1363 */ 1368 */
1364 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? 1369 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1365 false : bfa_ioc_fwver_valid(ioc); 1370 false : bfa_ioc_fwver_valid(ioc, boot_env);
1366 1371
1367 if (!fwvalid) { 1372 if (!fwvalid) {
1368 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); 1373 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
1369 return; 1374 return;
1370 } 1375 }
1371 1376
@@ -1396,7 +1401,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1396 /** 1401 /**
1397 * Initialize the h/w for any other states. 1402 * Initialize the h/w for any other states.
1398 */ 1403 */
1399 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id); 1404 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
1400} 1405}
1401 1406
1402void 1407void
@@ -1506,7 +1511,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1506 */ 1511 */
1507static void 1512static void
1508bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, 1513bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1509 u32 boot_param) 1514 u32 boot_env)
1510{ 1515{
1511 u32 *fwimg; 1516 u32 *fwimg;
1512 u32 pgnum, pgoff; 1517 u32 pgnum, pgoff;
@@ -1558,10 +1563,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1558 /* 1563 /*
1559 * Set boot type and boot param at the end. 1564 * Set boot type and boot param at the end.
1560 */ 1565 */
1561 writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start) 1566 writel(boot_type, ((ioc->ioc_regs.smem_page_start)
1562 + (BFI_BOOT_TYPE_OFF))); 1567 + (BFI_BOOT_TYPE_OFF)));
1563 writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start) 1568 writel(boot_env, ((ioc->ioc_regs.smem_page_start)
1564 + (BFI_BOOT_PARAM_OFF))); 1569 + (BFI_BOOT_LOADER_OFF)));
1565} 1570}
1566 1571
1567static void 1572static void
@@ -1721,7 +1726,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
1721 * as the entry vector. 1726 * as the entry vector.
1722 */ 1727 */
1723static void 1728static void
1724bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param) 1729bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
1725{ 1730{
1726 void __iomem *rb; 1731 void __iomem *rb;
1727 1732
@@ -1734,7 +1739,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
1734 * Initialize IOC state of all functions on a chip reset. 1739 * Initialize IOC state of all functions on a chip reset.
1735 */ 1740 */
1736 rb = ioc->pcidev.pci_bar_kva; 1741 rb = ioc->pcidev.pci_bar_kva;
1737 if (boot_param == BFI_BOOT_TYPE_MEMTEST) { 1742 if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
1738 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG)); 1743 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1739 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG)); 1744 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1740 } else { 1745 } else {
@@ -1743,7 +1748,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
1743 } 1748 }
1744 1749
1745 bfa_ioc_msgflush(ioc); 1750 bfa_ioc_msgflush(ioc);
1746 bfa_ioc_download_fw(ioc, boot_type, boot_param); 1751 bfa_ioc_download_fw(ioc, boot_type, boot_env);
1747 1752
1748 /** 1753 /**
1749 * Enable interrupts just before starting LPU 1754 * Enable interrupts just before starting LPU
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index e4974bc24ef6..bd48abee781f 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -194,6 +194,7 @@ struct bfa_ioc_hwif {
194 bool msix); 194 bool msix);
195 void (*ioc_notify_fail) (struct bfa_ioc *ioc); 195 void (*ioc_notify_fail) (struct bfa_ioc *ioc);
196 void (*ioc_ownership_reset) (struct bfa_ioc *ioc); 196 void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
197 bool (*ioc_sync_start) (struct bfa_ioc *ioc);
197 void (*ioc_sync_join) (struct bfa_ioc *ioc); 198 void (*ioc_sync_join) (struct bfa_ioc *ioc);
198 void (*ioc_sync_leave) (struct bfa_ioc *ioc); 199 void (*ioc_sync_leave) (struct bfa_ioc *ioc);
199 void (*ioc_sync_ack) (struct bfa_ioc *ioc); 200 void (*ioc_sync_ack) (struct bfa_ioc *ioc);
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 469997c4ffd1..87aecdf22cf9 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); 41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
42static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); 42static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); 43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
44static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
44static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); 45static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
45static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); 46static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
46static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); 47static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
@@ -63,6 +64,7 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
63 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 64 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
64 nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail; 65 nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
65 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 66 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
67 nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
66 nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join; 68 nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
67 nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave; 69 nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
68 nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack; 70 nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -345,6 +347,32 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
345/** 347/**
346 * Synchronized IOC failure processing routines 348 * Synchronized IOC failure processing routines
347 */ 349 */
350static bool
351bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
352{
353 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
354 u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
355
356 /*
357 * Driver load time. If the sync required bit for this PCI fn
358 * is set, it is due to an unclean exit by the driver for this
359 * PCI fn in the previous incarnation. Whoever comes here first
360 * should clean it up, no matter which PCI fn.
361 */
362
363 if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
364 writel(0, ioc->ioc_regs.ioc_fail_sync);
365 writel(1, ioc->ioc_regs.ioc_usage_reg);
366 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
367 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
368 return true;
369 }
370
371 return bfa_ioc_ct_sync_complete(ioc);
372}
373/**
374 * Synchronized IOC failure processing routines
375 */
348static void 376static void
349bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) 377bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
350{ 378{
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
index a97396811050..6050379526f7 100644
--- a/drivers/net/bna/bfi.h
+++ b/drivers/net/bna/bfi.h
@@ -184,12 +184,14 @@ enum bfi_mclass {
184#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ 184#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
185 185
186#define BFI_BOOT_TYPE_OFF 8 186#define BFI_BOOT_TYPE_OFF 8
187#define BFI_BOOT_PARAM_OFF 12 187#define BFI_BOOT_LOADER_OFF 12
188 188
189#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */ 189#define BFI_BOOT_TYPE_NORMAL 0
190#define BFI_BOOT_TYPE_FLASH 1 190#define BFI_BOOT_TYPE_FLASH 1
191#define BFI_BOOT_TYPE_MEMTEST 2 191#define BFI_BOOT_TYPE_MEMTEST 2
192 192
193#define BFI_BOOT_LOADER_OS 0
194
193#define BFI_BOOT_MEMTEST_RES_ADDR 0x900 195#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
194#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3 196#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
195 197
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 9f356d5d0f33..8e6ceab9f4d8 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -1837,7 +1837,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
1837 /* Initialize the Rx event handlers */ 1837 /* Initialize the Rx event handlers */
1838 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup; 1838 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1839 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy; 1839 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1840 rx_cbfn.rcb_destroy_cbfn = NULL;
1841 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup; 1840 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1842 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy; 1841 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
1843 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup; 1842 rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index f5050155c6b5..89cb977898cb 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -2114,19 +2114,18 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
2114 for (i = 0; i < (data * 2); i++) { 2114 for (i = 0; i < (data * 2); i++) {
2115 if ((i % 2) == 0) 2115 if ((i % 2) == 0)
2116 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2116 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2117 LED_MODE_OPER, SPEED_1000); 2117 LED_MODE_ON, SPEED_1000);
2118 else 2118 else
2119 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2119 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2120 LED_MODE_OFF, 0); 2120 LED_MODE_FRONT_PANEL_OFF, 0);
2121 2121
2122 msleep_interruptible(500); 2122 msleep_interruptible(500);
2123 if (signal_pending(current)) 2123 if (signal_pending(current))
2124 break; 2124 break;
2125 } 2125 }
2126 2126
2127 if (bp->link_vars.link_up) 2127 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2128 bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER, 2128 LED_MODE_OPER, bp->link_vars.line_speed);
2129 bp->link_vars.line_speed);
2130 2129
2131 return 0; 2130 return 0;
2132} 2131}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9bc5de3e04a8..ba715826e2a8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -176,7 +176,7 @@ static int tlb_initialize(struct bonding *bond)
176 bond_info->tx_hashtbl = new_hashtbl; 176 bond_info->tx_hashtbl = new_hashtbl;
177 177
178 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { 178 for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
179 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1); 179 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
180 } 180 }
181 181
182 _unlock_tx_hashtbl(bond); 182 _unlock_tx_hashtbl(bond);
@@ -701,7 +701,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
701 */ 701 */
702 rlb_choose_channel(skb, bond); 702 rlb_choose_channel(skb, bond);
703 703
704 /* The ARP relpy packets must be delayed so that 704 /* The ARP reply packets must be delayed so that
705 * they can cancel out the influence of the ARP request. 705 * they can cancel out the influence of the ARP request.
706 */ 706 */
707 bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY; 707 bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
@@ -1042,7 +1042,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
1042 * 1042 *
1043 * If the permanent hw address of @slave is @bond's hw address, we need to 1043 * If the permanent hw address of @slave is @bond's hw address, we need to
1044 * find a different hw address to give @slave, that isn't in use by any other 1044 * find a different hw address to give @slave, that isn't in use by any other
1045 * slave in the bond. This address must be, of course, one of the premanent 1045 * slave in the bond. This address must be, of course, one of the permanent
1046 * addresses of the other slaves. 1046 * addresses of the other slaves.
1047 * 1047 *
1048 * We go over the slave list, and for each slave there we compare its 1048 * We go over the slave list, and for each slave there we compare its
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 86861f08b24d..8ca7158b2dda 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -75,7 +75,7 @@ struct tlb_client_info {
75 * gave this entry index. 75 * gave this entry index.
76 */ 76 */
77 u32 tx_bytes; /* Each Client accumulates the BytesTx that 77 u32 tx_bytes; /* Each Client accumulates the BytesTx that
78 * were tranmitted to it, and after each 78 * were transmitted to it, and after each
79 * CallBack the LoadHistory is divided 79 * CallBack the LoadHistory is divided
80 * by the balance interval 80 * by the balance interval
81 */ 81 */
@@ -122,7 +122,6 @@ struct tlb_slave_info {
122}; 122};
123 123
124struct alb_bond_info { 124struct alb_bond_info {
125 struct timer_list alb_timer;
126 struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ 125 struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
127 spinlock_t tx_hashtbl_lock; 126 spinlock_t tx_hashtbl_lock;
128 u32 unbalanced_load; 127 u32 unbalanced_load;
@@ -140,7 +139,6 @@ struct alb_bond_info {
140 struct slave *next_rx_slave;/* next slave to be assigned 139 struct slave *next_rx_slave;/* next slave to be assigned
141 * to a new rx client for 140 * to a new rx client for
142 */ 141 */
143 u32 rlb_interval_counter;
144 u8 primary_is_promisc; /* boolean */ 142 u8 primary_is_promisc; /* boolean */
145 u32 rlb_promisc_timeout_counter;/* counts primary 143 u32 rlb_promisc_timeout_counter;/* counts primary
146 * promiscuity time 144 * promiscuity time
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c0a1bc5b1435..bd1d811c204f 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -260,7 +260,7 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
260 260
261 if (!ofdev->dev.of_match) 261 if (!ofdev->dev.of_match)
262 return -EINVAL; 262 return -EINVAL;
263 data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data; 263 data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
264 264
265 base = of_iomap(np, 0); 265 base = of_iomap(np, 0);
266 if (!base) { 266 if (!base) {
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ea0dc451da9c..d70fb76edb77 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -173,7 +173,8 @@ static void loopback_setup(struct net_device *dev)
173 | NETIF_F_RXCSUM 173 | NETIF_F_RXCSUM
174 | NETIF_F_HIGHDMA 174 | NETIF_F_HIGHDMA
175 | NETIF_F_LLTX 175 | NETIF_F_LLTX
176 | NETIF_F_NETNS_LOCAL; 176 | NETIF_F_NETNS_LOCAL
177 | NETIF_F_VLAN_CHALLENGED;
177 dev->ethtool_ops = &loopback_ethtool_ops; 178 dev->ethtool_ops = &loopback_ethtool_ops;
178 dev->header_ops = &eth_header_ops; 179 dev->header_ops = &eth_header_ops;
179 dev->netdev_ops = &loopback_ops; 180 dev->netdev_ops = &loopback_ops;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index aa2813e06d00..1074231f0a0d 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -860,6 +860,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
860 prev_eedata = eedata; 860 prev_eedata = eedata;
861 } 861 }
862 862
863 /* Store MAC Address in perm_addr */
864 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
865
863 dev->base_addr = (unsigned long __force) ioaddr; 866 dev->base_addr = (unsigned long __force) ioaddr;
864 dev->irq = irq; 867 dev->irq = irq;
865 868
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index d7299f1a4940..679dc8519c5b 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -174,7 +174,7 @@
174 174
175#define MAX_NUM_CARDS 4 175#define MAX_NUM_CARDS 4
176 176
177#define MAX_BUFFERS_PER_CMD 32 177#define NETXEN_MAX_FRAGS_PER_TX 14
178#define MAX_TSO_HEADER_DESC 2 178#define MAX_TSO_HEADER_DESC 2
179#define MGMT_CMD_DESC_RESV 4 179#define MGMT_CMD_DESC_RESV 4
180#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ 180#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@ -558,7 +558,7 @@ struct netxen_recv_crb {
558 */ 558 */
559struct netxen_cmd_buffer { 559struct netxen_cmd_buffer {
560 struct sk_buff *skb; 560 struct sk_buff *skb;
561 struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1]; 561 struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
562 u32 frag_count; 562 u32 frag_count;
563}; 563};
564 564
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 83348dc4b184..e8a4b6655999 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1844,6 +1844,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1844 struct cmd_desc_type0 *hwdesc, *first_desc; 1844 struct cmd_desc_type0 *hwdesc, *first_desc;
1845 struct pci_dev *pdev; 1845 struct pci_dev *pdev;
1846 int i, k; 1846 int i, k;
1847 int delta = 0;
1848 struct skb_frag_struct *frag;
1847 1849
1848 u32 producer; 1850 u32 producer;
1849 int frag_count, no_of_desc; 1851 int frag_count, no_of_desc;
@@ -1851,6 +1853,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1851 1853
1852 frag_count = skb_shinfo(skb)->nr_frags + 1; 1854 frag_count = skb_shinfo(skb)->nr_frags + 1;
1853 1855
1856 /* 14 frags supported for normal packet and
1857 * 32 frags supported for TSO packet
1858 */
1859 if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
1860
1861 for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
1862 frag = &skb_shinfo(skb)->frags[i];
1863 delta += frag->size;
1864 }
1865
1866 if (!__pskb_pull_tail(skb, delta))
1867 goto drop_packet;
1868
1869 frag_count = 1 + skb_shinfo(skb)->nr_frags;
1870 }
1854 /* 4 fragments per cmd des */ 1871 /* 4 fragments per cmd des */
1855 no_of_desc = (frag_count + 3) >> 2; 1872 no_of_desc = (frag_count + 3) >> 2;
1856 1873
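
[Editor's note] The netxen and qlcnic hunks apply the same workaround: when a non-TSO skb carries more than 14 fragments, the excess fragment bytes are pulled into the linear area with __pskb_pull_tail() so the hardware limit is respected. A userspace model of the byte accounting only; the fragment sizes are made up, and the real code walks skb_shinfo(skb)->frags:

#include <stdio.h>

#define MAX_FRAGS_PER_TX 14

int main(void)
{
	unsigned int frag_size[20];
	unsigned int nr_frags = 20, frag_count = nr_frags + 1; /* +1 linear part */
	unsigned int delta = 0, i;

	for (i = 0; i < nr_frags; i++)
		frag_size[i] = 256;	/* hypothetical fragment sizes */

	if (frag_count > MAX_FRAGS_PER_TX) {
		/* Bytes that must move into the linear area. */
		for (i = 0; i < frag_count - MAX_FRAGS_PER_TX; i++)
			delta += frag_size[i];
		printf("pull %u bytes, then recount frags\n", delta);
	}
	return 0;
}
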
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index dc44564ef6f9..b0dead00b2d1 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -99,6 +99,7 @@
99#define TX_UDPV6_PKT 0x0c 99#define TX_UDPV6_PKT 0x0c
100 100
101/* Tx defines */ 101/* Tx defines */
102#define QLCNIC_MAX_FRAGS_PER_TX 14
102#define MAX_TSO_HEADER_DESC 2 103#define MAX_TSO_HEADER_DESC 2
103#define MGMT_CMD_DESC_RESV 4 104#define MGMT_CMD_DESC_RESV 4
104#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \ 105#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index cd88c7e1bfa9..cb1a1ef36c0a 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2099,6 +2099,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2099 struct cmd_desc_type0 *hwdesc, *first_desc; 2099 struct cmd_desc_type0 *hwdesc, *first_desc;
2100 struct pci_dev *pdev; 2100 struct pci_dev *pdev;
2101 struct ethhdr *phdr; 2101 struct ethhdr *phdr;
2102 int delta = 0;
2102 int i, k; 2103 int i, k;
2103 2104
2104 u32 producer; 2105 u32 producer;
@@ -2118,6 +2119,19 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2118 } 2119 }
2119 2120
2120 frag_count = skb_shinfo(skb)->nr_frags + 1; 2121 frag_count = skb_shinfo(skb)->nr_frags + 1;
2122 /* 14 frags supported for normal packet and
2123 * 32 frags supported for TSO packet
2124 */
2125 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2126
2127 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2128 delta += skb_shinfo(skb)->frags[i].size;
2129
2130 if (!__pskb_pull_tail(skb, delta))
2131 goto drop_packet;
2132
2133 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2134 }
2121 2135
2122 /* 4 fragments per cmd des */ 2136 /* 4 fragments per cmd des */
2123 no_of_desc = (frag_count + 3) >> 2; 2137 no_of_desc = (frag_count + 3) >> 2;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d890679e4c4d..a3c2aab53de8 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
328 * processing to finish, then directly poll (and ack ) the eventq. 328 * processing to finish, then directly poll (and ack ) the eventq.
329 * Finally reenable NAPI and interrupts. 329 * Finally reenable NAPI and interrupts.
330 * 330 *
331 * Since we are touching interrupts the caller should hold the suspend lock 331 * This is for use only during a loopback self-test. It must not
332 * deliver any packets up the stack as this can result in deadlock.
332 */ 333 */
333void efx_process_channel_now(struct efx_channel *channel) 334void efx_process_channel_now(struct efx_channel *channel)
334{ 335{
@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
336 337
337 BUG_ON(channel->channel >= efx->n_channels); 338 BUG_ON(channel->channel >= efx->n_channels);
338 BUG_ON(!channel->enabled); 339 BUG_ON(!channel->enabled);
340 BUG_ON(!efx->loopback_selftest);
339 341
340 /* Disable interrupts and wait for ISRs to complete */ 342 /* Disable interrupts and wait for ISRs to complete */
341 efx_nic_disable_interrupts(efx); 343 efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
1436 * restart the transmit interface early so the watchdog timer stops */ 1438 * restart the transmit interface early so the watchdog timer stops */
1437 efx_start_port(efx); 1439 efx_start_port(efx);
1438 1440
1439 if (efx_dev_registered(efx)) 1441 if (efx_dev_registered(efx) && !efx->port_inhibited)
1440 netif_tx_wake_all_queues(efx->net_dev); 1442 netif_tx_wake_all_queues(efx->net_dev);
1441 1443
1442 efx_for_each_channel(channel, efx) 1444 efx_for_each_channel(channel, efx)
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index d9d8c2ef1074..cc978803d484 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
152 152
153 spin_lock_irqsave(&efx->biu_lock, flags); 153 spin_lock_irqsave(&efx->biu_lock, flags);
154 value->u32[0] = _efx_readd(efx, reg + 0); 154 value->u32[0] = _efx_readd(efx, reg + 0);
155 rmb();
155 value->u32[1] = _efx_readd(efx, reg + 4); 156 value->u32[1] = _efx_readd(efx, reg + 4);
156 value->u32[2] = _efx_readd(efx, reg + 8); 157 value->u32[2] = _efx_readd(efx, reg + 8);
157 value->u32[3] = _efx_readd(efx, reg + 12); 158 value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
174 value->u64[0] = (__force __le64)__raw_readq(membase + addr); 175 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
175#else 176#else
176 value->u32[0] = (__force __le32)__raw_readl(membase + addr); 177 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
178 rmb();
177 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); 179 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
178#endif 180#endif
179 spin_unlock_irqrestore(&efx->biu_lock, flags); 181 spin_unlock_irqrestore(&efx->biu_lock, flags);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 9ffa9a6b55a0..191a311da2dc 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
330 * @eventq_mask: Event queue pointer mask 330 * @eventq_mask: Event queue pointer mask
331 * @eventq_read_ptr: Event queue read pointer 331 * @eventq_read_ptr: Event queue read pointer
332 * @last_eventq_read_ptr: Last event queue read pointer value. 332 * @last_eventq_read_ptr: Last event queue read pointer value.
333 * @magic_count: Event queue test event count
334 * @irq_count: Number of IRQs since last adaptive moderation decision 333 * @irq_count: Number of IRQs since last adaptive moderation decision
335 * @irq_mod_score: IRQ moderation score 334 * @irq_mod_score: IRQ moderation score
336 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors 335 * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@ struct efx_channel {
360 unsigned int eventq_mask; 359 unsigned int eventq_mask;
361 unsigned int eventq_read_ptr; 360 unsigned int eventq_read_ptr;
362 unsigned int last_eventq_read_ptr; 361 unsigned int last_eventq_read_ptr;
363 unsigned int magic_count;
364 362
365 unsigned int irq_count; 363 unsigned int irq_count;
366 unsigned int irq_mod_score; 364 unsigned int irq_mod_score;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index e8396614daf3..10f1cb79c147 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
84static inline efx_qword_t *efx_event(struct efx_channel *channel, 84static inline efx_qword_t *efx_event(struct efx_channel *channel,
85 unsigned int index) 85 unsigned int index)
86{ 86{
87 return ((efx_qword_t *) (channel->eventq.addr)) + index; 87 return ((efx_qword_t *) (channel->eventq.addr)) +
88 (index & channel->eventq_mask);
88} 89}
89 90
90/* See if an event is present 91/* See if an event is present
@@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
673 efx_dword_t reg; 674 efx_dword_t reg;
674 struct efx_nic *efx = channel->efx; 675 struct efx_nic *efx = channel->efx;
675 676
676 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr); 677 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
678 channel->eventq_read_ptr & channel->eventq_mask);
677 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base, 679 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
678 channel->channel); 680 channel->channel);
679} 681}
@@ -908,7 +910,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
908 910
909 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 911 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
910 if (code == EFX_CHANNEL_MAGIC_TEST(channel)) 912 if (code == EFX_CHANNEL_MAGIC_TEST(channel))
911 ++channel->magic_count; 913 ; /* ignore */
912 else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) 914 else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
913 /* The queue must be empty, so we won't receive any rx 915 /* The queue must be empty, so we won't receive any rx
914 * events, so efx_process_channel() won't refill the 916 * events, so efx_process_channel() won't refill the
@@ -1015,8 +1017,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1015 /* Clear this event by marking it all ones */ 1017 /* Clear this event by marking it all ones */
1016 EFX_SET_QWORD(*p_event); 1018 EFX_SET_QWORD(*p_event);
1017 1019
1018 /* Increment read pointer */ 1020 ++read_ptr;
1019 read_ptr = (read_ptr + 1) & channel->eventq_mask;
1020 1021
1021 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1022 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1022 1023
@@ -1060,6 +1061,13 @@ out:
1060 return spent; 1061 return spent;
1061} 1062}
1062 1063
1064/* Check whether an event is present in the eventq at the current
1065 * read pointer. Only useful for self-test.
1066 */
1067bool efx_nic_event_present(struct efx_channel *channel)
1068{
1069 return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1070}
1063 1071
1064/* Allocate buffer table entries for event queue */ 1072/* Allocate buffer table entries for event queue */
1065int efx_nic_probe_eventq(struct efx_channel *channel) 1073int efx_nic_probe_eventq(struct efx_channel *channel)
@@ -1165,7 +1173,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1165 struct efx_tx_queue *tx_queue; 1173 struct efx_tx_queue *tx_queue;
1166 struct efx_rx_queue *rx_queue; 1174 struct efx_rx_queue *rx_queue;
1167 unsigned int read_ptr = channel->eventq_read_ptr; 1175 unsigned int read_ptr = channel->eventq_read_ptr;
1168 unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask; 1176 unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
1169 1177
1170 do { 1178 do {
1171 efx_qword_t *event = efx_event(channel, read_ptr); 1179 efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1205,7 +1213,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1205 * it's ok to throw away every non-flush event */ 1213 * it's ok to throw away every non-flush event */
1206 EFX_SET_QWORD(*event); 1214 EFX_SET_QWORD(*event);
1207 1215
1208 read_ptr = (read_ptr + 1) & channel->eventq_mask; 1216 ++read_ptr;
1209 } while (read_ptr != end_ptr); 1217 } while (read_ptr != end_ptr);
1210 1218
1211 channel->eventq_read_ptr = read_ptr; 1219 channel->eventq_read_ptr = read_ptr;
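
[Editor's note] The event-queue read pointer is now free-running: it is incremented without wrapping and only masked with eventq_mask at the points of use (efx_event() and the RPTR register write), which lets the self-test detect progress by comparing pointer values. A small standalone model of the masking; the queue size of 8 entries is an assumption of the example:

#include <stdio.h>

#define EVENTQ_ENTRIES 8			/* must be a power of two */
#define EVENTQ_MASK (EVENTQ_ENTRIES - 1)

static int eventq[EVENTQ_ENTRIES];

static int *event_at(unsigned int read_ptr)
{
	return &eventq[read_ptr & EVENTQ_MASK];	/* mask only at use time */
}

int main(void)
{
	unsigned int read_ptr = 0;
	int i;

	for (i = 0; i < 20; i++) {		/* pointer happily exceeds the queue size */
		*event_at(read_ptr) = i;
		++read_ptr;			/* free-running increment */
	}
	printf("read_ptr=%u slot=%u\n", read_ptr, read_ptr & EVENTQ_MASK);
	return 0;
}
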
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index d9de1b647d41..a42db6e35be3 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
184extern void efx_nic_remove_eventq(struct efx_channel *channel); 184extern void efx_nic_remove_eventq(struct efx_channel *channel);
185extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); 185extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
186extern void efx_nic_eventq_read_ack(struct efx_channel *channel); 186extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
187extern bool efx_nic_event_present(struct efx_channel *channel);
187 188
188/* MAC/PHY */ 189/* MAC/PHY */
189extern void falcon_drain_tx_fifo(struct efx_nic *efx); 190extern void falcon_drain_tx_fifo(struct efx_nic *efx);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index a0f49b348d62..50ad3bcaf68a 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
131static int efx_test_interrupts(struct efx_nic *efx, 131static int efx_test_interrupts(struct efx_nic *efx,
132 struct efx_self_tests *tests) 132 struct efx_self_tests *tests)
133{ 133{
134 struct efx_channel *channel;
135
136 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); 134 netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
137 tests->interrupt = -1; 135 tests->interrupt = -1;
138 136
@@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
140 efx->last_irq_cpu = -1; 138 efx->last_irq_cpu = -1;
141 smp_wmb(); 139 smp_wmb();
142 140
143 /* ACK each interrupting event queue. Receiving an interrupt due to
144 * traffic before a test event is raised is considered a pass */
145 efx_for_each_channel(channel, efx) {
146 if (channel->work_pending)
147 efx_process_channel_now(channel);
148 if (efx->last_irq_cpu >= 0)
149 goto success;
150 }
151
152 efx_nic_generate_interrupt(efx); 141 efx_nic_generate_interrupt(efx);
153 142
154 /* Wait for arrival of test interrupt. */ 143 /* Wait for arrival of test interrupt. */
@@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
173 struct efx_self_tests *tests) 162 struct efx_self_tests *tests)
174{ 163{
175 struct efx_nic *efx = channel->efx; 164 struct efx_nic *efx = channel->efx;
176 unsigned int magic_count, count; 165 unsigned int read_ptr, count;
177 166
178 tests->eventq_dma[channel->channel] = -1; 167 tests->eventq_dma[channel->channel] = -1;
179 tests->eventq_int[channel->channel] = -1; 168 tests->eventq_int[channel->channel] = -1;
180 tests->eventq_poll[channel->channel] = -1; 169 tests->eventq_poll[channel->channel] = -1;
181 170
182 magic_count = channel->magic_count; 171 read_ptr = channel->eventq_read_ptr;
183 channel->efx->last_irq_cpu = -1; 172 channel->efx->last_irq_cpu = -1;
184 smp_wmb(); 173 smp_wmb();
185 174
@@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
190 do { 179 do {
191 schedule_timeout_uninterruptible(HZ / 100); 180 schedule_timeout_uninterruptible(HZ / 100);
192 181
193 if (channel->work_pending) 182 if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
194 efx_process_channel_now(channel);
195
196 if (channel->magic_count != magic_count)
197 goto eventq_ok; 183 goto eventq_ok;
198 } while (++count < 2); 184 } while (++count < 2);
199 185
@@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
211 } 197 }
212 198
213 /* Check to see if event was received even if interrupt wasn't */ 199 /* Check to see if event was received even if interrupt wasn't */
214 efx_process_channel_now(channel); 200 if (efx_nic_event_present(channel)) {
215 if (channel->magic_count != magic_count) {
216 netif_err(efx, drv, efx->net_dev, 201 netif_err(efx, drv, efx->net_dev,
217 "channel %d event was generated, but " 202 "channel %d event was generated, but "
218 "failed to trigger an interrupt\n", channel->channel); 203 "failed to trigger an interrupt\n", channel->channel);
@@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
770 __efx_reconfigure_port(efx); 755 __efx_reconfigure_port(efx);
771 mutex_unlock(&efx->mac_lock); 756 mutex_unlock(&efx->mac_lock);
772 757
758 netif_tx_wake_all_queues(efx->net_dev);
759
773 return rc_test; 760 return rc_test;
774} 761}
775 762
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 139801908217..d2c85dfdf3bf 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
435 * queue state. */ 435 * queue state. */
436 smp_mb(); 436 smp_mb();
437 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && 437 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
438 likely(efx->port_enabled)) { 438 likely(efx->port_enabled) &&
439 likely(!efx->port_inhibited)) {
439 fill_level = tx_queue->insert_count - tx_queue->read_count; 440 fill_level = tx_queue->insert_count - tx_queue->read_count;
440 if (fill_level < EFX_TXQ_THRESHOLD(efx)) { 441 if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
441 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 442 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index cb317cd069ff..484f795a779d 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -240,7 +240,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
240 * @net_dev: the net device to get address for 240 * @net_dev: the net device to get address for
241 * 241 *
242 * Older SiS900 and friends, use EEPROM to store MAC address. 242 * Older SiS900 and friends, use EEPROM to store MAC address.
243 * MAC address is read from read_eeprom() into @net_dev->dev_addr. 243 * MAC address is read from read_eeprom() into @net_dev->dev_addr and
244 * @net_dev->perm_addr.
244 */ 245 */
245 246
246static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) 247static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
@@ -261,6 +262,9 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
261 for (i = 0; i < 3; i++) 262 for (i = 0; i < 3; i++)
262 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); 263 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
263 264
265 /* Store MAC Address in perm_addr */
266 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
267
264 return 1; 268 return 1;
265} 269}
266 270
@@ -271,7 +275,8 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
271 * 275 *
272 * SiS630E model, use APC CMOS RAM to store MAC address. 276 * SiS630E model, use APC CMOS RAM to store MAC address.
273 * APC CMOS RAM is accessed through ISA bridge. 277 * APC CMOS RAM is accessed through ISA bridge.
274 * MAC address is read into @net_dev->dev_addr. 278 * MAC address is read into @net_dev->dev_addr and
279 * @net_dev->perm_addr.
275 */ 280 */
276 281
277static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev, 282static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
@@ -296,6 +301,10 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
296 outb(0x09 + i, 0x70); 301 outb(0x09 + i, 0x70);
297 ((u8 *)(net_dev->dev_addr))[i] = inb(0x71); 302 ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
298 } 303 }
304
305 /* Store MAC Address in perm_addr */
306 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
307
299 pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40); 308 pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
300 pci_dev_put(isa_bridge); 309 pci_dev_put(isa_bridge);
301 310
@@ -310,7 +319,7 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
310 * 319 *
311 * SiS635 model, set MAC Reload Bit to load Mac address from APC 320 * SiS635 model, set MAC Reload Bit to load Mac address from APC
312 * to rfdr. rfdr is accessed through rfcr. MAC address is read into 321 * to rfdr. rfdr is accessed through rfcr. MAC address is read into
313 * @net_dev->dev_addr. 322 * @net_dev->dev_addr and @net_dev->perm_addr.
314 */ 323 */
315 324
316static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, 325static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
@@ -334,6 +343,9 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
334 *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr); 343 *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
335 } 344 }
336 345
346 /* Store MAC Address in perm_addr */
347 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
348
337 /* enable packet filtering */ 349 /* enable packet filtering */
338 outl(rfcrSave | RFEN, rfcr + ioaddr); 350 outl(rfcrSave | RFEN, rfcr + ioaddr);
339 351
@@ -353,7 +365,7 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
353 * EEDONE signal to refuse EEPROM access by LAN. 365 * EEDONE signal to refuse EEPROM access by LAN.
354 * The EEPROM map of SiS962 or SiS963 is different to SiS900. 366 * The EEPROM map of SiS962 or SiS963 is different to SiS900.
355 * The signature field in SiS962 or SiS963 spec is meaningless. 367 * The signature field in SiS962 or SiS963 spec is meaningless.
356 * MAC address is read into @net_dev->dev_addr. 368 * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
357 */ 369 */
358 370
359static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, 371static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
@@ -372,6 +384,9 @@ static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
372 for (i = 0; i < 3; i++) 384 for (i = 0; i < 3; i++)
373 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); 385 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
374 386
387 /* Store MAC Address in perm_addr */
388 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
389
375 outl(EEDONE, ee_addr); 390 outl(EEDONE, ee_addr);
376 return 1; 391 return 1;
377 } else { 392 } else {
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d65fab1ba790..e25093510b0c 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -26,9 +26,9 @@
26 26
27#undef DWMAC_DMA_DEBUG 27#undef DWMAC_DMA_DEBUG
28#ifdef DWMAC_DMA_DEBUG 28#ifdef DWMAC_DMA_DEBUG
29#define DBG(fmt, args...) printk(fmt, ## args) 29#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
30#else 30#else
31#define DBG(fmt, args...) do { } while (0) 31#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
32#endif 32#endif
33 33
34/* CSR1 enables the transmit DMA to check for new descriptor */ 34/* CSR1 enables the transmit DMA to check for new descriptor */
@@ -152,7 +152,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
152 /* read the status register (CSR5) */ 152 /* read the status register (CSR5) */
153 u32 intr_status = readl(ioaddr + DMA_STATUS); 153 u32 intr_status = readl(ioaddr + DMA_STATUS);
154 154
155 DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status); 155 DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
156#ifdef DWMAC_DMA_DEBUG 156#ifdef DWMAC_DMA_DEBUG
157 /* It displays the DMA process states (CSR5 register) */ 157 /* It displays the DMA process states (CSR5 register) */
158 show_tx_process_state(intr_status); 158 show_tx_process_state(intr_status);
@@ -160,43 +160,43 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
160#endif 160#endif
161 /* ABNORMAL interrupts */ 161 /* ABNORMAL interrupts */
162 if (unlikely(intr_status & DMA_STATUS_AIS)) { 162 if (unlikely(intr_status & DMA_STATUS_AIS)) {
163 DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: "); 163 DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
164 if (unlikely(intr_status & DMA_STATUS_UNF)) { 164 if (unlikely(intr_status & DMA_STATUS_UNF)) {
165 DBG(INFO, "transmit underflow\n"); 165 DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
166 ret = tx_hard_error_bump_tc; 166 ret = tx_hard_error_bump_tc;
167 x->tx_undeflow_irq++; 167 x->tx_undeflow_irq++;
168 } 168 }
169 if (unlikely(intr_status & DMA_STATUS_TJT)) { 169 if (unlikely(intr_status & DMA_STATUS_TJT)) {
170 DBG(INFO, "transmit jabber\n"); 170 DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
171 x->tx_jabber_irq++; 171 x->tx_jabber_irq++;
172 } 172 }
173 if (unlikely(intr_status & DMA_STATUS_OVF)) { 173 if (unlikely(intr_status & DMA_STATUS_OVF)) {
174 DBG(INFO, "recv overflow\n"); 174 DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
175 x->rx_overflow_irq++; 175 x->rx_overflow_irq++;
176 } 176 }
177 if (unlikely(intr_status & DMA_STATUS_RU)) { 177 if (unlikely(intr_status & DMA_STATUS_RU)) {
178 DBG(INFO, "receive buffer unavailable\n"); 178 DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
179 x->rx_buf_unav_irq++; 179 x->rx_buf_unav_irq++;
180 } 180 }
181 if (unlikely(intr_status & DMA_STATUS_RPS)) { 181 if (unlikely(intr_status & DMA_STATUS_RPS)) {
182 DBG(INFO, "receive process stopped\n"); 182 DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
183 x->rx_process_stopped_irq++; 183 x->rx_process_stopped_irq++;
184 } 184 }
185 if (unlikely(intr_status & DMA_STATUS_RWT)) { 185 if (unlikely(intr_status & DMA_STATUS_RWT)) {
186 DBG(INFO, "receive watchdog\n"); 186 DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
187 x->rx_watchdog_irq++; 187 x->rx_watchdog_irq++;
188 } 188 }
189 if (unlikely(intr_status & DMA_STATUS_ETI)) { 189 if (unlikely(intr_status & DMA_STATUS_ETI)) {
190 DBG(INFO, "transmit early interrupt\n"); 190 DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
191 x->tx_early_irq++; 191 x->tx_early_irq++;
192 } 192 }
193 if (unlikely(intr_status & DMA_STATUS_TPS)) { 193 if (unlikely(intr_status & DMA_STATUS_TPS)) {
194 DBG(INFO, "transmit process stopped\n"); 194 DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
195 x->tx_process_stopped_irq++; 195 x->tx_process_stopped_irq++;
196 ret = tx_hard_error; 196 ret = tx_hard_error;
197 } 197 }
198 if (unlikely(intr_status & DMA_STATUS_FBI)) { 198 if (unlikely(intr_status & DMA_STATUS_FBI)) {
199 DBG(INFO, "fatal bus error\n"); 199 DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
200 x->fatal_bus_error_irq++; 200 x->fatal_bus_error_irq++;
201 ret = tx_hard_error; 201 ret = tx_hard_error;
202 } 202 }
@@ -215,7 +215,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
215 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ 215 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
216 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); 216 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
217 217
218 DBG(INFO, "\n\n"); 218 DWMAC_LIB_DBG(KERN_INFO "\n\n");
219 return ret; 219 return ret;
220} 220}
221 221
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 0e5f03135b50..cc973fc38405 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -750,7 +750,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
750 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE); 750 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
751 priv->xstats.threshold = tc; 751 priv->xstats.threshold = tc;
752 } 752 }
753 stmmac_tx_err(priv);
754 } else if (unlikely(status == tx_hard_error)) 753 } else if (unlikely(status == tx_hard_error))
755 stmmac_tx_err(priv); 754 stmmac_tx_err(priv);
756} 755}
@@ -781,21 +780,6 @@ static int stmmac_open(struct net_device *dev)
781 780
782 stmmac_verify_args(); 781 stmmac_verify_args();
783 782
784 ret = stmmac_init_phy(dev);
785 if (unlikely(ret)) {
786 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
787 return ret;
788 }
789
790 /* Request the IRQ lines */
791 ret = request_irq(dev->irq, stmmac_interrupt,
792 IRQF_SHARED, dev->name, dev);
793 if (unlikely(ret < 0)) {
794 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
795 __func__, dev->irq, ret);
796 return ret;
797 }
798
799#ifdef CONFIG_STMMAC_TIMER 783#ifdef CONFIG_STMMAC_TIMER
800 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); 784 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
801 if (unlikely(priv->tm == NULL)) { 785 if (unlikely(priv->tm == NULL)) {
@@ -814,6 +798,11 @@ static int stmmac_open(struct net_device *dev)
814 } else 798 } else
815 priv->tm->enable = 1; 799 priv->tm->enable = 1;
816#endif 800#endif
801 ret = stmmac_init_phy(dev);
802 if (unlikely(ret)) {
803 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
804 goto open_error;
805 }
817 806
818 /* Create and initialize the TX/RX descriptors chains. */ 807 /* Create and initialize the TX/RX descriptors chains. */
819 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); 808 priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
@@ -822,12 +811,11 @@ static int stmmac_open(struct net_device *dev)
822 init_dma_desc_rings(dev); 811 init_dma_desc_rings(dev);
823 812
824 /* DMA initialization and SW reset */ 813 /* DMA initialization and SW reset */
825 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, 814 ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
826 priv->dma_tx_phy, 815 priv->dma_tx_phy, priv->dma_rx_phy);
827 priv->dma_rx_phy) < 0)) { 816 if (ret < 0) {
828
829 pr_err("%s: DMA initialization failed\n", __func__); 817 pr_err("%s: DMA initialization failed\n", __func__);
830 return -1; 818 goto open_error;
831 } 819 }
832 820
833 /* Copy the MAC addr into the HW */ 821 /* Copy the MAC addr into the HW */
@@ -848,6 +836,15 @@ static int stmmac_open(struct net_device *dev)
848 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); 836 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
849 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); 837 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
850 838
839 /* Request the IRQ lines */
840 ret = request_irq(dev->irq, stmmac_interrupt,
841 IRQF_SHARED, dev->name, dev);
842 if (unlikely(ret < 0)) {
843 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
844 __func__, dev->irq, ret);
845 goto open_error;
846 }
847
851 /* Enable the MAC Rx/Tx */ 848 /* Enable the MAC Rx/Tx */
852 stmmac_enable_mac(priv->ioaddr); 849 stmmac_enable_mac(priv->ioaddr);
853 850
@@ -878,7 +875,17 @@ static int stmmac_open(struct net_device *dev)
878 napi_enable(&priv->napi); 875 napi_enable(&priv->napi);
879 skb_queue_head_init(&priv->rx_recycle); 876 skb_queue_head_init(&priv->rx_recycle);
880 netif_start_queue(dev); 877 netif_start_queue(dev);
878
881 return 0; 879 return 0;
880
881open_error:
882#ifdef CONFIG_STMMAC_TIMER
883 kfree(priv->tm);
884#endif
885 if (priv->phydev)
886 phy_disconnect(priv->phydev);
887
888 return ret;
882} 889}
883 890
884/** 891/**
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 8a3b191b195b..ff32befd8443 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1251,7 +1251,7 @@ static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
1251/* 1251/*
1252 * The NIC has told us that a packet has been downloaded onto the card, we must 1252 * The NIC has told us that a packet has been downloaded onto the card, we must
1253 * find out which packet it has done, clear the skb and information for the packet 1253 * find out which packet it has done, clear the skb and information for the packet
1254 * then advance around the ring for all tranmitted packets 1254 * then advance around the ring for all transmitted packets
1255 */ 1255 */
1256 1256
1257static void xl_dn_comp(struct net_device *dev) 1257static void xl_dn_comp(struct net_device *dev)
@@ -1568,7 +1568,7 @@ static void xl_arb_cmd(struct net_device *dev)
1568 if (lan_status_diff & LSC_SOFT_ERR) 1568 if (lan_status_diff & LSC_SOFT_ERR)
1569 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); 1569 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
1570 if (lan_status_diff & LSC_TRAN_BCN) 1570 if (lan_status_diff & LSC_TRAN_BCN)
1571 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); 1571 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
1572 if (lan_status_diff & LSC_SS) 1572 if (lan_status_diff & LSC_SS)
1573 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); 1573 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1574 if (lan_status_diff & LSC_RING_REC) 1574 if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 5bd140704533..9354ca9da576 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1675,7 +1675,7 @@ drop_frame:
1675 if (lan_status_diff & LSC_SOFT_ERR) 1675 if (lan_status_diff & LSC_SOFT_ERR)
1676 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name); 1676 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
1677 if (lan_status_diff & LSC_TRAN_BCN) 1677 if (lan_status_diff & LSC_TRAN_BCN)
1678 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name); 1678 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
1679 if (lan_status_diff & LSC_SS) 1679 if (lan_status_diff & LSC_SS)
1680 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); 1680 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1681 if (lan_status_diff & LSC_RING_REC) 1681 if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3d2fbe60b46e..2684003b8ab6 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1500,7 +1500,7 @@ drop_frame:
1500 if (lan_status_diff & LSC_SOFT_ERR) 1500 if (lan_status_diff & LSC_SOFT_ERR)
1501 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name); 1501 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
1502 if (lan_status_diff & LSC_TRAN_BCN) 1502 if (lan_status_diff & LSC_TRAN_BCN)
1503 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); 1503 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
1504 if (lan_status_diff & LSC_SS) 1504 if (lan_status_diff & LSC_SS)
1505 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name); 1505 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1506 if (lan_status_diff & LSC_RING_REC) 1506 if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index f1b8af64569c..2d10239ce829 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -1040,7 +1040,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
1040 } 1040 }
1041 1041
1042 ret = ath9k_htc_hw_init(hif_dev->htc_handle, 1042 ret = ath9k_htc_hw_init(hif_dev->htc_handle,
1043 &hif_dev->udev->dev, hif_dev->device_id, 1043 &interface->dev, hif_dev->device_id,
1044 hif_dev->udev->product, id->driver_info); 1044 hif_dev->udev->product, id->driver_info);
1045 if (ret) { 1045 if (ret) {
1046 ret = -EINVAL; 1046 ret = -EINVAL;
@@ -1158,7 +1158,7 @@ fail_resume:
1158#endif 1158#endif
1159 1159
1160static struct usb_driver ath9k_hif_usb_driver = { 1160static struct usb_driver ath9k_hif_usb_driver = {
1161 .name = "ath9k_hif_usb", 1161 .name = KBUILD_MODNAME,
1162 .probe = ath9k_hif_usb_probe, 1162 .probe = ath9k_hif_usb_probe,
1163 .disconnect = ath9k_hif_usb_disconnect, 1163 .disconnect = ath9k_hif_usb_disconnect,
1164#ifdef CONFIG_PM 1164#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1ec9bcd6b281..c95bc5cc1a1f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1254,15 +1254,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1254 ah->txchainmask = common->tx_chainmask; 1254 ah->txchainmask = common->tx_chainmask;
1255 ah->rxchainmask = common->rx_chainmask; 1255 ah->rxchainmask = common->rx_chainmask;
1256 1256
1257 if ((common->bus_ops->ath_bus_type != ATH_USB) && !ah->chip_fullsleep) {
1258 ath9k_hw_abortpcurecv(ah);
1259 if (!ath9k_hw_stopdmarecv(ah)) {
1260 ath_dbg(common, ATH_DBG_XMIT,
1261 "Failed to stop receive dma\n");
1262 bChannelChange = false;
1263 }
1264 }
1265
1266 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1257 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1267 return -EIO; 1258 return -EIO;
1268 1259
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 562257ac52cf..edc1cbbfecaf 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -751,28 +751,47 @@ void ath9k_hw_abortpcurecv(struct ath_hw *ah)
751} 751}
752EXPORT_SYMBOL(ath9k_hw_abortpcurecv); 752EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
753 753
754bool ath9k_hw_stopdmarecv(struct ath_hw *ah) 754bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
755{ 755{
756#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ 756#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
757#define AH_RX_TIME_QUANTUM 100 /* usec */ 757#define AH_RX_TIME_QUANTUM 100 /* usec */
758 struct ath_common *common = ath9k_hw_common(ah); 758 struct ath_common *common = ath9k_hw_common(ah);
759 u32 mac_status, last_mac_status = 0;
759 int i; 760 int i;
760 761
762 /* Enable access to the DMA observation bus */
763 REG_WRITE(ah, AR_MACMISC,
764 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
765 (AR_MACMISC_MISC_OBS_BUS_1 <<
766 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
767
761 REG_WRITE(ah, AR_CR, AR_CR_RXD); 768 REG_WRITE(ah, AR_CR, AR_CR_RXD);
762 769
763 /* Wait for rx enable bit to go low */ 770 /* Wait for rx enable bit to go low */
764 for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { 771 for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
765 if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) 772 if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
766 break; 773 break;
774
775 if (!AR_SREV_9300_20_OR_LATER(ah)) {
776 mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
777 if (mac_status == 0x1c0 && mac_status == last_mac_status) {
778 *reset = true;
779 break;
780 }
781
782 last_mac_status = mac_status;
783 }
784
767 udelay(AH_TIME_QUANTUM); 785 udelay(AH_TIME_QUANTUM);
768 } 786 }
769 787
770 if (i == 0) { 788 if (i == 0) {
771 ath_err(common, 789 ath_err(common,
772 "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", 790 "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
773 AH_RX_STOP_DMA_TIMEOUT / 1000, 791 AH_RX_STOP_DMA_TIMEOUT / 1000,
774 REG_READ(ah, AR_CR), 792 REG_READ(ah, AR_CR),
775 REG_READ(ah, AR_DIAG_SW)); 793 REG_READ(ah, AR_DIAG_SW),
794 REG_READ(ah, AR_DMADBG_7));
776 return false; 795 return false;
777 } else { 796 } else {
778 return true; 797 return true;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index b2b2ff852c32..c2a59386fb9c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -695,7 +695,7 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
695void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); 695void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
696void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning); 696void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
697void ath9k_hw_abortpcurecv(struct ath_hw *ah); 697void ath9k_hw_abortpcurecv(struct ath_hw *ah);
698bool ath9k_hw_stopdmarecv(struct ath_hw *ah); 698bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset);
699int ath9k_hw_beaconq_setup(struct ath_hw *ah); 699int ath9k_hw_beaconq_setup(struct ath_hw *ah);
700 700
701/* Interrupt Handling */ 701/* Interrupt Handling */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dddb85de622d..17d04ff8d678 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1376,7 +1376,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1376 1376
1377 ath9k_calculate_iter_data(hw, vif, &iter_data); 1377 ath9k_calculate_iter_data(hw, vif, &iter_data);
1378 1378
1379 ath9k_ps_wakeup(sc);
1380 /* Set BSSID mask. */ 1379 /* Set BSSID mask. */
1381 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); 1380 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
1382 ath_hw_setbssidmask(common); 1381 ath_hw_setbssidmask(common);
@@ -1411,7 +1410,6 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
1411 } 1410 }
1412 1411
1413 ath9k_hw_set_interrupts(ah, ah->imask); 1412 ath9k_hw_set_interrupts(ah, ah->imask);
1414 ath9k_ps_restore(sc);
1415 1413
1416 /* Set up ANI */ 1414 /* Set up ANI */
1417 if ((iter_data.naps + iter_data.nadhocs) > 0) { 1415 if ((iter_data.naps + iter_data.nadhocs) > 0) {
@@ -1457,6 +1455,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1457 struct ath_vif *avp = (void *)vif->drv_priv; 1455 struct ath_vif *avp = (void *)vif->drv_priv;
1458 int ret = 0; 1456 int ret = 0;
1459 1457
1458 ath9k_ps_wakeup(sc);
1460 mutex_lock(&sc->mutex); 1459 mutex_lock(&sc->mutex);
1461 1460
1462 switch (vif->type) { 1461 switch (vif->type) {
@@ -1503,6 +1502,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1503 ath9k_do_vif_add_setup(hw, vif); 1502 ath9k_do_vif_add_setup(hw, vif);
1504out: 1503out:
1505 mutex_unlock(&sc->mutex); 1504 mutex_unlock(&sc->mutex);
1505 ath9k_ps_restore(sc);
1506 return ret; 1506 return ret;
1507} 1507}
1508 1508
@@ -1517,6 +1517,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1517 1517
1518 ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); 1518 ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
1519 mutex_lock(&sc->mutex); 1519 mutex_lock(&sc->mutex);
1520 ath9k_ps_wakeup(sc);
1520 1521
1521 /* See if new interface type is valid. */ 1522 /* See if new interface type is valid. */
1522 if ((new_type == NL80211_IFTYPE_ADHOC) && 1523 if ((new_type == NL80211_IFTYPE_ADHOC) &&
@@ -1546,6 +1547,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
1546 1547
1547 ath9k_do_vif_add_setup(hw, vif); 1548 ath9k_do_vif_add_setup(hw, vif);
1548out: 1549out:
1550 ath9k_ps_restore(sc);
1549 mutex_unlock(&sc->mutex); 1551 mutex_unlock(&sc->mutex);
1550 return ret; 1552 return ret;
1551} 1553}
@@ -1558,6 +1560,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1558 1560
1559 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1561 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
1560 1562
1563 ath9k_ps_wakeup(sc);
1561 mutex_lock(&sc->mutex); 1564 mutex_lock(&sc->mutex);
1562 1565
1563 sc->nvifs--; 1566 sc->nvifs--;
@@ -1569,6 +1572,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1569 ath9k_calculate_summary_state(hw, NULL); 1572 ath9k_calculate_summary_state(hw, NULL);
1570 1573
1571 mutex_unlock(&sc->mutex); 1574 mutex_unlock(&sc->mutex);
1575 ath9k_ps_restore(sc);
1572} 1576}
1573 1577
1574static void ath9k_enable_ps(struct ath_softc *sc) 1578static void ath9k_enable_ps(struct ath_softc *sc)
@@ -1809,6 +1813,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1809 1813
1810 txq = sc->tx.txq_map[queue]; 1814 txq = sc->tx.txq_map[queue];
1811 1815
1816 ath9k_ps_wakeup(sc);
1812 mutex_lock(&sc->mutex); 1817 mutex_lock(&sc->mutex);
1813 1818
1814 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); 1819 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1832,6 +1837,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1832 ath_beaconq_config(sc); 1837 ath_beaconq_config(sc);
1833 1838
1834 mutex_unlock(&sc->mutex); 1839 mutex_unlock(&sc->mutex);
1840 ath9k_ps_restore(sc);
1835 1841
1836 return ret; 1842 return ret;
1837} 1843}
@@ -1894,6 +1900,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1894 int slottime; 1900 int slottime;
1895 int error; 1901 int error;
1896 1902
1903 ath9k_ps_wakeup(sc);
1897 mutex_lock(&sc->mutex); 1904 mutex_lock(&sc->mutex);
1898 1905
1899 if (changed & BSS_CHANGED_BSSID) { 1906 if (changed & BSS_CHANGED_BSSID) {
@@ -1994,6 +2001,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1994 } 2001 }
1995 2002
1996 mutex_unlock(&sc->mutex); 2003 mutex_unlock(&sc->mutex);
2004 ath9k_ps_restore(sc);
1997} 2005}
1998 2006
1999static u64 ath9k_get_tsf(struct ieee80211_hw *hw) 2007static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a9c3f4672aa0..dcd19bc337d1 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -486,12 +486,12 @@ start_recv:
486bool ath_stoprecv(struct ath_softc *sc) 486bool ath_stoprecv(struct ath_softc *sc)
487{ 487{
488 struct ath_hw *ah = sc->sc_ah; 488 struct ath_hw *ah = sc->sc_ah;
489 bool stopped; 489 bool stopped, reset = false;
490 490
491 spin_lock_bh(&sc->rx.rxbuflock); 491 spin_lock_bh(&sc->rx.rxbuflock);
492 ath9k_hw_abortpcurecv(ah); 492 ath9k_hw_abortpcurecv(ah);
493 ath9k_hw_setrxfilter(ah, 0); 493 ath9k_hw_setrxfilter(ah, 0);
494 stopped = ath9k_hw_stopdmarecv(ah); 494 stopped = ath9k_hw_stopdmarecv(ah, &reset);
495 495
496 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 496 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
497 ath_edma_stop_recv(sc); 497 ath_edma_stop_recv(sc);
@@ -506,7 +506,7 @@ bool ath_stoprecv(struct ath_softc *sc)
506 "confusing the DMA engine when we start RX up\n"); 506 "confusing the DMA engine when we start RX up\n");
507 ATH_DBG_WARN_ON_ONCE(!stopped); 507 ATH_DBG_WARN_ON_ONCE(!stopped);
508 } 508 }
509 return stopped; 509 return stopped || reset;
510} 510}
511 511
512void ath_flushrecv(struct ath_softc *sc) 512void ath_flushrecv(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 248c670fdfbe..5c2cfe694152 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -195,6 +195,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
195 {APL9_WORLD, CTL_ETSI, CTL_ETSI}, 195 {APL9_WORLD, CTL_ETSI, CTL_ETSI},
196 196
197 {APL3_FCCA, CTL_FCC, CTL_FCC}, 197 {APL3_FCCA, CTL_FCC, CTL_FCC},
198 {APL7_FCCA, CTL_FCC, CTL_FCC},
198 {APL1_ETSIC, CTL_FCC, CTL_ETSI}, 199 {APL1_ETSIC, CTL_FCC, CTL_ETSI},
199 {APL2_ETSIC, CTL_FCC, CTL_ETSI}, 200 {APL2_ETSIC, CTL_FCC, CTL_ETSI},
200 {APL2_APLD, CTL_FCC, NO_CTL}, 201 {APL2_APLD, CTL_FCC, NO_CTL},
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index 2a45dd44cc12..aef65cd47661 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,6 +1,5 @@
1config IWLWIFI_LEGACY 1config IWLWIFI_LEGACY
2 tristate "Intel Wireless Wifi legacy devices" 2 tristate
3 depends on PCI && MAC80211
4 select FW_LOADER 3 select FW_LOADER
5 select NEW_LEDS 4 select NEW_LEDS
6 select LEDS_CLASS 5 select LEDS_CLASS
@@ -65,7 +64,8 @@ endmenu
65 64
66config IWL4965 65config IWL4965
67 tristate "Intel Wireless WiFi 4965AGN (iwl4965)" 66 tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
68 depends on IWLWIFI_LEGACY 67 depends on PCI && MAC80211
68 select IWLWIFI_LEGACY
69 ---help--- 69 ---help---
70 This option enables support for 70 This option enables support for
71 71
@@ -92,7 +92,8 @@ config IWL4965
92 92
93config IWL3945 93config IWL3945
94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" 94 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
95 depends on IWLWIFI_LEGACY 95 depends on PCI && MAC80211
96 select IWLWIFI_LEGACY
96 ---help--- 97 ---help---
97 Select to build the driver supporting the: 98 Select to build the driver supporting the:
98 99
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
index 779d3cb86e2c..5c3a68d3af12 100644
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
@@ -74,8 +74,6 @@
74/* RSSI to dBm */ 74/* RSSI to dBm */
75#define IWL39_RSSI_OFFSET 95 75#define IWL39_RSSI_OFFSET 95
76 76
77#define IWL_DEFAULT_TX_POWER 0x0F
78
79/* 77/*
80 * EEPROM related constants, enums, and structures. 78 * EEPROM related constants, enums, and structures.
81 */ 79 */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
index 08b189c8472d..fc6fa2886d9c 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
@@ -804,9 +804,6 @@ struct iwl4965_scd_bc_tbl {
804 804
805#define IWL4965_DEFAULT_TX_RETRY 15 805#define IWL4965_DEFAULT_TX_RETRY 15
806 806
807/* Limit range of txpower output target to be between these values */
808#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
809
810/* EEPROM */ 807/* EEPROM */
811#define IWL4965_FIRST_AMPDU_QUEUE 10 808#define IWL4965_FIRST_AMPDU_QUEUE 10
812 809
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 7007d61bb6b5..c1511b14b239 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -160,6 +160,7 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
160 struct ieee80211_channel *geo_ch; 160 struct ieee80211_channel *geo_ch;
161 struct ieee80211_rate *rates; 161 struct ieee80211_rate *rates;
162 int i = 0; 162 int i = 0;
163 s8 max_tx_power = 0;
163 164
164 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || 165 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
165 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { 166 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
@@ -235,8 +236,8 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
235 236
236 geo_ch->flags |= ch->ht40_extension_channel; 237 geo_ch->flags |= ch->ht40_extension_channel;
237 238
238 if (ch->max_power_avg > priv->tx_power_device_lmt) 239 if (ch->max_power_avg > max_tx_power)
239 priv->tx_power_device_lmt = ch->max_power_avg; 240 max_tx_power = ch->max_power_avg;
240 } else { 241 } else {
241 geo_ch->flags |= IEEE80211_CHAN_DISABLED; 242 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
242 } 243 }
@@ -249,6 +250,10 @@ int iwl_legacy_init_geos(struct iwl_priv *priv)
249 geo_ch->flags); 250 geo_ch->flags);
250 } 251 }
251 252
253 priv->tx_power_device_lmt = max_tx_power;
254 priv->tx_power_user_lmt = max_tx_power;
255 priv->tx_power_next = max_tx_power;
256
252 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && 257 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
253 priv->cfg->sku & IWL_SKU_A) { 258 priv->cfg->sku & IWL_SKU_A) {
254 IWL_INFO(priv, "Incorrectly detected BG card as ABG. " 259 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
@@ -1124,11 +1129,11 @@ int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1124 if (!priv->cfg->ops->lib->send_tx_power) 1129 if (!priv->cfg->ops->lib->send_tx_power)
1125 return -EOPNOTSUPP; 1130 return -EOPNOTSUPP;
1126 1131
1127 if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) { 1132 /* 0 dBm mean 1 milliwatt */
1133 if (tx_power < 0) {
1128 IWL_WARN(priv, 1134 IWL_WARN(priv,
1129 "Requested user TXPOWER %d below lower limit %d.\n", 1135 "Requested user TXPOWER %d below 1 mW.\n",
1130 tx_power, 1136 tx_power);
1131 IWL4965_TX_POWER_TARGET_POWER_MIN);
1132 return -EINVAL; 1137 return -EINVAL;
1133 } 1138 }
1134 1139
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
index 04c5648027df..cb346d1a9ffa 100644
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
@@ -471,13 +471,6 @@ int iwl_legacy_init_channel_map(struct iwl_priv *priv)
471 flags & EEPROM_CHANNEL_RADAR)) 471 flags & EEPROM_CHANNEL_RADAR))
472 ? "" : "not "); 472 ? "" : "not ");
473 473
474 /* Set the tx_power_user_lmt to the highest power
475 * supported by any channel */
476 if (eeprom_ch_info[ch].max_power_avg >
477 priv->tx_power_user_lmt)
478 priv->tx_power_user_lmt =
479 eeprom_ch_info[ch].max_power_avg;
480
481 ch_info++; 474 ch_info++;
482 } 475 }
483 } 476 }
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
index 28eb3d885ba1..cc7ebcee60e5 100644
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -3825,10 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3825 priv->force_reset[IWL_FW_RESET].reset_duration = 3825 priv->force_reset[IWL_FW_RESET].reset_duration =
3826 IWL_DELAY_NEXT_FORCE_FW_RELOAD; 3826 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3827 3827
3828
3829 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
3830 priv->tx_power_next = IWL_DEFAULT_TX_POWER;
3831
3832 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { 3828 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
3833 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", 3829 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
3834 eeprom->version); 3830 eeprom->version);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index 91b3d8b9d7a5..d484c3678163 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -3140,12 +3140,6 @@ static int iwl4965_init_drv(struct iwl_priv *priv)
3140 3140
3141 iwl_legacy_init_scan_params(priv); 3141 iwl_legacy_init_scan_params(priv);
3142 3142
3143 /* Set the tx_power_user_lmt to the lowest power level
3144 * this value will get overwritten by channel max power avg
3145 * from eeprom */
3146 priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN;
3147 priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN;
3148
3149 ret = iwl_legacy_init_channel_map(priv); 3143 ret = iwl_legacy_init_channel_map(priv);
3150 if (ret) { 3144 if (ret) {
3151 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); 3145 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 3ea31b659d1a..22e045b5bcee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -530,6 +530,9 @@ static struct iwl_ht_params iwl5000_ht_params = {
530struct iwl_cfg iwl5300_agn_cfg = { 530struct iwl_cfg iwl5300_agn_cfg = {
531 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", 531 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
532 IWL_DEVICE_5000, 532 IWL_DEVICE_5000,
533 /* at least EEPROM 0x11A has wrong info */
534 .valid_tx_ant = ANT_ABC, /* .cfg overwrite */
535 .valid_rx_ant = ANT_ABC, /* .cfg overwrite */
533 .ht_params = &iwl5000_ht_params, 536 .ht_params = &iwl5000_ht_params,
534}; 537};
535 538
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 36952274950e..c1ceb4b23971 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -137,6 +137,7 @@ struct mwl8k_tx_queue {
137struct mwl8k_priv { 137struct mwl8k_priv {
138 struct ieee80211_hw *hw; 138 struct ieee80211_hw *hw;
139 struct pci_dev *pdev; 139 struct pci_dev *pdev;
140 int irq;
140 141
141 struct mwl8k_device_info *device_info; 142 struct mwl8k_device_info *device_info;
142 143
@@ -3761,9 +3762,11 @@ static int mwl8k_start(struct ieee80211_hw *hw)
3761 rc = request_irq(priv->pdev->irq, mwl8k_interrupt, 3762 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
3762 IRQF_SHARED, MWL8K_NAME, hw); 3763 IRQF_SHARED, MWL8K_NAME, hw);
3763 if (rc) { 3764 if (rc) {
3765 priv->irq = -1;
3764 wiphy_err(hw->wiphy, "failed to register IRQ handler\n"); 3766 wiphy_err(hw->wiphy, "failed to register IRQ handler\n");
3765 return -EIO; 3767 return -EIO;
3766 } 3768 }
3769 priv->irq = priv->pdev->irq;
3767 3770
3768 /* Enable TX reclaim and RX tasklets. */ 3771 /* Enable TX reclaim and RX tasklets. */
3769 tasklet_enable(&priv->poll_tx_task); 3772 tasklet_enable(&priv->poll_tx_task);
@@ -3800,6 +3803,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
3800 if (rc) { 3803 if (rc) {
3801 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3804 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
3802 free_irq(priv->pdev->irq, hw); 3805 free_irq(priv->pdev->irq, hw);
3806 priv->irq = -1;
3803 tasklet_disable(&priv->poll_tx_task); 3807 tasklet_disable(&priv->poll_tx_task);
3804 tasklet_disable(&priv->poll_rx_task); 3808 tasklet_disable(&priv->poll_rx_task);
3805 } 3809 }
@@ -3818,7 +3822,10 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
3818 3822
3819 /* Disable interrupts */ 3823 /* Disable interrupts */
3820 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3824 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
3821 free_irq(priv->pdev->irq, hw); 3825 if (priv->irq != -1) {
3826 free_irq(priv->pdev->irq, hw);
3827 priv->irq = -1;
3828 }
3822 3829
3823 /* Stop finalize join worker */ 3830 /* Stop finalize join worker */
3824 cancel_work_sync(&priv->finalize_join_worker); 3831 cancel_work_sync(&priv->finalize_join_worker);
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 7834c26c2954..042842e704de 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -703,7 +703,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
703 struct p54_tx_info *p54info; 703 struct p54_tx_info *p54info;
704 struct p54_hdr *hdr; 704 struct p54_hdr *hdr;
705 struct p54_tx_data *txhdr; 705 struct p54_tx_data *txhdr;
706 unsigned int padding, len, extra_len; 706 unsigned int padding, len, extra_len = 0;
707 int i, j, ridx; 707 int i, j, ridx;
708 u16 hdr_flags = 0, aid = 0; 708 u16 hdr_flags = 0, aid = 0;
709 u8 rate, queue = 0, crypt_offset = 0; 709 u8 rate, queue = 0, crypt_offset = 0;
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index a3755ffc03d4..bc8ce48f0778 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2550,7 +2550,6 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
2550 const struct parport_pc_via_data *via) 2550 const struct parport_pc_via_data *via)
2551{ 2551{
2552 short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 }; 2552 short inta_addr[6] = { 0x2A0, 0x2C0, 0x220, 0x240, 0x1E0 };
2553 struct resource *base_res;
2554 u32 ite8872set; 2553 u32 ite8872set;
2555 u32 ite8872_lpt, ite8872_lpthi; 2554 u32 ite8872_lpt, ite8872_lpthi;
2556 u8 ite8872_irq, type; 2555 u8 ite8872_irq, type;
@@ -2561,8 +2560,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
2561 2560
2562 /* make sure which one chip */ 2561 /* make sure which one chip */
2563 for (i = 0; i < 5; i++) { 2562 for (i = 0; i < 5; i++) {
2564 base_res = request_region(inta_addr[i], 32, "it887x"); 2563 if (request_region(inta_addr[i], 32, "it887x")) {
2565 if (base_res) {
2566 int test; 2564 int test;
2567 pci_write_config_dword(pdev, 0x60, 2565 pci_write_config_dword(pdev, 0x60,
2568 0xe5000000 | inta_addr[i]); 2566 0xe5000000 | inta_addr[i]);
@@ -2571,7 +2569,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
2571 test = inb(inta_addr[i]); 2569 test = inb(inta_addr[i]);
2572 if (test != 0xff) 2570 if (test != 0xff)
2573 break; 2571 break;
2574 release_region(inta_addr[i], 0x8); 2572 release_region(inta_addr[i], 32);
2575 } 2573 }
2576 } 2574 }
2577 if (i >= 5) { 2575 if (i >= 5) {
@@ -2635,7 +2633,7 @@ static int __devinit sio_ite_8872_probe(struct pci_dev *pdev, int autoirq,
2635 /* 2633 /*
2636 * Release the resource so that parport_pc_probe_port can get it. 2634 * Release the resource so that parport_pc_probe_port can get it.
2637 */ 2635 */
2638 release_resource(base_res); 2636 release_region(inta_addr[i], 32);
2639 if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi, 2637 if (parport_pc_probe_port(ite8872_lpt, ite8872_lpthi,
2640 irq, PARPORT_DMA_NONE, &pdev->dev, 0)) { 2638 irq, PARPORT_DMA_NONE, &pdev->dev, 0)) {
2641 printk(KERN_INFO 2639 printk(KERN_INFO
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index c8ff646c0b05..0fa466a91bf4 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -88,4 +88,6 @@ config PCI_IOAPIC
88 depends on HOTPLUG 88 depends on HOTPLUG
89 default y 89 default y
90 90
91select NLS if (DMI || ACPI) 91config PCI_LABEL
92 def_bool y if (DMI || ACPI)
93 select NLS
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 98d61c8e984b..c85f744270a5 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -56,10 +56,10 @@ obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
56# ACPI Related PCI FW Functions 56# ACPI Related PCI FW Functions
57# ACPI _DSM provided firmware instance and string name 57# ACPI _DSM provided firmware instance and string name
58# 58#
59obj-$(CONFIG_ACPI) += pci-acpi.o pci-label.o 59obj-$(CONFIG_ACPI) += pci-acpi.o
60 60
61# SMBIOS provided firmware instance and labels 61# SMBIOS provided firmware instance and labels
62obj-$(CONFIG_DMI) += pci-label.o 62obj-$(CONFIG_PCI_LABEL) += pci-label.o
63 63
64# Cardbus & CompactPCI use setup-bus 64# Cardbus & CompactPCI use setup-bus
65obj-$(CONFIG_HOTPLUG) += setup-bus.o 65obj-$(CONFIG_HOTPLUG) += setup-bus.o
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 505c1c7075f0..d552d2c77844 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1299,7 +1299,7 @@ static void iommu_detach_domain(struct dmar_domain *domain,
1299static struct iova_domain reserved_iova_list; 1299static struct iova_domain reserved_iova_list;
1300static struct lock_class_key reserved_rbtree_key; 1300static struct lock_class_key reserved_rbtree_key;
1301 1301
1302static void dmar_init_reserved_ranges(void) 1302static int dmar_init_reserved_ranges(void)
1303{ 1303{
1304 struct pci_dev *pdev = NULL; 1304 struct pci_dev *pdev = NULL;
1305 struct iova *iova; 1305 struct iova *iova;
@@ -1313,8 +1313,10 @@ static void dmar_init_reserved_ranges(void)
1313 /* IOAPIC ranges shouldn't be accessed by DMA */ 1313 /* IOAPIC ranges shouldn't be accessed by DMA */
1314 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), 1314 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1315 IOVA_PFN(IOAPIC_RANGE_END)); 1315 IOVA_PFN(IOAPIC_RANGE_END));
1316 if (!iova) 1316 if (!iova) {
1317 printk(KERN_ERR "Reserve IOAPIC range failed\n"); 1317 printk(KERN_ERR "Reserve IOAPIC range failed\n");
1318 return -ENODEV;
1319 }
1318 1320
1319 /* Reserve all PCI MMIO to avoid peer-to-peer access */ 1321 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1320 for_each_pci_dev(pdev) { 1322 for_each_pci_dev(pdev) {
@@ -1327,11 +1329,13 @@ static void dmar_init_reserved_ranges(void)
1327 iova = reserve_iova(&reserved_iova_list, 1329 iova = reserve_iova(&reserved_iova_list,
1328 IOVA_PFN(r->start), 1330 IOVA_PFN(r->start),
1329 IOVA_PFN(r->end)); 1331 IOVA_PFN(r->end));
1330 if (!iova) 1332 if (!iova) {
1331 printk(KERN_ERR "Reserve iova failed\n"); 1333 printk(KERN_ERR "Reserve iova failed\n");
1334 return -ENODEV;
1335 }
1332 } 1336 }
1333 } 1337 }
1334 1338 return 0;
1335} 1339}
1336 1340
1337static void domain_reserve_special_ranges(struct dmar_domain *domain) 1341static void domain_reserve_special_ranges(struct dmar_domain *domain)
@@ -1835,7 +1839,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1835 1839
1836 ret = iommu_attach_domain(domain, iommu); 1840 ret = iommu_attach_domain(domain, iommu);
1837 if (ret) { 1841 if (ret) {
1838 domain_exit(domain); 1842 free_domain_mem(domain);
1839 goto error; 1843 goto error;
1840 } 1844 }
1841 1845
@@ -2213,7 +2217,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
2213 return 0; 2217 return 0;
2214} 2218}
2215 2219
2216int __init init_dmars(void) 2220static int __init init_dmars(int force_on)
2217{ 2221{
2218 struct dmar_drhd_unit *drhd; 2222 struct dmar_drhd_unit *drhd;
2219 struct dmar_rmrr_unit *rmrr; 2223 struct dmar_rmrr_unit *rmrr;
@@ -2393,8 +2397,15 @@ int __init init_dmars(void)
2393 * enable translation 2397 * enable translation
2394 */ 2398 */
2395 for_each_drhd_unit(drhd) { 2399 for_each_drhd_unit(drhd) {
2396 if (drhd->ignored) 2400 if (drhd->ignored) {
2401 /*
2402 * we always have to disable PMRs or DMA may fail on
2403 * this device
2404 */
2405 if (force_on)
2406 iommu_disable_protect_mem_regions(drhd->iommu);
2397 continue; 2407 continue;
2408 }
2398 iommu = drhd->iommu; 2409 iommu = drhd->iommu;
2399 2410
2400 iommu_flush_write_buffer(iommu); 2411 iommu_flush_write_buffer(iommu);
@@ -3240,9 +3251,15 @@ static int device_notifier(struct notifier_block *nb,
3240 if (!domain) 3251 if (!domain)
3241 return 0; 3252 return 0;
3242 3253
3243 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) 3254 if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) {
3244 domain_remove_one_dev_info(domain, pdev); 3255 domain_remove_one_dev_info(domain, pdev);
3245 3256
3257 if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
3258 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
3259 list_empty(&domain->devices))
3260 domain_exit(domain);
3261 }
3262
3246 return 0; 3263 return 0;
3247} 3264}
3248 3265
@@ -3277,12 +3294,21 @@ int __init intel_iommu_init(void)
3277 if (no_iommu || dmar_disabled) 3294 if (no_iommu || dmar_disabled)
3278 return -ENODEV; 3295 return -ENODEV;
3279 3296
3280 iommu_init_mempool(); 3297 if (iommu_init_mempool()) {
3281 dmar_init_reserved_ranges(); 3298 if (force_on)
3299 panic("tboot: Failed to initialize iommu memory\n");
3300 return -ENODEV;
3301 }
3302
3303 if (dmar_init_reserved_ranges()) {
3304 if (force_on)
3305 panic("tboot: Failed to reserve iommu ranges\n");
3306 return -ENODEV;
3307 }
3282 3308
3283 init_no_remapping_devices(); 3309 init_no_remapping_devices();
3284 3310
3285 ret = init_dmars(); 3311 ret = init_dmars(force_on);
3286 if (ret) { 3312 if (ret) {
3287 if (force_on) 3313 if (force_on)
3288 panic("tboot: Failed to initialize DMARs\n"); 3314 panic("tboot: Failed to initialize DMARs\n");
@@ -3391,6 +3417,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
3391 domain->iommu_count--; 3417 domain->iommu_count--;
3392 domain_update_iommu_cap(domain); 3418 domain_update_iommu_cap(domain);
3393 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); 3419 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3420
3421 spin_lock_irqsave(&iommu->lock, tmp_flags);
3422 clear_bit(domain->id, iommu->domain_ids);
3423 iommu->domains[domain->id] = NULL;
3424 spin_unlock_irqrestore(&iommu->lock, tmp_flags);
3394 } 3425 }
3395 3426
3396 spin_unlock_irqrestore(&device_domain_lock, flags); 3427 spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -3607,9 +3638,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
3607 3638
3608 pte = dmar_domain->pgd; 3639 pte = dmar_domain->pgd;
3609 if (dma_pte_present(pte)) { 3640 if (dma_pte_present(pte)) {
3610 free_pgtable_page(dmar_domain->pgd);
3611 dmar_domain->pgd = (struct dma_pte *) 3641 dmar_domain->pgd = (struct dma_pte *)
3612 phys_to_virt(dma_pte_addr(pte)); 3642 phys_to_virt(dma_pte_addr(pte));
3643 free_pgtable_page(pte);
3613 } 3644 }
3614 dmar_domain->agaw--; 3645 dmar_domain->agaw--;
3615 } 3646 }
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index fe77e8223841..e8c19def1b0f 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -173,7 +173,7 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev,
173 c = p_dev->function_config; 173 c = p_dev->function_config;
174 174
175 if (!(c->state & CONFIG_LOCKED)) { 175 if (!(c->state & CONFIG_LOCKED)) {
176 dev_dbg(&p_dev->dev, "Configuration isn't't locked\n"); 176 dev_dbg(&p_dev->dev, "Configuration isn't locked\n");
177 mutex_unlock(&s->ops_mutex); 177 mutex_unlock(&s->ops_mutex);
178 return -EACCES; 178 return -EACCES;
179 } 179 }
diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c
index 453c54c97612..4c3e94c0ae85 100644
--- a/drivers/pcmcia/pxa2xx_balloon3.c
+++ b/drivers/pcmcia/pxa2xx_balloon3.c
@@ -25,6 +25,8 @@
25 25
26#include <mach/balloon3.h> 26#include <mach/balloon3.h>
27 27
28#include <asm/mach-types.h>
29
28#include "soc_common.h" 30#include "soc_common.h"
29 31
30/* 32/*
@@ -127,6 +129,9 @@ static int __init balloon3_pcmcia_init(void)
127{ 129{
128 int ret; 130 int ret;
129 131
132 if (!machine_is_balloon3())
133 return -ENODEV;
134
130 balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); 135 balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
131 if (!balloon3_pcmcia_device) 136 if (!balloon3_pcmcia_device)
132 return -ENOMEM; 137 return -ENOMEM;
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c
index b7e596620db1..b829e655457b 100644
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ b/drivers/pcmcia/pxa2xx_trizeps4.c
@@ -69,15 +69,15 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
69 for (i = 0; i < ARRAY_SIZE(irqs); i++) { 69 for (i = 0; i < ARRAY_SIZE(irqs); i++) {
70 if (irqs[i].sock != skt->nr) 70 if (irqs[i].sock != skt->nr)
71 continue; 71 continue;
72 if (gpio_request(IRQ_TO_GPIO(irqs[i].irq), irqs[i].str) < 0) { 72 if (gpio_request(irq_to_gpio(irqs[i].irq), irqs[i].str) < 0) {
73 pr_err("%s: sock %d unable to request gpio %d\n", 73 pr_err("%s: sock %d unable to request gpio %d\n",
74 __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq)); 74 __func__, skt->nr, irq_to_gpio(irqs[i].irq));
75 ret = -EBUSY; 75 ret = -EBUSY;
76 goto error; 76 goto error;
77 } 77 }
78 if (gpio_direction_input(IRQ_TO_GPIO(irqs[i].irq)) < 0) { 78 if (gpio_direction_input(irq_to_gpio(irqs[i].irq)) < 0) {
79 pr_err("%s: sock %d unable to set input gpio %d\n", 79 pr_err("%s: sock %d unable to set input gpio %d\n",
80 __func__, skt->nr, IRQ_TO_GPIO(irqs[i].irq)); 80 __func__, skt->nr, irq_to_gpio(irqs[i].irq));
81 ret = -EINVAL; 81 ret = -EINVAL;
82 goto error; 82 goto error;
83 } 83 }
@@ -86,7 +86,7 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
86 86
87error: 87error:
88 for (; i >= 0; i--) { 88 for (; i >= 0; i--) {
89 gpio_free(IRQ_TO_GPIO(irqs[i].irq)); 89 gpio_free(irq_to_gpio(irqs[i].irq));
90 } 90 }
91 return (ret); 91 return (ret);
92} 92}
@@ -97,7 +97,7 @@ static void trizeps_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
97 /* free allocated gpio's */ 97 /* free allocated gpio's */
98 gpio_free(GPIO_PRDY); 98 gpio_free(GPIO_PRDY);
99 for (i = 0; i < ARRAY_SIZE(irqs); i++) 99 for (i = 0; i < ARRAY_SIZE(irqs); i++)
100 gpio_free(IRQ_TO_GPIO(irqs[i].irq)); 100 gpio_free(irq_to_gpio(irqs[i].irq));
101} 101}
102 102
103static unsigned long trizeps_pcmcia_status[2]; 103static unsigned long trizeps_pcmcia_status[2];
@@ -226,6 +226,9 @@ static int __init trizeps_pcmcia_init(void)
226{ 226{
227 int ret; 227 int ret;
228 228
229 if (!machine_is_trizeps4() && !machine_is_trizeps4wl())
230 return -ENODEV;
231
229 trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); 232 trizeps_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
230 if (!trizeps_pcmcia_device) 233 if (!trizeps_pcmcia_device)
231 return -ENOMEM; 234 return -ENOMEM;
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 09b4437b3e61..39013867cbd6 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -171,7 +171,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
171 err = __rtc_read_alarm(rtc, &alrm); 171 err = __rtc_read_alarm(rtc, &alrm);
172 172
173 if (!err && !rtc_valid_tm(&alrm.time)) 173 if (!err && !rtc_valid_tm(&alrm.time))
174 rtc_set_alarm(rtc, &alrm); 174 rtc_initialize_alarm(rtc, &alrm);
175 175
176 strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); 176 strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
177 dev_set_name(&rtc->dev, "rtc%d", id); 177 dev_set_name(&rtc->dev, "rtc%d", id);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 23719f0acbf6..ef6316acec43 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -375,6 +375,32 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
375} 375}
376EXPORT_SYMBOL_GPL(rtc_set_alarm); 376EXPORT_SYMBOL_GPL(rtc_set_alarm);
377 377
378/* Called once per device from rtc_device_register */
379int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
380{
381 int err;
382
383 err = rtc_valid_tm(&alarm->time);
384 if (err != 0)
385 return err;
386
387 err = mutex_lock_interruptible(&rtc->ops_lock);
388 if (err)
389 return err;
390
391 rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
392 rtc->aie_timer.period = ktime_set(0, 0);
393 if (alarm->enabled) {
394 rtc->aie_timer.enabled = 1;
395 timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
396 }
397 mutex_unlock(&rtc->ops_lock);
398 return err;
399}
400EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
401
402
403
378int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) 404int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
379{ 405{
380 int err = mutex_lock_interruptible(&rtc->ops_lock); 406 int err = mutex_lock_interruptible(&rtc->ops_lock);
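The interface.c hunk above introduces rtc_initialize_alarm(), which rtc_device_register() now uses so that an alarm still armed in the RTC hardware is queued on rtc->timerqueue at registration time without going through the full rtc_set_alarm() path. A minimal userspace sketch, assuming only the stock /dev/rtc0 character device and the RTC_WKALM_RD ioctl (nothing here is part of the patch), that reads such a preserved alarm back:

    #include <fcntl.h>
    #include <linux/rtc.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        struct rtc_wkalrm alrm;
        int fd = open("/dev/rtc0", O_RDONLY);

        if (fd < 0) {
            perror("open /dev/rtc0");
            return 1;
        }
        if (ioctl(fd, RTC_WKALM_RD, &alrm) == 0)
            printf("alarm %s: %04d-%02d-%02d %02d:%02d:%02d UTC\n",
                   alrm.enabled ? "enabled" : "disabled",
                   alrm.time.tm_year + 1900, alrm.time.tm_mon + 1,
                   alrm.time.tm_mday, alrm.time.tm_hour,
                   alrm.time.tm_min, alrm.time.tm_sec);
        else
            perror("RTC_WKALM_RD");
        close(fd);
        return 0;
    }

Built with a plain "cc rtc_alarm.c", it prints whatever alarm the kernel imported at registration, if any.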
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index a0fc4cf42abf..90d866272c8e 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -250,6 +250,8 @@ static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
250 bfin_rtc_int_set_alarm(rtc); 250 bfin_rtc_int_set_alarm(rtc);
251 else 251 else
252 bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); 252 bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
253
254 return 0;
253} 255}
254 256
255static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) 257static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 316f484999b5..80f9c88214c5 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -220,6 +220,7 @@ static int __init coh901331_probe(struct platform_device *pdev)
220 } 220 }
221 clk_disable(rtap->clk); 221 clk_disable(rtap->clk);
222 222
223 platform_set_drvdata(pdev, rtap);
223 rtap->rtc = rtc_device_register("coh901331", &pdev->dev, &coh901331_ops, 224 rtap->rtc = rtc_device_register("coh901331", &pdev->dev, &coh901331_ops,
224 THIS_MODULE); 225 THIS_MODULE);
225 if (IS_ERR(rtap->rtc)) { 226 if (IS_ERR(rtap->rtc)) {
@@ -227,11 +228,10 @@ static int __init coh901331_probe(struct platform_device *pdev)
227 goto out_no_rtc; 228 goto out_no_rtc;
228 } 229 }
229 230
230 platform_set_drvdata(pdev, rtap);
231
232 return 0; 231 return 0;
233 232
234 out_no_rtc: 233 out_no_rtc:
234 platform_set_drvdata(pdev, NULL);
235 out_no_clk_enable: 235 out_no_clk_enable:
236 clk_put(rtap->clk); 236 clk_put(rtap->clk);
237 out_no_clk: 237 out_no_clk:
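The reordering above matters because rtc_device_register() calls straight back into the driver (it reads the alarm during registration, as the class.c hunk earlier shows), and those callbacks look their state up via drvdata. A stand-alone sketch of that ordering constraint, with register_device() and read_alarm_cb() as made-up stand-ins for the RTC core and driver callback:

    #include <stdio.h>

    struct my_state { int ticks; };

    static void *drvdata;                   /* what platform_set_drvdata() stores */

    static int read_alarm_cb(void)
    {
        struct my_state *st = drvdata;      /* NULL here means a crash in real life */
        return st ? st->ticks : -1;
    }

    static int register_device(int (*cb)(void))
    {
        /* like rtc_device_register(): calls back into the driver right away */
        return cb();
    }

    int main(void)
    {
        static struct my_state st = { .ticks = 42 };

        drvdata = &st;                      /* must happen before registering */
        printf("callback during register saw %d\n",
               register_device(read_alarm_cb));
        return 0;
    }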
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index de0dd7b1f146..bcae8dd41496 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -394,7 +394,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
394 return 0; 394 return 0;
395 395
396fail2: 396fail2:
397 free_irq(omap_rtc_timer, NULL); 397 free_irq(omap_rtc_timer, rtc);
398fail1: 398fail1:
399 rtc_device_unregister(rtc); 399 rtc_device_unregister(rtc);
400fail0: 400fail0:
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 714964913e5e..b3466c491cd3 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -336,7 +336,6 @@ static void s3c_rtc_release(struct device *dev)
336 336
337 /* do not clear AIE here, it may be needed for wake */ 337 /* do not clear AIE here, it may be needed for wake */
338 338
339 s3c_rtc_setpie(dev, 0);
340 free_irq(s3c_rtc_alarmno, rtc_dev); 339 free_irq(s3c_rtc_alarmno, rtc_dev);
341 free_irq(s3c_rtc_tickno, rtc_dev); 340 free_irq(s3c_rtc_tickno, rtc_dev);
342} 341}
@@ -408,7 +407,6 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
408 platform_set_drvdata(dev, NULL); 407 platform_set_drvdata(dev, NULL);
409 rtc_device_unregister(rtc); 408 rtc_device_unregister(rtc);
410 409
411 s3c_rtc_setpie(&dev->dev, 0);
412 s3c_rtc_setaie(&dev->dev, 0); 410 s3c_rtc_setaie(&dev->dev, 0);
413 411
414 clk_disable(rtc_clk); 412 clk_disable(rtc_clk);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4d2df2f76ea0..475e603fc584 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2314,15 +2314,14 @@ static void dasd_flush_request_queue(struct dasd_block *block)
2314 2314
2315static int dasd_open(struct block_device *bdev, fmode_t mode) 2315static int dasd_open(struct block_device *bdev, fmode_t mode)
2316{ 2316{
2317 struct dasd_block *block = bdev->bd_disk->private_data;
2318 struct dasd_device *base; 2317 struct dasd_device *base;
2319 int rc; 2318 int rc;
2320 2319
2321 if (!block) 2320 base = dasd_device_from_gendisk(bdev->bd_disk);
2321 if (!base)
2322 return -ENODEV; 2322 return -ENODEV;
2323 2323
2324 base = block->base; 2324 atomic_inc(&base->block->open_count);
2325 atomic_inc(&block->open_count);
2326 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 2325 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2327 rc = -ENODEV; 2326 rc = -ENODEV;
2328 goto unlock; 2327 goto unlock;
@@ -2355,21 +2354,28 @@ static int dasd_open(struct block_device *bdev, fmode_t mode)
2355 goto out; 2354 goto out;
2356 } 2355 }
2357 2356
2357 dasd_put_device(base);
2358 return 0; 2358 return 0;
2359 2359
2360out: 2360out:
2361 module_put(base->discipline->owner); 2361 module_put(base->discipline->owner);
2362unlock: 2362unlock:
2363 atomic_dec(&block->open_count); 2363 atomic_dec(&base->block->open_count);
2364 dasd_put_device(base);
2364 return rc; 2365 return rc;
2365} 2366}
2366 2367
2367static int dasd_release(struct gendisk *disk, fmode_t mode) 2368static int dasd_release(struct gendisk *disk, fmode_t mode)
2368{ 2369{
2369 struct dasd_block *block = disk->private_data; 2370 struct dasd_device *base;
2370 2371
2371 atomic_dec(&block->open_count); 2372 base = dasd_device_from_gendisk(disk);
2372 module_put(block->base->discipline->owner); 2373 if (!base)
2374 return -ENODEV;
2375
2376 atomic_dec(&base->block->open_count);
2377 module_put(base->discipline->owner);
2378 dasd_put_device(base);
2373 return 0; 2379 return 0;
2374} 2380}
2375 2381
@@ -2378,20 +2384,20 @@ static int dasd_release(struct gendisk *disk, fmode_t mode)
2378 */ 2384 */
2379static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 2385static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
2380{ 2386{
2381 struct dasd_block *block;
2382 struct dasd_device *base; 2387 struct dasd_device *base;
2383 2388
2384 block = bdev->bd_disk->private_data; 2389 base = dasd_device_from_gendisk(bdev->bd_disk);
2385 if (!block) 2390 if (!base)
2386 return -ENODEV; 2391 return -ENODEV;
2387 base = block->base;
2388 2392
2389 if (!base->discipline || 2393 if (!base->discipline ||
2390 !base->discipline->fill_geometry) 2394 !base->discipline->fill_geometry) {
2395 dasd_put_device(base);
2391 return -EINVAL; 2396 return -EINVAL;
2392 2397 }
2393 base->discipline->fill_geometry(block, geo); 2398 base->discipline->fill_geometry(base->block, geo);
2394 geo->start = get_start_sect(bdev) >> block->s2b_shift; 2399 geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
2400 dasd_put_device(base);
2395 return 0; 2401 return 0;
2396} 2402}
2397 2403
@@ -2528,7 +2534,6 @@ void dasd_generic_remove(struct ccw_device *cdev)
2528 dasd_set_target_state(device, DASD_STATE_NEW); 2534 dasd_set_target_state(device, DASD_STATE_NEW);
2529 /* dasd_delete_device destroys the device reference. */ 2535 /* dasd_delete_device destroys the device reference. */
2530 block = device->block; 2536 block = device->block;
2531 device->block = NULL;
2532 dasd_delete_device(device); 2537 dasd_delete_device(device);
2533 /* 2538 /*
2534 * life cycle of block is bound to device, so delete it after 2539 * life cycle of block is bound to device, so delete it after
@@ -2650,7 +2655,6 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
2650 dasd_set_target_state(device, DASD_STATE_NEW); 2655 dasd_set_target_state(device, DASD_STATE_NEW);
2651 /* dasd_delete_device destroys the device reference. */ 2656 /* dasd_delete_device destroys the device reference. */
2652 block = device->block; 2657 block = device->block;
2653 device->block = NULL;
2654 dasd_delete_device(device); 2658 dasd_delete_device(device);
2655 /* 2659 /*
2656 * life cycle of block is bound to device, so delete it after 2660 * life cycle of block is bound to device, so delete it after
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 42e1bf35f689..d71511c7850a 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -674,6 +674,36 @@ dasd_device_from_cdev(struct ccw_device *cdev)
674 return device; 674 return device;
675} 675}
676 676
677void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device)
678{
679 struct dasd_devmap *devmap;
680
681 devmap = dasd_find_busid(dev_name(&device->cdev->dev));
682 if (IS_ERR(devmap))
683 return;
684 spin_lock(&dasd_devmap_lock);
685 gdp->private_data = devmap;
686 spin_unlock(&dasd_devmap_lock);
687}
688
689struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp)
690{
691 struct dasd_device *device;
692 struct dasd_devmap *devmap;
693
694 if (!gdp->private_data)
695 return NULL;
696 device = NULL;
697 spin_lock(&dasd_devmap_lock);
698 devmap = gdp->private_data;
699 if (devmap && devmap->device) {
700 device = devmap->device;
701 dasd_get_device(device);
702 }
703 spin_unlock(&dasd_devmap_lock);
704 return device;
705}
706
677/* 707/*
678 * SECTION: files in sysfs 708 * SECTION: files in sysfs
679 */ 709 */
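dasd_device_from_gendisk() above resolves a gendisk to its dasd_device under dasd_devmap_lock and takes a reference before the lock is dropped; every caller later pairs it with dasd_put_device(). A generic, compilable sketch of that lookup-and-reference pattern; struct disk_map, device_from_map() and friends are illustrative names, not the DASD API:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct device_obj {
        int refcount;                       /* protected by map_lock */
        const char *name;
    };

    struct disk_map {
        struct device_obj *device;          /* may become NULL on teardown */
    };

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct device_obj *device_from_map(struct disk_map *map)
    {
        struct device_obj *dev = NULL;

        pthread_mutex_lock(&map_lock);
        if (map->device) {
            dev = map->device;
            dev->refcount++;                /* reference taken under the lock */
        }
        pthread_mutex_unlock(&map_lock);
        return dev;
    }

    static void put_device_obj(struct device_obj *dev)
    {
        int free_it;

        pthread_mutex_lock(&map_lock);
        free_it = (--dev->refcount == 0);
        pthread_mutex_unlock(&map_lock);
        if (free_it)
            free(dev);
    }

    int main(void)
    {
        struct device_obj *dev = malloc(sizeof(*dev));
        struct disk_map map = { .device = dev };

        dev->refcount = 1;                  /* the map itself holds one reference */
        dev->name = "dasda";

        struct device_obj *d = device_from_map(&map);
        if (d) {
            printf("using %s\n", d->name);
            put_device_obj(d);              /* caller's reference */
        }
        put_device_obj(dev);                /* drop the map's reference */
        return 0;
    }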
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index db8005d9f2fd..3ebdf5f92f8f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2037,7 +2037,7 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
2037 return; 2037 return;
2038 2038
2039 /* summary unit check */ 2039 /* summary unit check */
2040 if ((sense[7] == 0x0D) && 2040 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
2041 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 2041 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
2042 dasd_alias_handle_summary_unit_check(device, irb); 2042 dasd_alias_handle_summary_unit_check(device, irb);
2043 return; 2043 return;
@@ -2053,7 +2053,8 @@ static void dasd_eckd_check_for_device_change(struct dasd_device *device,
2053 /* loss of device reservation is handled via base devices only 2053 /* loss of device reservation is handled via base devices only
2054 * as alias devices may be used with several bases 2054 * as alias devices may be used with several bases
2055 */ 2055 */
2056 if (device->block && (sense[7] == 0x3F) && 2056 if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
2057 (sense[7] == 0x3F) &&
2057 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 2058 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
2058 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) { 2059 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
2059 if (device->features & DASD_FEATURE_FAILONSLCK) 2060 if (device->features & DASD_FEATURE_FAILONSLCK)
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 5505bc07e1e7..19a1ff03d65e 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -73,7 +73,7 @@ int dasd_gendisk_alloc(struct dasd_block *block)
73 if (base->features & DASD_FEATURE_READONLY || 73 if (base->features & DASD_FEATURE_READONLY ||
74 test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) 74 test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
75 set_disk_ro(gdp, 1); 75 set_disk_ro(gdp, 1);
76 gdp->private_data = block; 76 dasd_add_link_to_gendisk(gdp, base);
77 gdp->queue = block->request_queue; 77 gdp->queue = block->request_queue;
78 block->gdp = gdp; 78 block->gdp = gdp;
79 set_capacity(block->gdp, 0); 79 set_capacity(block->gdp, 0);
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index df9f6999411d..d1e4f2c1264c 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -686,6 +686,9 @@ struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
686struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *); 686struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
687struct dasd_device *dasd_device_from_devindex(int); 687struct dasd_device *dasd_device_from_devindex(int);
688 688
689void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
690struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
691
689int dasd_parse(void); 692int dasd_parse(void);
690int dasd_busid_known(const char *); 693int dasd_busid_known(const char *);
691 694
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 26075e95b1ba..72261e4c516d 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -42,16 +42,22 @@ dasd_ioctl_api_version(void __user *argp)
42static int 42static int
43dasd_ioctl_enable(struct block_device *bdev) 43dasd_ioctl_enable(struct block_device *bdev)
44{ 44{
45 struct dasd_block *block = bdev->bd_disk->private_data; 45 struct dasd_device *base;
46 46
47 if (!capable(CAP_SYS_ADMIN)) 47 if (!capable(CAP_SYS_ADMIN))
48 return -EACCES; 48 return -EACCES;
49 49
50 dasd_enable_device(block->base); 50 base = dasd_device_from_gendisk(bdev->bd_disk);
51 if (!base)
52 return -ENODEV;
53
54 dasd_enable_device(base);
51 /* Formatting the dasd device can change the capacity. */ 55 /* Formatting the dasd device can change the capacity. */
52 mutex_lock(&bdev->bd_mutex); 56 mutex_lock(&bdev->bd_mutex);
53 i_size_write(bdev->bd_inode, (loff_t)get_capacity(block->gdp) << 9); 57 i_size_write(bdev->bd_inode,
58 (loff_t)get_capacity(base->block->gdp) << 9);
54 mutex_unlock(&bdev->bd_mutex); 59 mutex_unlock(&bdev->bd_mutex);
60 dasd_put_device(base);
55 return 0; 61 return 0;
56} 62}
57 63
@@ -62,11 +68,14 @@ dasd_ioctl_enable(struct block_device *bdev)
62static int 68static int
63dasd_ioctl_disable(struct block_device *bdev) 69dasd_ioctl_disable(struct block_device *bdev)
64{ 70{
65 struct dasd_block *block = bdev->bd_disk->private_data; 71 struct dasd_device *base;
66 72
67 if (!capable(CAP_SYS_ADMIN)) 73 if (!capable(CAP_SYS_ADMIN))
68 return -EACCES; 74 return -EACCES;
69 75
76 base = dasd_device_from_gendisk(bdev->bd_disk);
77 if (!base)
78 return -ENODEV;
70 /* 79 /*
71 * Man this is sick. We don't do a real disable but only downgrade 80 * Man this is sick. We don't do a real disable but only downgrade
72 * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses 81 * the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
@@ -75,7 +84,7 @@ dasd_ioctl_disable(struct block_device *bdev)
75 * using the BIODASDFMT ioctl. Therefore the correct state for the 84 * using the BIODASDFMT ioctl. Therefore the correct state for the
76 * device is DASD_STATE_BASIC that allows to do basic i/o. 85 * device is DASD_STATE_BASIC that allows to do basic i/o.
77 */ 86 */
78 dasd_set_target_state(block->base, DASD_STATE_BASIC); 87 dasd_set_target_state(base, DASD_STATE_BASIC);
79 /* 88 /*
80 * Set i_size to zero, since read, write, etc. check against this 89 * Set i_size to zero, since read, write, etc. check against this
81 * value. 90 * value.
@@ -83,6 +92,7 @@ dasd_ioctl_disable(struct block_device *bdev)
83 mutex_lock(&bdev->bd_mutex); 92 mutex_lock(&bdev->bd_mutex);
84 i_size_write(bdev->bd_inode, 0); 93 i_size_write(bdev->bd_inode, 0);
85 mutex_unlock(&bdev->bd_mutex); 94 mutex_unlock(&bdev->bd_mutex);
95 dasd_put_device(base);
86 return 0; 96 return 0;
87} 97}
88 98
@@ -191,26 +201,36 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata)
191static int 201static int
192dasd_ioctl_format(struct block_device *bdev, void __user *argp) 202dasd_ioctl_format(struct block_device *bdev, void __user *argp)
193{ 203{
194 struct dasd_block *block = bdev->bd_disk->private_data; 204 struct dasd_device *base;
195 struct format_data_t fdata; 205 struct format_data_t fdata;
206 int rc;
196 207
197 if (!capable(CAP_SYS_ADMIN)) 208 if (!capable(CAP_SYS_ADMIN))
198 return -EACCES; 209 return -EACCES;
199 if (!argp) 210 if (!argp)
200 return -EINVAL; 211 return -EINVAL;
201 212 base = dasd_device_from_gendisk(bdev->bd_disk);
202 if (block->base->features & DASD_FEATURE_READONLY || 213 if (!base)
203 test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags)) 214 return -ENODEV;
215 if (base->features & DASD_FEATURE_READONLY ||
216 test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
217 dasd_put_device(base);
204 return -EROFS; 218 return -EROFS;
205 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) 219 }
220 if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) {
221 dasd_put_device(base);
206 return -EFAULT; 222 return -EFAULT;
223 }
207 if (bdev != bdev->bd_contains) { 224 if (bdev != bdev->bd_contains) {
208 pr_warning("%s: The specified DASD is a partition and cannot " 225 pr_warning("%s: The specified DASD is a partition and cannot "
209 "be formatted\n", 226 "be formatted\n",
210 dev_name(&block->base->cdev->dev)); 227 dev_name(&base->cdev->dev));
228 dasd_put_device(base);
211 return -EINVAL; 229 return -EINVAL;
212 } 230 }
213 return dasd_format(block, &fdata); 231 rc = dasd_format(base->block, &fdata);
232 dasd_put_device(base);
233 return rc;
214} 234}
215 235
216#ifdef CONFIG_DASD_PROFILE 236#ifdef CONFIG_DASD_PROFILE
@@ -340,8 +360,8 @@ static int dasd_ioctl_information(struct dasd_block *block,
340static int 360static int
341dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) 361dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
342{ 362{
343 struct dasd_block *block = bdev->bd_disk->private_data; 363 struct dasd_device *base;
344 int intval; 364 int intval, rc;
345 365
346 if (!capable(CAP_SYS_ADMIN)) 366 if (!capable(CAP_SYS_ADMIN))
347 return -EACCES; 367 return -EACCES;
@@ -350,10 +370,17 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
350 return -EINVAL; 370 return -EINVAL;
351 if (get_user(intval, (int __user *)argp)) 371 if (get_user(intval, (int __user *)argp))
352 return -EFAULT; 372 return -EFAULT;
353 if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &block->base->flags)) 373 base = dasd_device_from_gendisk(bdev->bd_disk);
374 if (!base)
375 return -ENODEV;
376 if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
377 dasd_put_device(base);
354 return -EROFS; 378 return -EROFS;
379 }
355 set_disk_ro(bdev->bd_disk, intval); 380 set_disk_ro(bdev->bd_disk, intval);
356 return dasd_set_feature(block->base->cdev, DASD_FEATURE_READONLY, intval); 381 rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval);
382 dasd_put_device(base);
383 return rc;
357} 384}
358 385
359static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, 386static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
@@ -372,59 +399,78 @@ static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
372int dasd_ioctl(struct block_device *bdev, fmode_t mode, 399int dasd_ioctl(struct block_device *bdev, fmode_t mode,
373 unsigned int cmd, unsigned long arg) 400 unsigned int cmd, unsigned long arg)
374{ 401{
375 struct dasd_block *block = bdev->bd_disk->private_data; 402 struct dasd_block *block;
403 struct dasd_device *base;
376 void __user *argp; 404 void __user *argp;
405 int rc;
377 406
378 if (is_compat_task()) 407 if (is_compat_task())
379 argp = compat_ptr(arg); 408 argp = compat_ptr(arg);
380 else 409 else
381 argp = (void __user *)arg; 410 argp = (void __user *)arg;
382 411
383 if (!block)
384 return -ENODEV;
385
386 if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) { 412 if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
387 PRINT_DEBUG("empty data ptr"); 413 PRINT_DEBUG("empty data ptr");
388 return -EINVAL; 414 return -EINVAL;
389 } 415 }
390 416
417 base = dasd_device_from_gendisk(bdev->bd_disk);
418 if (!base)
419 return -ENODEV;
420 block = base->block;
421 rc = 0;
391 switch (cmd) { 422 switch (cmd) {
392 case BIODASDDISABLE: 423 case BIODASDDISABLE:
393 return dasd_ioctl_disable(bdev); 424 rc = dasd_ioctl_disable(bdev);
425 break;
394 case BIODASDENABLE: 426 case BIODASDENABLE:
395 return dasd_ioctl_enable(bdev); 427 rc = dasd_ioctl_enable(bdev);
428 break;
396 case BIODASDQUIESCE: 429 case BIODASDQUIESCE:
397 return dasd_ioctl_quiesce(block); 430 rc = dasd_ioctl_quiesce(block);
431 break;
398 case BIODASDRESUME: 432 case BIODASDRESUME:
399 return dasd_ioctl_resume(block); 433 rc = dasd_ioctl_resume(block);
434 break;
400 case BIODASDFMT: 435 case BIODASDFMT:
401 return dasd_ioctl_format(bdev, argp); 436 rc = dasd_ioctl_format(bdev, argp);
437 break;
402 case BIODASDINFO: 438 case BIODASDINFO:
403 return dasd_ioctl_information(block, cmd, argp); 439 rc = dasd_ioctl_information(block, cmd, argp);
440 break;
404 case BIODASDINFO2: 441 case BIODASDINFO2:
405 return dasd_ioctl_information(block, cmd, argp); 442 rc = dasd_ioctl_information(block, cmd, argp);
443 break;
406 case BIODASDPRRD: 444 case BIODASDPRRD:
407 return dasd_ioctl_read_profile(block, argp); 445 rc = dasd_ioctl_read_profile(block, argp);
446 break;
408 case BIODASDPRRST: 447 case BIODASDPRRST:
409 return dasd_ioctl_reset_profile(block); 448 rc = dasd_ioctl_reset_profile(block);
449 break;
410 case BLKROSET: 450 case BLKROSET:
411 return dasd_ioctl_set_ro(bdev, argp); 451 rc = dasd_ioctl_set_ro(bdev, argp);
452 break;
412 case DASDAPIVER: 453 case DASDAPIVER:
413 return dasd_ioctl_api_version(argp); 454 rc = dasd_ioctl_api_version(argp);
455 break;
414 case BIODASDCMFENABLE: 456 case BIODASDCMFENABLE:
415 return enable_cmf(block->base->cdev); 457 rc = enable_cmf(base->cdev);
458 break;
416 case BIODASDCMFDISABLE: 459 case BIODASDCMFDISABLE:
417 return disable_cmf(block->base->cdev); 460 rc = disable_cmf(base->cdev);
461 break;
418 case BIODASDREADALLCMB: 462 case BIODASDREADALLCMB:
419 return dasd_ioctl_readall_cmb(block, cmd, argp); 463 rc = dasd_ioctl_readall_cmb(block, cmd, argp);
464 break;
420 default: 465 default:
421 /* if the discipline has an ioctl method try it. */ 466 /* if the discipline has an ioctl method try it. */
422 if (block->base->discipline->ioctl) { 467 if (base->discipline->ioctl) {
423 int rval = block->base->discipline->ioctl(block, cmd, argp); 468 rc = base->discipline->ioctl(block, cmd, argp);
424 if (rval != -ENOIOCTLCMD) 469 if (rc == -ENOIOCTLCMD)
425 return rval; 470 rc = -EINVAL;
426 } 471 } else
427 472 rc = -EINVAL;
428 return -EINVAL;
429 } 473 }
474 dasd_put_device(base);
475 return rc;
430} 476}
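The ioctl rework above replaces the per-case returns with a single exit so the reference obtained from dasd_device_from_gendisk() is dropped on every path, including the default one. A toy dispatcher showing the same control flow; the command names and the ref counter are invented for illustration:

    #include <errno.h>
    #include <stdio.h>

    enum { CMD_ENABLE = 1, CMD_DISABLE = 2 };

    static int ref;                         /* stands in for the device reference */

    static int do_enable(void)  { return 0; }
    static int do_disable(void) { return 0; }

    static int dispatch(int cmd)
    {
        int rc;

        ref++;                              /* "dasd_device_from_gendisk()" */
        switch (cmd) {
        case CMD_ENABLE:
            rc = do_enable();
            break;
        case CMD_DISABLE:
            rc = do_disable();
            break;
        default:
            rc = -EINVAL;                   /* no early return, so the put below always runs */
            break;
        }
        ref--;                              /* "dasd_put_device()" on every path */
        return rc;
    }

    int main(void)
    {
        printf("enable: %d, unknown cmd: %d, leaked refs: %d\n",
               dispatch(CMD_ENABLE), dispatch(99), ref);
        return 0;
    }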
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index c532ba929ccd..e8f267eb8887 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -407,8 +407,11 @@ static inline void account_sbals(struct qdio_q *q, int count)
407 q->q_stats.nr_sbals[pos]++; 407 q->q_stats.nr_sbals[pos]++;
408} 408}
409 409
410static void announce_buffer_error(struct qdio_q *q, int count) 410static void process_buffer_error(struct qdio_q *q, int count)
411{ 411{
412 unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
413 SLSB_P_OUTPUT_NOT_INIT;
414
412 q->qdio_error |= QDIO_ERROR_SLSB_STATE; 415 q->qdio_error |= QDIO_ERROR_SLSB_STATE;
413 416
414 /* special handling for no target buffer empty */ 417 /* special handling for no target buffer empty */
@@ -426,6 +429,12 @@ static void announce_buffer_error(struct qdio_q *q, int count)
426 DBF_ERROR("F14:%2x F15:%2x", 429 DBF_ERROR("F14:%2x F15:%2x",
427 q->sbal[q->first_to_check]->element[14].flags & 0xff, 430 q->sbal[q->first_to_check]->element[14].flags & 0xff,
428 q->sbal[q->first_to_check]->element[15].flags & 0xff); 431 q->sbal[q->first_to_check]->element[15].flags & 0xff);
432
433 /*
434 * Interrupts may be avoided as long as the error is present
435 * so change the buffer state immediately to avoid starvation.
436 */
437 set_buf_states(q, q->first_to_check, state, count);
429} 438}
430 439
431static inline void inbound_primed(struct qdio_q *q, int count) 440static inline void inbound_primed(struct qdio_q *q, int count)
@@ -506,8 +515,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
506 account_sbals(q, count); 515 account_sbals(q, count);
507 break; 516 break;
508 case SLSB_P_INPUT_ERROR: 517 case SLSB_P_INPUT_ERROR:
509 announce_buffer_error(q, count); 518 process_buffer_error(q, count);
510 /* process the buffer, the upper layer will take care of it */
511 q->first_to_check = add_buf(q->first_to_check, count); 519 q->first_to_check = add_buf(q->first_to_check, count);
512 atomic_sub(count, &q->nr_buf_used); 520 atomic_sub(count, &q->nr_buf_used);
513 if (q->irq_ptr->perf_stat_enabled) 521 if (q->irq_ptr->perf_stat_enabled)
@@ -677,8 +685,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
677 account_sbals(q, count); 685 account_sbals(q, count);
678 break; 686 break;
679 case SLSB_P_OUTPUT_ERROR: 687 case SLSB_P_OUTPUT_ERROR:
680 announce_buffer_error(q, count); 688 process_buffer_error(q, count);
681 /* process the buffer, the upper layer will take care of it */
682 q->first_to_check = add_buf(q->first_to_check, count); 689 q->first_to_check = add_buf(q->first_to_check, count);
683 atomic_sub(count, &q->nr_buf_used); 690 atomic_sub(count, &q->nr_buf_used);
684 if (q->irq_ptr->perf_stat_enabled) 691 if (q->irq_ptr->perf_stat_enabled)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6d5c7ff43f5b..e9901b8f8443 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
411 list_splice_init(&shost->starved_list, &starved_list); 411 list_splice_init(&shost->starved_list, &starved_list);
412 412
413 while (!list_empty(&starved_list)) { 413 while (!list_empty(&starved_list)) {
414 int flagset;
415
416 /* 414 /*
417 * As long as shost is accepting commands and we have 415 * As long as shost is accepting commands and we have
418 * starved queues, call blk_run_queue. scsi_request_fn 416 * starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
435 continue; 433 continue;
436 } 434 }
437 435
438 spin_unlock(shost->host_lock); 436 blk_run_queue_async(sdev->request_queue);
439
440 spin_lock(sdev->request_queue->queue_lock);
441 flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
442 !test_bit(QUEUE_FLAG_REENTER,
443 &sdev->request_queue->queue_flags);
444 if (flagset)
445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
446 __blk_run_queue(sdev->request_queue, false);
447 if (flagset)
448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
449 spin_unlock(sdev->request_queue->queue_lock);
450
451 spin_lock(shost->host_lock);
452 } 437 }
453 /* put any unprocessed entries back */ 438 /* put any unprocessed entries back */
454 list_splice(&starved_list, &shost->starved_list); 439 list_splice(&starved_list, &shost->starved_list);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index fdf3fa639056..815069d13f9b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3816,28 +3816,17 @@ fail_host_msg:
3816static void 3816static void
3817fc_bsg_goose_queue(struct fc_rport *rport) 3817fc_bsg_goose_queue(struct fc_rport *rport)
3818{ 3818{
3819 int flagset;
3820 unsigned long flags;
3821
3822 if (!rport->rqst_q) 3819 if (!rport->rqst_q)
3823 return; 3820 return;
3824 3821
3822 /*
3823 * This get/put dance makes no sense
3824 */
3825 get_device(&rport->dev); 3825 get_device(&rport->dev);
3826 3826 blk_run_queue_async(rport->rqst_q);
3827 spin_lock_irqsave(rport->rqst_q->queue_lock, flags);
3828 flagset = test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags) &&
3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
3830 if (flagset)
3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
3832 __blk_run_queue(rport->rqst_q, false);
3833 if (flagset)
3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
3836
3837 put_device(&rport->dev); 3827 put_device(&rport->dev);
3838} 3828}
3839 3829
3840
3841/** 3830/**
3842 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD 3831 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
3843 * @q: rport request queue 3832 * @q: rport request queue
diff --git a/drivers/staging/rt2860/common/cmm_data_pci.c b/drivers/staging/rt2860/common/cmm_data_pci.c
index bef0bbd8cef7..f01a51c381f1 100644
--- a/drivers/staging/rt2860/common/cmm_data_pci.c
+++ b/drivers/staging/rt2860/common/cmm_data_pci.c
@@ -444,7 +444,7 @@ int RTMPCheckRxError(struct rt_rtmp_adapter *pAd,
444 return (NDIS_STATUS_FAILURE); 444 return (NDIS_STATUS_FAILURE);
445 } 445 }
446 } 446 }
447 /* Drop not U2M frames, can't's drop here because we will drop beacon in this case */ 447 /* Drop not U2M frames, can't drop here because we will drop beacon in this case */
448 /* I am kind of doubting the U2M bit operation */ 448 /* I am kind of doubting the U2M bit operation */
449 /* if (pRxD->U2M == 0) */ 449 /* if (pRxD->U2M == 0) */
450 /* return(NDIS_STATUS_FAILURE); */ 450 /* return(NDIS_STATUS_FAILURE); */
diff --git a/drivers/staging/rt2860/common/cmm_data_usb.c b/drivers/staging/rt2860/common/cmm_data_usb.c
index 5637857ae9eb..83a62faa7e57 100644
--- a/drivers/staging/rt2860/common/cmm_data_usb.c
+++ b/drivers/staging/rt2860/common/cmm_data_usb.c
@@ -860,7 +860,7 @@ int RTMPCheckRxError(struct rt_rtmp_adapter *pAd,
860 DBGPRINT_RAW(RT_DEBUG_ERROR, ("received packet too long\n")); 860 DBGPRINT_RAW(RT_DEBUG_ERROR, ("received packet too long\n"));
861 return NDIS_STATUS_FAILURE; 861 return NDIS_STATUS_FAILURE;
862 } 862 }
863 /* Drop not U2M frames, can't's drop here because we will drop beacon in this case */ 863 /* Drop not U2M frames, can't drop here because we will drop beacon in this case */
864 /* I am kind of doubting the U2M bit operation */ 864 /* I am kind of doubting the U2M bit operation */
865 /* if (pRxD->U2M == 0) */ 865 /* if (pRxD->U2M == 0) */
866 /* return(NDIS_STATUS_FAILURE); */ 866 /* return(NDIS_STATUS_FAILURE); */
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
index 20dae73d3b78..506547b603e1 100644
--- a/drivers/staging/spectra/ffsport.c
+++ b/drivers/staging/spectra/ffsport.c
@@ -653,7 +653,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
653 } 653 }
654 dev->queue->queuedata = dev; 654 dev->queue->queuedata = dev;
655 655
656 /* As Linux block layer does't support >4KB hardware sector, */ 656 /* As Linux block layer doesn't support >4KB hardware sector, */
657 /* Here we force report 512 byte hardware sector size to Kernel */ 657 /* Here we force report 512 byte hardware sector size to Kernel */
658 blk_queue_logical_block_size(dev->queue, 512); 658 blk_queue_logical_block_size(dev->queue, 512);
659 659
diff --git a/drivers/staging/tidspbridge/dynload/cload.c b/drivers/staging/tidspbridge/dynload/cload.c
index 5cecd237e3f6..fe1ef0addb09 100644
--- a/drivers/staging/tidspbridge/dynload/cload.c
+++ b/drivers/staging/tidspbridge/dynload/cload.c
@@ -718,7 +718,7 @@ static void dload_symbols(struct dload_state *dlthis)
718 * as a temporary for .dllview record construction. 718 * as a temporary for .dllview record construction.
719 * Allocate storage for the whole table. Add 1 to the section count 719 * Allocate storage for the whole table. Add 1 to the section count
720 * in case a trampoline section is auto-generated as well as the 720 * in case a trampoline section is auto-generated as well as the
721 * size of the trampoline section name so DLLView does't get lost. 721 * size of the trampoline section name so DLLView doesn't get lost.
722 */ 722 */
723 723
724 siz = sym_count * sizeof(struct local_symbol); 724 siz = sym_count * sizeof(struct local_symbol);
diff --git a/drivers/staging/tty/specialix.c b/drivers/staging/tty/specialix.c
index cb24c6d999db..5c3598ec7456 100644
--- a/drivers/staging/tty/specialix.c
+++ b/drivers/staging/tty/specialix.c
@@ -978,7 +978,7 @@ static void sx_change_speed(struct specialix_board *bp,
978 spin_lock_irqsave(&bp->lock, flags); 978 spin_lock_irqsave(&bp->lock, flags);
979 sx_out(bp, CD186x_CAR, port_No(port)); 979 sx_out(bp, CD186x_CAR, port_No(port));
980 980
981 /* The Specialix board does't implement the RTS lines. 981 /* The Specialix board doesn't implement the RTS lines.
982 They are used to set the IRQ level. Don't touch them. */ 982 They are used to set the IRQ level. Don't touch them. */
983 if (sx_crtscts(tty)) 983 if (sx_crtscts(tty))
984 port->MSVR = MSVR_DTR | (sx_in(bp, CD186x_MSVR) & MSVR_RTS); 984 port->MSVR = MSVR_DTR | (sx_in(bp, CD186x_MSVR) & MSVR_RTS);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 47f8cdb207f1..74273e638c0d 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1658,8 +1658,12 @@ static void gsm_queue(struct gsm_mux *gsm)
1658 1658
1659 if ((gsm->control & ~PF) == UI) 1659 if ((gsm->control & ~PF) == UI)
1660 gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len); 1660 gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
1661 /* generate final CRC with received FCS */ 1661 if (gsm->encoding == 0){
1662 gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs); 1662 /* WARNING: gsm->received_fcs is used for gsm->encoding = 0 only.
1663 In this case it contain the last piece of data
1664 required to generate final CRC */
1665 gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
1666 }
1663 if (gsm->fcs != GOOD_FCS) { 1667 if (gsm->fcs != GOOD_FCS) {
1664 gsm->bad_fcs++; 1668 gsm->bad_fcs++;
1665 if (debug & 4) 1669 if (debug & 4)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index cb36b0d4ef3c..62df72d9f0aa 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -382,12 +382,13 @@ static void imx_start_tx(struct uart_port *port)
382static irqreturn_t imx_rtsint(int irq, void *dev_id) 382static irqreturn_t imx_rtsint(int irq, void *dev_id)
383{ 383{
384 struct imx_port *sport = dev_id; 384 struct imx_port *sport = dev_id;
385 unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS; 385 unsigned int val;
386 unsigned long flags; 386 unsigned long flags;
387 387
388 spin_lock_irqsave(&sport->port.lock, flags); 388 spin_lock_irqsave(&sport->port.lock, flags);
389 389
390 writel(USR1_RTSD, sport->port.membase + USR1); 390 writel(USR1_RTSD, sport->port.membase + USR1);
391 val = readl(sport->port.membase + USR1) & USR1_RTSS;
391 uart_handle_cts_change(&sport->port, !!val); 392 uart_handle_cts_change(&sport->port, !!val);
392 wake_up_interruptible(&sport->port.state->port.delta_msr_wait); 393 wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
393 394
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 41b6e51188e4..006489d82dc3 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -66,6 +66,7 @@ config USB_ARCH_HAS_EHCI
66 default y if ARCH_VT8500 66 default y if ARCH_VT8500
67 default y if PLAT_SPEAR 67 default y if PLAT_SPEAR
68 default y if ARCH_MSM 68 default y if ARCH_MSM
69 default y if MICROBLAZE
69 default PCI 70 default PCI
70 71
71# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface. 72# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index a3d2e2399655..96fdfb815f89 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -221,7 +221,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
221 break; 221 break;
222 case USB_ENDPOINT_XFER_INT: 222 case USB_ENDPOINT_XFER_INT:
223 type = "Int."; 223 type = "Int.";
224 if (speed == USB_SPEED_HIGH) 224 if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
225 interval = 1 << (desc->bInterval - 1); 225 interval = 1 << (desc->bInterval - 1);
226 else 226 else
227 interval = desc->bInterval; 227 interval = desc->bInterval;
@@ -229,7 +229,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
229 default: /* "can't happen" */ 229 default: /* "can't happen" */
230 return start; 230 return start;
231 } 231 }
232 interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000; 232 interval *= (speed == USB_SPEED_HIGH ||
233 speed == USB_SPEED_SUPER) ? 125 : 1000;
233 if (interval % 1000) 234 if (interval % 1000)
234 unit = 'u'; 235 unit = 'u';
235 else { 236 else {
@@ -542,8 +543,9 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
542 if (level == 0) { 543 if (level == 0) {
543 int max; 544 int max;
544 545
545 /* high speed reserves 80%, full/low reserves 90% */ 546 /* super/high speed reserves 80%, full/low reserves 90% */
546 if (usbdev->speed == USB_SPEED_HIGH) 547 if (usbdev->speed == USB_SPEED_HIGH ||
548 usbdev->speed == USB_SPEED_SUPER)
547 max = 800; 549 max = 800;
548 else 550 else
549 max = FRAME_TIME_MAX_USECS_ALLOC; 551 max = FRAME_TIME_MAX_USECS_ALLOC;
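The devices.c change extends the interrupt-endpoint interval decoding to SuperSpeed, which uses the same exponent encoding and 125 us microframe unit as high speed, while full/low speed keep a plain count of 1 ms frames. A stand-alone sketch of that arithmetic (the enum and helper below are local to the example, not USB core API):

    #include <stdio.h>

    enum speed { SPEED_LOW, SPEED_FULL, SPEED_HIGH, SPEED_SUPER };

    static unsigned int int_ep_interval_us(enum speed s, unsigned int binterval)
    {
        unsigned int interval;

        if (s == SPEED_HIGH || s == SPEED_SUPER)
            interval = 1u << (binterval - 1);   /* exponent of microframes */
        else
            interval = binterval;               /* count of frames */

        return interval * ((s == SPEED_HIGH || s == SPEED_SUPER) ? 125 : 1000);
    }

    int main(void)
    {
        printf("super-speed, bInterval 4 -> %u us\n",
               int_ep_interval_us(SPEED_SUPER, 4));  /* 8 * 125 = 1000 us */
        printf("full-speed,  bInterval 4 -> %u us\n",
               int_ep_interval_us(SPEED_FULL, 4));   /* 4 * 1000 = 4000 us */
        return 0;
    }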
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8eed05d23838..77a7faec8d78 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1908,7 +1908,7 @@ void usb_free_streams(struct usb_interface *interface,
1908 1908
1909 /* Streams only apply to bulk endpoints. */ 1909 /* Streams only apply to bulk endpoints. */
1910 for (i = 0; i < num_eps; i++) 1910 for (i = 0; i < num_eps; i++)
1911 if (!usb_endpoint_xfer_bulk(&eps[i]->desc)) 1911 if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc))
1912 return; 1912 return;
1913 1913
1914 hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags); 1914 hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 8fb754916c67..93720bdc9efd 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2285,7 +2285,17 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2285 } 2285 }
2286 2286
2287 /* see 7.1.7.6 */ 2287 /* see 7.1.7.6 */
2288 status = set_port_feature(hub->hdev, port1, USB_PORT_FEAT_SUSPEND); 2288 /* Clear PORT_POWER if it's a USB3.0 device connected to USB 3.0
2289 * external hub.
2290 * FIXME: this is a temporary workaround to make the system able
2291 * to suspend/resume.
2292 */
2293 if ((hub->hdev->parent != NULL) && hub_is_superspeed(hub->hdev))
2294 status = clear_port_feature(hub->hdev, port1,
2295 USB_PORT_FEAT_POWER);
2296 else
2297 status = set_port_feature(hub->hdev, port1,
2298 USB_PORT_FEAT_SUSPEND);
2289 if (status) { 2299 if (status) {
2290 dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", 2300 dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
2291 port1, status); 2301 port1, status);
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 9abecfddb27d..0111f8a9cf7f 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -706,6 +706,7 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
706 struct f_audio *audio = func_to_audio(f); 706 struct f_audio *audio = func_to_audio(f);
707 707
708 usb_free_descriptors(f->descriptors); 708 usb_free_descriptors(f->descriptors);
709 usb_free_descriptors(f->hs_descriptors);
709 kfree(audio); 710 kfree(audio);
710} 711}
711 712
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 95dd4662d6a8..b3c304290150 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -314,6 +314,9 @@ eem_unbind(struct usb_configuration *c, struct usb_function *f)
314 314
315static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) 315static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
316{ 316{
317 struct sk_buff *skb = (struct sk_buff *)req->context;
318
319 dev_kfree_skb_any(skb);
317} 320}
318 321
319/* 322/*
@@ -428,10 +431,11 @@ static int eem_unwrap(struct gether *port,
428 skb_trim(skb2, len); 431 skb_trim(skb2, len);
429 put_unaligned_le16(BIT(15) | BIT(11) | len, 432 put_unaligned_le16(BIT(15) | BIT(11) | len,
430 skb_push(skb2, 2)); 433 skb_push(skb2, 2));
431 skb_copy_bits(skb, 0, req->buf, skb->len); 434 skb_copy_bits(skb2, 0, req->buf, skb2->len);
432 req->length = skb->len; 435 req->length = skb2->len;
433 req->complete = eem_cmd_complete; 436 req->complete = eem_cmd_complete;
434 req->zero = 1; 437 req->zero = 1;
438 req->context = skb2;
435 if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) 439 if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
436 DBG(cdev, "echo response queue fail\n"); 440 DBG(cdev, "echo response queue fail\n");
437 break; 441 break;
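The f_eem fix above copies the echo response out of skb2, remembers that skb in req->context and frees it from eem_cmd_complete() once the controller is finished with the request, instead of leaking it. A self-contained sketch of that complete-callback-owns-the-context pattern; struct fake_req and queue_request() are invented, not the gadget API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct fake_req {
        void *buf;
        size_t length;
        void *context;                      /* owner of the source buffer */
        void (*complete)(struct fake_req *req);
    };

    static void cmd_complete(struct fake_req *req)
    {
        free(req->context);                 /* echo skb freed here, not earlier */
        req->context = NULL;
    }

    static void queue_request(struct fake_req *req)
    {
        /* a real controller would DMA req->buf and call ->complete later */
        req->complete(req);
    }

    int main(void)
    {
        static char bounce[64];
        struct fake_req req = { .buf = bounce, .complete = cmd_complete };
        char *echo = malloc(16);

        memcpy(echo, "EEM echo payload", 16);
        memcpy(req.buf, echo, 16);          /* copy before queuing */
        req.length = 16;
        req.context = echo;                 /* released in cmd_complete() */
        queue_request(&req);
        printf("request completed, context released\n");
        return 0;
    }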
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index aee7e3c53c38..36613b37c504 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1148,6 +1148,12 @@ static int qe_ep_tx(struct qe_ep *ep, struct qe_frame *frame)
1148static int txcomplete(struct qe_ep *ep, unsigned char restart) 1148static int txcomplete(struct qe_ep *ep, unsigned char restart)
1149{ 1149{
1150 if (ep->tx_req != NULL) { 1150 if (ep->tx_req != NULL) {
1151 struct qe_req *req = ep->tx_req;
1152 unsigned zlp = 0, last_len = 0;
1153
1154 last_len = min_t(unsigned, req->req.length - ep->sent,
1155 ep->ep.maxpacket);
1156
1151 if (!restart) { 1157 if (!restart) {
1152 int asent = ep->last; 1158 int asent = ep->last;
1153 ep->sent += asent; 1159 ep->sent += asent;
@@ -1156,9 +1162,18 @@ static int txcomplete(struct qe_ep *ep, unsigned char restart)
1156 ep->last = 0; 1162 ep->last = 0;
1157 } 1163 }
1158 1164
 1165 /* zlp needed when req->req.zero is set */
1166 if (req->req.zero) {
1167 if (last_len == 0 ||
1168 (req->req.length % ep->ep.maxpacket) != 0)
1169 zlp = 0;
1170 else
1171 zlp = 1;
1172 } else
1173 zlp = 0;
1174
1159 /* a request already were transmitted completely */ 1175 /* a request already were transmitted completely */
1160 if ((ep->tx_req->req.length - ep->sent) <= 0) { 1176 if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) {
1161 ep->tx_req->req.actual = (unsigned int)ep->sent;
1162 done(ep, ep->tx_req, 0); 1177 done(ep, ep->tx_req, 0);
1163 ep->tx_req = NULL; 1178 ep->tx_req = NULL;
1164 ep->last = 0; 1179 ep->last = 0;
@@ -1191,6 +1206,7 @@ static int qe_usb_senddata(struct qe_ep *ep, struct qe_frame *frame)
1191 buf = (u8 *)ep->tx_req->req.buf + ep->sent; 1206 buf = (u8 *)ep->tx_req->req.buf + ep->sent;
1192 if (buf && size) { 1207 if (buf && size) {
1193 ep->last = size; 1208 ep->last = size;
1209 ep->tx_req->req.actual += size;
1194 frame_set_data(frame, buf); 1210 frame_set_data(frame, buf);
1195 frame_set_length(frame, size); 1211 frame_set_length(frame, size);
1196 frame_set_status(frame, FRAME_OK); 1212 frame_set_status(frame, FRAME_OK);
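The txcomplete() change above queues a zero-length packet when the request has req.zero set and the payload ends exactly on a packet boundary, so the host can tell the transfer is finished. The decision reduces to a little arithmetic, sketched stand-alone below (need_zlp() is not a kernel helper):

    #include <stdio.h>

    static int need_zlp(unsigned int length, unsigned int maxpacket, int zero_flag)
    {
        /* ZLP only when asked for and the last packet is completely full */
        return zero_flag && length != 0 && (length % maxpacket) == 0;
    }

    int main(void)
    {
        printf("512 bytes, 64-byte packets, zero=1 -> ZLP: %d\n",
               need_zlp(512, 64, 1));
        printf("500 bytes, 64-byte packets, zero=1 -> ZLP: %d\n",
               need_zlp(500, 64, 1));
        printf("512 bytes, 64-byte packets, zero=0 -> ZLP: %d\n",
               need_zlp(512, 64, 0));
        return 0;
    }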
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 3ed73f49cf18..a01383f71f38 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -386,8 +386,10 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
386 386
387 /* halt any endpoint by doing a "wrong direction" i/o call */ 387 /* halt any endpoint by doing a "wrong direction" i/o call */
388 if (usb_endpoint_dir_in(&data->desc)) { 388 if (usb_endpoint_dir_in(&data->desc)) {
389 if (usb_endpoint_xfer_isoc(&data->desc)) 389 if (usb_endpoint_xfer_isoc(&data->desc)) {
390 mutex_unlock(&data->lock);
390 return -EINVAL; 391 return -EINVAL;
392 }
391 DBG (data->dev, "%s halt\n", data->name); 393 DBG (data->dev, "%s halt\n", data->name);
392 spin_lock_irq (&data->dev->lock); 394 spin_lock_irq (&data->dev->lock);
393 if (likely (data->ep != NULL)) 395 if (likely (data->ep != NULL))
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 3e4b35e50c24..68dbcc3e4cc2 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -1608,7 +1608,7 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1608 return -EINVAL; 1608 return -EINVAL;
1609 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN)) 1609 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1610 return -ESHUTDOWN; 1610 return -ESHUTDOWN;
1611 spin_lock_irqsave(&ep->dev->lock, iflags); 1611 spin_lock_irqsave(&dev->lock, iflags);
1612 /* map the buffer for dma */ 1612 /* map the buffer for dma */
1613 if (usbreq->length && 1613 if (usbreq->length &&
1614 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) { 1614 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
@@ -1625,8 +1625,10 @@ static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1625 DMA_FROM_DEVICE); 1625 DMA_FROM_DEVICE);
1626 } else { 1626 } else {
1627 req->buf = kzalloc(usbreq->length, GFP_ATOMIC); 1627 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1628 if (!req->buf) 1628 if (!req->buf) {
1629 return -ENOMEM; 1629 retval = -ENOMEM;
1630 goto probe_end;
1631 }
1630 if (ep->in) { 1632 if (ep->in) {
1631 memcpy(req->buf, usbreq->buf, usbreq->length); 1633 memcpy(req->buf, usbreq->buf, usbreq->length);
1632 req->dma = dma_map_single(&dev->pdev->dev, 1634 req->dma = dma_map_single(&dev->pdev->dev,
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 015118535f77..6dcc1f68fa60 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1083,7 +1083,9 @@ static void irq_device_state(struct r8a66597 *r8a66597)
1083 1083
1084 if (dvsq == DS_DFLT) { 1084 if (dvsq == DS_DFLT) {
1085 /* bus reset */ 1085 /* bus reset */
1086 spin_unlock(&r8a66597->lock);
1086 r8a66597->driver->disconnect(&r8a66597->gadget); 1087 r8a66597->driver->disconnect(&r8a66597->gadget);
1088 spin_lock(&r8a66597->lock);
1087 r8a66597_update_usb_speed(r8a66597); 1089 r8a66597_update_usb_speed(r8a66597);
1088 } 1090 }
1089 if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG) 1091 if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 98ded66e8d3f..42abd0f603bf 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1247,24 +1247,27 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
1247 1247
1248static void scan_async (struct ehci_hcd *ehci) 1248static void scan_async (struct ehci_hcd *ehci)
1249{ 1249{
1250 bool stopped;
1250 struct ehci_qh *qh; 1251 struct ehci_qh *qh;
1251 enum ehci_timer_action action = TIMER_IO_WATCHDOG; 1252 enum ehci_timer_action action = TIMER_IO_WATCHDOG;
1252 1253
1253 ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index); 1254 ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
1254 timer_action_done (ehci, TIMER_ASYNC_SHRINK); 1255 timer_action_done (ehci, TIMER_ASYNC_SHRINK);
1255rescan: 1256rescan:
1257 stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state);
1256 qh = ehci->async->qh_next.qh; 1258 qh = ehci->async->qh_next.qh;
1257 if (likely (qh != NULL)) { 1259 if (likely (qh != NULL)) {
1258 do { 1260 do {
1259 /* clean any finished work for this qh */ 1261 /* clean any finished work for this qh */
1260 if (!list_empty (&qh->qtd_list) 1262 if (!list_empty(&qh->qtd_list) && (stopped ||
1261 && qh->stamp != ehci->stamp) { 1263 qh->stamp != ehci->stamp)) {
1262 int temp; 1264 int temp;
1263 1265
1264 /* unlinks could happen here; completion 1266 /* unlinks could happen here; completion
1265 * reporting drops the lock. rescan using 1267 * reporting drops the lock. rescan using
1266 * the latest schedule, but don't rescan 1268 * the latest schedule, but don't rescan
1267 * qhs we already finished (no looping). 1269 * qhs we already finished (no looping)
1270 * unless the controller is stopped.
1268 */ 1271 */
1269 qh = qh_get (qh); 1272 qh = qh_get (qh);
1270 qh->stamp = ehci->stamp; 1273 qh->stamp = ehci->stamp;
@@ -1285,9 +1288,9 @@ rescan:
1285 */ 1288 */
1286 if (list_empty(&qh->qtd_list) 1289 if (list_empty(&qh->qtd_list)
1287 && qh->qh_state == QH_STATE_LINKED) { 1290 && qh->qh_state == QH_STATE_LINKED) {
1288 if (!ehci->reclaim 1291 if (!ehci->reclaim && (stopped ||
1289 && ((ehci->stamp - qh->stamp) & 0x1fff) 1292 ((ehci->stamp - qh->stamp) & 0x1fff)
1290 >= (EHCI_SHRINK_FRAMES * 8)) 1293 >= EHCI_SHRINK_FRAMES * 8))
1291 start_unlink_async(ehci, qh); 1294 start_unlink_async(ehci, qh);
1292 else 1295 else
1293 action = TIMER_ASYNC_SHRINK; 1296 action = TIMER_ASYNC_SHRINK;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index f50e84ac570a..795345ad45e6 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -295,7 +295,7 @@ static void alloc_mem(struct usb_hcd *hcd, struct isp1760_qtd *qtd)
295 } 295 }
296 296
297 dev_err(hcd->self.controller, 297 dev_err(hcd->self.controller,
298 "%s: Can not allocate %lu bytes of memory\n" 298 "%s: Cannot allocate %zu bytes of memory\n"
299 "Current memory map:\n", 299 "Current memory map:\n",
300 __func__, qtd->length); 300 __func__, qtd->length);
301 for (i = 0; i < BLOCKS; i++) { 301 for (i = 0; i < BLOCKS; i++) {
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index 17a6043c1fa0..958d985f2951 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -33,7 +33,7 @@
33 33
34#ifdef __LITTLE_ENDIAN 34#ifdef __LITTLE_ENDIAN
35#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C) 35#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C)
36#elif __BIG_ENDIAN 36#elif defined(__BIG_ENDIAN)
37#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \ 37#define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \
38 USBH_ENABLE_BE) 38 USBH_ENABLE_BE)
39#else 39#else
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 1d586d4f7b56..9b166d70ae91 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -84,65 +84,92 @@ int usb_amd_find_chipset_info(void)
84{ 84{
85 u8 rev = 0; 85 u8 rev = 0;
86 unsigned long flags; 86 unsigned long flags;
87 struct amd_chipset_info info;
88 int ret;
87 89
88 spin_lock_irqsave(&amd_lock, flags); 90 spin_lock_irqsave(&amd_lock, flags);
89 91
90 amd_chipset.probe_count++;
91 /* probe only once */ 92 /* probe only once */
92 if (amd_chipset.probe_count > 1) { 93 if (amd_chipset.probe_count > 0) {
94 amd_chipset.probe_count++;
93 spin_unlock_irqrestore(&amd_lock, flags); 95 spin_unlock_irqrestore(&amd_lock, flags);
94 return amd_chipset.probe_result; 96 return amd_chipset.probe_result;
95 } 97 }
98 memset(&info, 0, sizeof(info));
99 spin_unlock_irqrestore(&amd_lock, flags);
96 100
97 amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); 101 info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
98 if (amd_chipset.smbus_dev) { 102 if (info.smbus_dev) {
99 rev = amd_chipset.smbus_dev->revision; 103 rev = info.smbus_dev->revision;
100 if (rev >= 0x40) 104 if (rev >= 0x40)
101 amd_chipset.sb_type = 1; 105 info.sb_type = 1;
102 else if (rev >= 0x30 && rev <= 0x3b) 106 else if (rev >= 0x30 && rev <= 0x3b)
103 amd_chipset.sb_type = 3; 107 info.sb_type = 3;
104 } else { 108 } else {
105 amd_chipset.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 109 info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
106 0x780b, NULL); 110 0x780b, NULL);
107 if (!amd_chipset.smbus_dev) { 111 if (!info.smbus_dev) {
108 spin_unlock_irqrestore(&amd_lock, flags); 112 ret = 0;
109 return 0; 113 goto commit;
110 } 114 }
111 rev = amd_chipset.smbus_dev->revision; 115
116 rev = info.smbus_dev->revision;
112 if (rev >= 0x11 && rev <= 0x18) 117 if (rev >= 0x11 && rev <= 0x18)
113 amd_chipset.sb_type = 2; 118 info.sb_type = 2;
114 } 119 }
115 120
116 if (amd_chipset.sb_type == 0) { 121 if (info.sb_type == 0) {
117 if (amd_chipset.smbus_dev) { 122 if (info.smbus_dev) {
118 pci_dev_put(amd_chipset.smbus_dev); 123 pci_dev_put(info.smbus_dev);
119 amd_chipset.smbus_dev = NULL; 124 info.smbus_dev = NULL;
120 } 125 }
121 spin_unlock_irqrestore(&amd_lock, flags); 126 ret = 0;
122 return 0; 127 goto commit;
123 } 128 }
124 129
125 amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL); 130 info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
126 if (amd_chipset.nb_dev) { 131 if (info.nb_dev) {
127 amd_chipset.nb_type = 1; 132 info.nb_type = 1;
128 } else { 133 } else {
129 amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 134 info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
130 0x1510, NULL); 135 if (info.nb_dev) {
131 if (amd_chipset.nb_dev) { 136 info.nb_type = 2;
132 amd_chipset.nb_type = 2; 137 } else {
133 } else { 138 info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
134 amd_chipset.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 139 0x9600, NULL);
135 0x9600, NULL); 140 if (info.nb_dev)
136 if (amd_chipset.nb_dev) 141 info.nb_type = 3;
137 amd_chipset.nb_type = 3;
138 } 142 }
139 } 143 }
140 144
141 amd_chipset.probe_result = 1; 145 ret = info.probe_result = 1;
142 printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); 146 printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
143 147
144 spin_unlock_irqrestore(&amd_lock, flags); 148commit:
145 return amd_chipset.probe_result; 149
150 spin_lock_irqsave(&amd_lock, flags);
151 if (amd_chipset.probe_count > 0) {
152 /* race - someone else was faster - drop devices */
153
154 /* Mark that we where here */
155 amd_chipset.probe_count++;
156 ret = amd_chipset.probe_result;
157
158 spin_unlock_irqrestore(&amd_lock, flags);
159
160 if (info.nb_dev)
161 pci_dev_put(info.nb_dev);
162 if (info.smbus_dev)
163 pci_dev_put(info.smbus_dev);
164
165 } else {
166 /* no race - commit the result */
167 info.probe_count++;
168 amd_chipset = info;
169 spin_unlock_irqrestore(&amd_lock, flags);
170 }
171
172 return ret;
146} 173}
147EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); 174EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
148 175
@@ -284,6 +311,7 @@ EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
284 311
285void usb_amd_dev_put(void) 312void usb_amd_dev_put(void)
286{ 313{
314 struct pci_dev *nb, *smbus;
287 unsigned long flags; 315 unsigned long flags;
288 316
289 spin_lock_irqsave(&amd_lock, flags); 317 spin_lock_irqsave(&amd_lock, flags);
@@ -294,20 +322,23 @@ void usb_amd_dev_put(void)
294 return; 322 return;
295 } 323 }
296 324
297 if (amd_chipset.nb_dev) { 325 /* save them to pci_dev_put outside of spinlock */
298 pci_dev_put(amd_chipset.nb_dev); 326 nb = amd_chipset.nb_dev;
299 amd_chipset.nb_dev = NULL; 327 smbus = amd_chipset.smbus_dev;
300 } 328
301 if (amd_chipset.smbus_dev) { 329 amd_chipset.nb_dev = NULL;
302 pci_dev_put(amd_chipset.smbus_dev); 330 amd_chipset.smbus_dev = NULL;
303 amd_chipset.smbus_dev = NULL;
304 }
305 amd_chipset.nb_type = 0; 331 amd_chipset.nb_type = 0;
306 amd_chipset.sb_type = 0; 332 amd_chipset.sb_type = 0;
307 amd_chipset.isoc_reqs = 0; 333 amd_chipset.isoc_reqs = 0;
308 amd_chipset.probe_result = 0; 334 amd_chipset.probe_result = 0;
309 335
310 spin_unlock_irqrestore(&amd_lock, flags); 336 spin_unlock_irqrestore(&amd_lock, flags);
337
338 if (nb)
339 pci_dev_put(nb);
340 if (smbus)
341 pci_dev_put(smbus);
311} 342}
312EXPORT_SYMBOL_GPL(usb_amd_dev_put); 343EXPORT_SYMBOL_GPL(usb_amd_dev_put);
313 344
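usb_amd_find_chipset_info() above now runs the slow pci_get_device() walks into a local amd_chipset_info with the spinlock dropped and only commits the result under the lock, discarding its copy if another CPU won the race; usb_amd_dev_put() likewise defers pci_dev_put() until after the lock is released. A generic pthread sketch of the probe-outside-the-lock, commit-under-it scheme (compile with -pthread; chipset_info and probe_hw() are stand-ins, with the mutex playing the role of amd_lock):

    #include <pthread.h>
    #include <stdio.h>

    struct chipset_info {
        int probe_count;
        int probe_result;
    };

    static struct chipset_info cached;
    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

    static int probe_hw(struct chipset_info *info)
    {
        /* stands in for the PCI walks done without the spinlock held */
        info->probe_result = 1;
        return info->probe_result;
    }

    static int find_chipset_info(void)
    {
        struct chipset_info info = { 0 };
        int ret;

        pthread_mutex_lock(&cache_lock);
        if (cached.probe_count > 0) {       /* already probed once */
            cached.probe_count++;
            ret = cached.probe_result;
            pthread_mutex_unlock(&cache_lock);
            return ret;
        }
        pthread_mutex_unlock(&cache_lock);

        ret = probe_hw(&info);              /* slow work, lock dropped */

        pthread_mutex_lock(&cache_lock);
        if (cached.probe_count > 0) {
            /* somebody else was faster - keep their result, drop ours */
            cached.probe_count++;
            ret = cached.probe_result;
        } else {
            info.probe_count = 1;
            cached = info;                  /* commit our result */
        }
        pthread_mutex_unlock(&cache_lock);
        return ret;
    }

    int main(void)
    {
        printf("first  call: %d\n", find_chipset_info());
        printf("second call: %d\n", find_chipset_info());
        return 0;
    }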
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index a003e79aacdc..627f3438028c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -846,7 +846,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
846 * Skip ports that don't have known speeds, or have duplicate 846 * Skip ports that don't have known speeds, or have duplicate
847 * Extended Capabilities port speed entries. 847 * Extended Capabilities port speed entries.
848 */ 848 */
849 if (port_speed == 0 || port_speed == -1) 849 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
850 continue; 850 continue;
851 851
852 /* 852 /*
@@ -974,6 +974,47 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
974 return 0; 974 return 0;
975} 975}
976 976
977/*
978 * Convert interval expressed as 2^(bInterval - 1) == interval into
979 * straight exponent value 2^n == interval.
980 *
981 */
982static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
983 struct usb_host_endpoint *ep)
984{
985 unsigned int interval;
986
987 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
988 if (interval != ep->desc.bInterval - 1)
989 dev_warn(&udev->dev,
990 "ep %#x - rounding interval to %d microframes\n",
991 ep->desc.bEndpointAddress,
992 1 << interval);
993
994 return interval;
995}
996
997/*
998 * Convert bInterval expressed in frames (in 1-255 range) to exponent of
999 * microframes, rounded down to nearest power of 2.
1000 */
1001static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
1002 struct usb_host_endpoint *ep)
1003{
1004 unsigned int interval;
1005
1006 interval = fls(8 * ep->desc.bInterval) - 1;
1007 interval = clamp_val(interval, 3, 10);
1008 if ((1 << interval) != 8 * ep->desc.bInterval)
1009 dev_warn(&udev->dev,
1010 "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
1011 ep->desc.bEndpointAddress,
1012 1 << interval,
1013 8 * ep->desc.bInterval);
1014
1015 return interval;
1016}
1017
977/* Return the polling or NAK interval. 1018/* Return the polling or NAK interval.
978 * 1019 *
979 * The polling interval is expressed in "microframes". If xHCI's Interval field 1020 * The polling interval is expressed in "microframes". If xHCI's Interval field
@@ -982,7 +1023,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
982 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval 1023 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
983 * is set to 0. 1024 * is set to 0.
984 */ 1025 */
985static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, 1026static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
986 struct usb_host_endpoint *ep) 1027 struct usb_host_endpoint *ep)
987{ 1028{
988 unsigned int interval = 0; 1029 unsigned int interval = 0;
@@ -991,45 +1032,38 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
991 case USB_SPEED_HIGH: 1032 case USB_SPEED_HIGH:
992 /* Max NAK rate */ 1033 /* Max NAK rate */
993 if (usb_endpoint_xfer_control(&ep->desc) || 1034 if (usb_endpoint_xfer_control(&ep->desc) ||
994 usb_endpoint_xfer_bulk(&ep->desc)) 1035 usb_endpoint_xfer_bulk(&ep->desc)) {
995 interval = ep->desc.bInterval; 1036 interval = ep->desc.bInterval;
1037 break;
1038 }
996 /* Fall through - SS and HS isoc/int have same decoding */ 1039 /* Fall through - SS and HS isoc/int have same decoding */
1040
997 case USB_SPEED_SUPER: 1041 case USB_SPEED_SUPER:
998 if (usb_endpoint_xfer_int(&ep->desc) || 1042 if (usb_endpoint_xfer_int(&ep->desc) ||
999 usb_endpoint_xfer_isoc(&ep->desc)) { 1043 usb_endpoint_xfer_isoc(&ep->desc)) {
1000 if (ep->desc.bInterval == 0) 1044 interval = xhci_parse_exponent_interval(udev, ep);
1001 interval = 0;
1002 else
1003 interval = ep->desc.bInterval - 1;
1004 if (interval > 15)
1005 interval = 15;
1006 if (interval != ep->desc.bInterval + 1)
1007 dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
1008 ep->desc.bEndpointAddress, 1 << interval);
1009 } 1045 }
1010 break; 1046 break;
1011 /* Convert bInterval (in 1-255 frames) to microframes and round down to 1047
1012 * nearest power of 2.
1013 */
1014 case USB_SPEED_FULL: 1048 case USB_SPEED_FULL:
1049 if (usb_endpoint_xfer_int(&ep->desc)) {
1050 interval = xhci_parse_exponent_interval(udev, ep);
1051 break;
1052 }
1053 /*
1054 * Fall through for isochronous endpoint interval decoding
1055 * since it uses the same rules as low speed interrupt
1056 * endpoints.
1057 */
1058
1015 case USB_SPEED_LOW: 1059 case USB_SPEED_LOW:
1016 if (usb_endpoint_xfer_int(&ep->desc) || 1060 if (usb_endpoint_xfer_int(&ep->desc) ||
1017 usb_endpoint_xfer_isoc(&ep->desc)) { 1061 usb_endpoint_xfer_isoc(&ep->desc)) {
1018 interval = fls(8*ep->desc.bInterval) - 1; 1062
1019 if (interval > 10) 1063 interval = xhci_parse_frame_interval(udev, ep);
1020 interval = 10;
1021 if (interval < 3)
1022 interval = 3;
1023 if ((1 << interval) != 8*ep->desc.bInterval)
1024 dev_warn(&udev->dev,
1025 "ep %#x - rounding interval"
1026 " to %d microframes, "
1027 "ep desc says %d microframes\n",
1028 ep->desc.bEndpointAddress,
1029 1 << interval,
1030 8*ep->desc.bInterval);
1031 } 1064 }
1032 break; 1065 break;
1066
1033 default: 1067 default:
1034 BUG(); 1068 BUG();
1035 } 1069 }
@@ -1041,7 +1075,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
1041 * transaction opportunities per microframe", but that goes in the Max Burst 1075 * transaction opportunities per microframe", but that goes in the Max Burst
1042 * endpoint context field. 1076 * endpoint context field.
1043 */ 1077 */
1044static inline u32 xhci_get_endpoint_mult(struct usb_device *udev, 1078static u32 xhci_get_endpoint_mult(struct usb_device *udev,
1045 struct usb_host_endpoint *ep) 1079 struct usb_host_endpoint *ep)
1046{ 1080{
1047 if (udev->speed != USB_SPEED_SUPER || 1081 if (udev->speed != USB_SPEED_SUPER ||
@@ -1050,7 +1084,7 @@ static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
1050 return ep->ss_ep_comp.bmAttributes; 1084 return ep->ss_ep_comp.bmAttributes;
1051} 1085}
1052 1086
1053static inline u32 xhci_get_endpoint_type(struct usb_device *udev, 1087static u32 xhci_get_endpoint_type(struct usb_device *udev,
1054 struct usb_host_endpoint *ep) 1088 struct usb_host_endpoint *ep)
1055{ 1089{
1056 int in; 1090 int in;
@@ -1084,7 +1118,7 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
1084 * Basically, this is the maxpacket size, multiplied by the burst size 1118 * Basically, this is the maxpacket size, multiplied by the burst size
1085 * and mult size. 1119 * and mult size.
1086 */ 1120 */
1087static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, 1121static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
1088 struct usb_device *udev, 1122 struct usb_device *udev,
1089 struct usb_host_endpoint *ep) 1123 struct usb_host_endpoint *ep)
1090{ 1124{
@@ -1727,12 +1761,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
1727 * found a similar duplicate. 1761 * found a similar duplicate.
1728 */ 1762 */
1729 if (xhci->port_array[i] != major_revision && 1763 if (xhci->port_array[i] != major_revision &&
1730 xhci->port_array[i] != (u8) -1) { 1764 xhci->port_array[i] != DUPLICATE_ENTRY) {
1731 if (xhci->port_array[i] == 0x03) 1765 if (xhci->port_array[i] == 0x03)
1732 xhci->num_usb3_ports--; 1766 xhci->num_usb3_ports--;
1733 else 1767 else
1734 xhci->num_usb2_ports--; 1768 xhci->num_usb2_ports--;
1735 xhci->port_array[i] = (u8) -1; 1769 xhci->port_array[i] = DUPLICATE_ENTRY;
1736 } 1770 }
1737 /* FIXME: Should we disable the port? */ 1771 /* FIXME: Should we disable the port? */
1738 continue; 1772 continue;
@@ -1831,7 +1865,7 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
1831 for (i = 0; i < num_ports; i++) { 1865 for (i = 0; i < num_ports; i++) {
1832 if (xhci->port_array[i] == 0x03 || 1866 if (xhci->port_array[i] == 0x03 ||
1833 xhci->port_array[i] == 0 || 1867 xhci->port_array[i] == 0 ||
1834 xhci->port_array[i] == -1) 1868 xhci->port_array[i] == DUPLICATE_ENTRY)
1835 continue; 1869 continue;
1836 1870
1837 xhci->usb2_ports[port_index] = 1871 xhci->usb2_ports[port_index] =
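The two helpers introduced in the xhci-mem.c hunks split the endpoint interval decoding into the two encodings the USB descriptors use: bInterval as an exponent (the period is 2^(bInterval-1) microframes, clamped so the resulting exponent stays in 0..15) and bInterval as a frame count (1..255 frames, converted to microframes and rounded down to a power of two between 2^3 and 2^10). The stand-alone sketch below reproduces just that arithmetic; fls() and clamp_uint() are local stand-ins for the kernel's fls() and clamp_val(), and the sample values are illustrative.

#include <stdio.h>

/* Minimal stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/* bInterval is an exponent: period = 2^(bInterval - 1) microframes. */
static unsigned int parse_exponent_interval(unsigned int bInterval)
{
        return clamp_uint(bInterval, 1, 16) - 1;
}

/* bInterval is a frame count (1-255): convert to microframes, round down to a
 * power of two, and keep the exponent within 3..10. */
static unsigned int parse_frame_interval(unsigned int bInterval)
{
        return clamp_uint(fls(8 * bInterval) - 1, 3, 10);
}

int main(void)
{
        /* bInterval = 4 on a high-speed interrupt endpoint: 2^3 = 8 microframes. */
        printf("exponent form: 2^%u microframes\n", parse_exponent_interval(4));
        /* bInterval = 10 frames on a full-speed isoc endpoint: 80 microframes,
         * rounded down to 2^6 = 64. */
        printf("frame form:    2^%u microframes\n", parse_frame_interval(10));
        return 0;
}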
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ceea9f33491c..a10494c2f3c7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -114,6 +114,10 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
114 if (pdev->vendor == PCI_VENDOR_ID_NEC) 114 if (pdev->vendor == PCI_VENDOR_ID_NEC)
115 xhci->quirks |= XHCI_NEC_HOST; 115 xhci->quirks |= XHCI_NEC_HOST;
116 116
117 /* AMD PLL quirk */
118 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
119 xhci->quirks |= XHCI_AMD_PLL_FIX;
120
117 /* Make sure the HC is halted. */ 121 /* Make sure the HC is halted. */
118 retval = xhci_halt(xhci); 122 retval = xhci_halt(xhci);
119 if (retval) 123 if (retval)
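The xhci-pci.c hunk records the workaround as a quirk bit at setup time, so later code only tests a flag instead of re-probing the chipset. A tiny sketch of that flags pattern follows; the macro names are simplified stand-ins for the driver's XHCI_* quirk bits and the vendor check is faked.

#include <stdio.h>

#define QUIRK_NEC_HOST     (1 << 2)
#define QUIRK_AMD_PLL_FIX  (1 << 3)

int main(void)
{
        unsigned int quirks = 0;
        int vendor_is_amd = 1;          /* pretend the PCI vendor/chipset check matched */

        if (vendor_is_amd)
                quirks |= QUIRK_AMD_PLL_FIX;

        /* Hot paths only need a cheap bit test to decide whether the
         * workaround applies. */
        if (quirks & QUIRK_AMD_PLL_FIX)
                puts("AMD PLL workaround enabled for this host");
        return 0;
}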
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index cfc1ad92473f..7437386a9a50 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -93,7 +93,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
93/* Does this link TRB point to the first segment in a ring, 93/* Does this link TRB point to the first segment in a ring,
94 * or was the previous TRB the last TRB on the last segment in the ERST? 94 * or was the previous TRB the last TRB on the last segment in the ERST?
95 */ 95 */
96static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring, 96static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
97 struct xhci_segment *seg, union xhci_trb *trb) 97 struct xhci_segment *seg, union xhci_trb *trb)
98{ 98{
99 if (ring == xhci->event_ring) 99 if (ring == xhci->event_ring)
@@ -107,7 +107,7 @@ static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring
107 * segment? I.e. would the updated event TRB pointer step off the end of the 107 * segment? I.e. would the updated event TRB pointer step off the end of the
108 * event seg? 108 * event seg?
109 */ 109 */
110static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, 110static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
111 struct xhci_segment *seg, union xhci_trb *trb) 111 struct xhci_segment *seg, union xhci_trb *trb)
112{ 112{
113 if (ring == xhci->event_ring) 113 if (ring == xhci->event_ring)
@@ -116,7 +116,7 @@ static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
116 return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK); 116 return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
117} 117}
118 118
119static inline int enqueue_is_link_trb(struct xhci_ring *ring) 119static int enqueue_is_link_trb(struct xhci_ring *ring)
120{ 120{
121 struct xhci_link_trb *link = &ring->enqueue->link; 121 struct xhci_link_trb *link = &ring->enqueue->link;
122 return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)); 122 return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
@@ -592,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
592 ep->ep_state |= SET_DEQ_PENDING; 592 ep->ep_state |= SET_DEQ_PENDING;
593} 593}
594 594
595static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci, 595static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
596 struct xhci_virt_ep *ep) 596 struct xhci_virt_ep *ep)
597{ 597{
598 ep->ep_state &= ~EP_HALT_PENDING; 598 ep->ep_state &= ~EP_HALT_PENDING;
@@ -619,6 +619,13 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
619 619
620 /* Only giveback urb when this is the last td in urb */ 620 /* Only giveback urb when this is the last td in urb */
621 if (urb_priv->td_cnt == urb_priv->length) { 621 if (urb_priv->td_cnt == urb_priv->length) {
622 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
623 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
624 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
625 if (xhci->quirks & XHCI_AMD_PLL_FIX)
626 usb_amd_quirk_pll_enable();
627 }
628 }
622 usb_hcd_unlink_urb_from_ep(hcd, urb); 629 usb_hcd_unlink_urb_from_ep(hcd, urb);
623 xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb); 630 xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);
624 631
@@ -1209,7 +1216,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
1209 * Skip ports that don't have known speeds, or have duplicate 1216 * Skip ports that don't have known speeds, or have duplicate
1210 * Extended Capabilities port speed entries. 1217 * Extended Capabilities port speed entries.
1211 */ 1218 */
1212 if (port_speed == 0 || port_speed == -1) 1219 if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
1213 continue; 1220 continue;
1214 1221
1215 /* 1222 /*
@@ -1235,6 +1242,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
1235 u8 major_revision; 1242 u8 major_revision;
1236 struct xhci_bus_state *bus_state; 1243 struct xhci_bus_state *bus_state;
1237 u32 __iomem **port_array; 1244 u32 __iomem **port_array;
1245 bool bogus_port_status = false;
1238 1246
1239 /* Port status change events always have a successful completion code */ 1247 /* Port status change events always have a successful completion code */
1240 if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) { 1248 if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
@@ -1247,6 +1255,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
1247 max_ports = HCS_MAX_PORTS(xhci->hcs_params1); 1255 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1248 if ((port_id <= 0) || (port_id > max_ports)) { 1256 if ((port_id <= 0) || (port_id > max_ports)) {
1249 xhci_warn(xhci, "Invalid port id %d\n", port_id); 1257 xhci_warn(xhci, "Invalid port id %d\n", port_id);
1258 bogus_port_status = true;
1250 goto cleanup; 1259 goto cleanup;
1251 } 1260 }
1252 1261
@@ -1258,12 +1267,14 @@ static void handle_port_status(struct xhci_hcd *xhci,
1258 xhci_warn(xhci, "Event for port %u not in " 1267 xhci_warn(xhci, "Event for port %u not in "
1259 "Extended Capabilities, ignoring.\n", 1268 "Extended Capabilities, ignoring.\n",
1260 port_id); 1269 port_id);
1270 bogus_port_status = true;
1261 goto cleanup; 1271 goto cleanup;
1262 } 1272 }
1263 if (major_revision == (u8) -1) { 1273 if (major_revision == DUPLICATE_ENTRY) {
1264 xhci_warn(xhci, "Event for port %u duplicated in" 1274 xhci_warn(xhci, "Event for port %u duplicated in"
1265 "Extended Capabilities, ignoring.\n", 1275 "Extended Capabilities, ignoring.\n",
1266 port_id); 1276 port_id);
1277 bogus_port_status = true;
1267 goto cleanup; 1278 goto cleanup;
1268 } 1279 }
1269 1280
@@ -1335,6 +1346,13 @@ cleanup:
1335 /* Update event ring dequeue pointer before dropping the lock */ 1346 /* Update event ring dequeue pointer before dropping the lock */
1336 inc_deq(xhci, xhci->event_ring, true); 1347 inc_deq(xhci, xhci->event_ring, true);
1337 1348
1349 /* Don't make the USB core poll the roothub if we got a bad port status
1350 * change event. Besides, at that point we can't tell which roothub
1351 * (USB 2.0 or USB 3.0) to kick.
1352 */
1353 if (bogus_port_status)
1354 return;
1355
1338 spin_unlock(&xhci->lock); 1356 spin_unlock(&xhci->lock);
1339 /* Pass this up to the core */ 1357 /* Pass this up to the core */
1340 usb_hcd_poll_rh_status(hcd); 1358 usb_hcd_poll_rh_status(hcd);
@@ -1554,8 +1572,17 @@ td_cleanup:
1554 1572
1555 urb_priv->td_cnt++; 1573 urb_priv->td_cnt++;
1556 /* Giveback the urb when all the tds are completed */ 1574 /* Giveback the urb when all the tds are completed */
1557 if (urb_priv->td_cnt == urb_priv->length) 1575 if (urb_priv->td_cnt == urb_priv->length) {
1558 ret = 1; 1576 ret = 1;
1577 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
1578 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
1579 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
1580 == 0) {
1581 if (xhci->quirks & XHCI_AMD_PLL_FIX)
1582 usb_amd_quirk_pll_enable();
1583 }
1584 }
1585 }
1559 } 1586 }
1560 1587
1561 return ret; 1588 return ret;
@@ -1675,71 +1702,52 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1675 struct urb_priv *urb_priv; 1702 struct urb_priv *urb_priv;
1676 int idx; 1703 int idx;
1677 int len = 0; 1704 int len = 0;
1678 int skip_td = 0;
1679 union xhci_trb *cur_trb; 1705 union xhci_trb *cur_trb;
1680 struct xhci_segment *cur_seg; 1706 struct xhci_segment *cur_seg;
1707 struct usb_iso_packet_descriptor *frame;
1681 u32 trb_comp_code; 1708 u32 trb_comp_code;
1709 bool skip_td = false;
1682 1710
1683 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1711 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
1684 trb_comp_code = GET_COMP_CODE(event->transfer_len); 1712 trb_comp_code = GET_COMP_CODE(event->transfer_len);
1685 urb_priv = td->urb->hcpriv; 1713 urb_priv = td->urb->hcpriv;
1686 idx = urb_priv->td_cnt; 1714 idx = urb_priv->td_cnt;
1715 frame = &td->urb->iso_frame_desc[idx];
1687 1716
1688 if (ep->skip) { 1717 /* handle completion code */
1689 /* The transfer is partly done */ 1718 switch (trb_comp_code) {
1690 *status = -EXDEV; 1719 case COMP_SUCCESS:
1691 td->urb->iso_frame_desc[idx].status = -EXDEV; 1720 frame->status = 0;
1692 } else { 1721 xhci_dbg(xhci, "Successful isoc transfer!\n");
1693 /* handle completion code */ 1722 break;
1694 switch (trb_comp_code) { 1723 case COMP_SHORT_TX:
1695 case COMP_SUCCESS: 1724 frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
1696 td->urb->iso_frame_desc[idx].status = 0; 1725 -EREMOTEIO : 0;
1697 xhci_dbg(xhci, "Successful isoc transfer!\n"); 1726 break;
1698 break; 1727 case COMP_BW_OVER:
1699 case COMP_SHORT_TX: 1728 frame->status = -ECOMM;
1700 if (td->urb->transfer_flags & URB_SHORT_NOT_OK) 1729 skip_td = true;
1701 td->urb->iso_frame_desc[idx].status = 1730 break;
1702 -EREMOTEIO; 1731 case COMP_BUFF_OVER:
1703 else 1732 case COMP_BABBLE:
1704 td->urb->iso_frame_desc[idx].status = 0; 1733 frame->status = -EOVERFLOW;
1705 break; 1734 skip_td = true;
1706 case COMP_BW_OVER: 1735 break;
1707 td->urb->iso_frame_desc[idx].status = -ECOMM; 1736 case COMP_STALL:
1708 skip_td = 1; 1737 frame->status = -EPROTO;
1709 break; 1738 skip_td = true;
1710 case COMP_BUFF_OVER: 1739 break;
1711 case COMP_BABBLE: 1740 case COMP_STOP:
1712 td->urb->iso_frame_desc[idx].status = -EOVERFLOW; 1741 case COMP_STOP_INVAL:
1713 skip_td = 1; 1742 break;
1714 break; 1743 default:
1715 case COMP_STALL: 1744 frame->status = -1;
1716 td->urb->iso_frame_desc[idx].status = -EPROTO; 1745 break;
1717 skip_td = 1;
1718 break;
1719 case COMP_STOP:
1720 case COMP_STOP_INVAL:
1721 break;
1722 default:
1723 td->urb->iso_frame_desc[idx].status = -1;
1724 break;
1725 }
1726 }
1727
1728 /* calc actual length */
1729 if (ep->skip) {
1730 td->urb->iso_frame_desc[idx].actual_length = 0;
1731 /* Update ring dequeue pointer */
1732 while (ep_ring->dequeue != td->last_trb)
1733 inc_deq(xhci, ep_ring, false);
1734 inc_deq(xhci, ep_ring, false);
1735 return finish_td(xhci, td, event_trb, event, ep, status, true);
1736 } 1746 }
1737 1747
1738 if (trb_comp_code == COMP_SUCCESS || skip_td == 1) { 1748 if (trb_comp_code == COMP_SUCCESS || skip_td) {
1739 td->urb->iso_frame_desc[idx].actual_length = 1749 frame->actual_length = frame->length;
1740 td->urb->iso_frame_desc[idx].length; 1750 td->urb->actual_length += frame->length;
1741 td->urb->actual_length +=
1742 td->urb->iso_frame_desc[idx].length;
1743 } else { 1751 } else {
1744 for (cur_trb = ep_ring->dequeue, 1752 for (cur_trb = ep_ring->dequeue,
1745 cur_seg = ep_ring->deq_seg; cur_trb != event_trb; 1753 cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
@@ -1755,7 +1763,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1755 TRB_LEN(event->transfer_len); 1763 TRB_LEN(event->transfer_len);
1756 1764
1757 if (trb_comp_code != COMP_STOP_INVAL) { 1765 if (trb_comp_code != COMP_STOP_INVAL) {
1758 td->urb->iso_frame_desc[idx].actual_length = len; 1766 frame->actual_length = len;
1759 td->urb->actual_length += len; 1767 td->urb->actual_length += len;
1760 } 1768 }
1761 } 1769 }
@@ -1766,6 +1774,35 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1766 return finish_td(xhci, td, event_trb, event, ep, status, false); 1774 return finish_td(xhci, td, event_trb, event, ep, status, false);
1767} 1775}
1768 1776
1777static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1778 struct xhci_transfer_event *event,
1779 struct xhci_virt_ep *ep, int *status)
1780{
1781 struct xhci_ring *ep_ring;
1782 struct urb_priv *urb_priv;
1783 struct usb_iso_packet_descriptor *frame;
1784 int idx;
1785
1786 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
1787 urb_priv = td->urb->hcpriv;
1788 idx = urb_priv->td_cnt;
1789 frame = &td->urb->iso_frame_desc[idx];
1790
1791 /* The transfer is partly done */
1792 *status = -EXDEV;
1793 frame->status = -EXDEV;
1794
1795 /* calc actual length */
1796 frame->actual_length = 0;
1797
1798 /* Update ring dequeue pointer */
1799 while (ep_ring->dequeue != td->last_trb)
1800 inc_deq(xhci, ep_ring, false);
1801 inc_deq(xhci, ep_ring, false);
1802
1803 return finish_td(xhci, td, NULL, event, ep, status, true);
1804}
1805
1769/* 1806/*
1770 * Process bulk and interrupt tds, update urb status and actual_length. 1807 * Process bulk and interrupt tds, update urb status and actual_length.
1771 */ 1808 */
@@ -2024,36 +2061,42 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2024 } 2061 }
2025 2062
2026 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 2063 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
2064
2027 /* Is this a TRB in the currently executing TD? */ 2065 /* Is this a TRB in the currently executing TD? */
2028 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 2066 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
2029 td->last_trb, event_dma); 2067 td->last_trb, event_dma);
2030 if (event_seg && ep->skip) { 2068 if (!event_seg) {
2069 if (!ep->skip ||
2070 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2071 /* HC is busted, give up! */
2072 xhci_err(xhci,
2073 "ERROR Transfer event TRB DMA ptr not "
2074 "part of current TD\n");
2075 return -ESHUTDOWN;
2076 }
2077
2078 ret = skip_isoc_td(xhci, td, event, ep, &status);
2079 goto cleanup;
2080 }
2081
2082 if (ep->skip) {
2031 xhci_dbg(xhci, "Found td. Clear skip flag.\n"); 2083 xhci_dbg(xhci, "Found td. Clear skip flag.\n");
2032 ep->skip = false; 2084 ep->skip = false;
2033 } 2085 }
2034 if (!event_seg &&
2035 (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
2036 /* HC is busted, give up! */
2037 xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
2038 "part of current TD\n");
2039 return -ESHUTDOWN;
2040 }
2041 2086
2042 if (event_seg) { 2087 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
2043 event_trb = &event_seg->trbs[(event_dma - 2088 sizeof(*event_trb)];
2044 event_seg->dma) / sizeof(*event_trb)]; 2089 /*
2045 /* 2090 * No-op TRB should not trigger interrupts.
2046 * No-op TRB should not trigger interrupts. 2091 * If event_trb is a no-op TRB, it means the
2047 * If event_trb is a no-op TRB, it means the 2092 * corresponding TD has been cancelled. Just ignore
2048 * corresponding TD has been cancelled. Just ignore 2093 * the TD.
2049 * the TD. 2094 */
2050 */ 2095 if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
2051 if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK) 2096 == TRB_TYPE(TRB_TR_NOOP)) {
2052 == TRB_TYPE(TRB_TR_NOOP)) { 2097 xhci_dbg(xhci,
2053 xhci_dbg(xhci, "event_trb is a no-op TRB. " 2098 "event_trb is a no-op TRB. Skip it\n");
2054 "Skip it\n"); 2099 goto cleanup;
2055 goto cleanup;
2056 }
2057 } 2100 }
2058 2101
2059 /* Now update the urb's actual_length and give back to 2102 /* Now update the urb's actual_length and give back to
@@ -3126,6 +3169,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3126 } 3169 }
3127 } 3170 }
3128 3171
3172 if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
3173 if (xhci->quirks & XHCI_AMD_PLL_FIX)
3174 usb_amd_quirk_pll_disable();
3175 }
3176 xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;
3177
3129 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 3178 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3130 start_cycle, start_trb); 3179 start_cycle, start_trb);
3131 return 0; 3180 return 0;
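Several of the xhci-ring.c hunks above maintain a running count of in-flight isochronous requests (bandwidth_isoc_reqs): the count goes up when an isoc transfer is queued and down when its URB is given back, and the AMD PLL hooks fire only on the 0-to-1 and 1-to-0 transitions, so PLL power saving is off exactly while at least one isoc stream is active. The sketch below shows only that transition counting; pll_disable()/pll_enable() are hypothetical stand-ins for usb_amd_quirk_pll_disable()/_enable(), and no locking is shown.

#include <stdio.h>

static unsigned int isoc_reqs;      /* number of in-flight isoc requests */
static int quirk_active = 1;        /* pretend the AMD PLL quirk applies */

static void pll_disable(void) { puts("PLL power saving disabled"); }
static void pll_enable(void)  { puts("PLL power saving re-enabled"); }

/* Called when an isochronous transfer is queued. */
static void isoc_start(void)
{
        if (isoc_reqs == 0 && quirk_active)
                pll_disable();      /* first active stream: optimizations off */
        isoc_reqs++;
}

/* Called when an isochronous URB is given back. */
static void isoc_finish(void)
{
        isoc_reqs--;
        if (isoc_reqs == 0 && quirk_active)
                pll_enable();       /* last stream finished: optimizations back on */
}

int main(void)
{
        isoc_start();   /* disables */
        isoc_start();   /* no change */
        isoc_finish();  /* no change */
        isoc_finish();  /* re-enables */
        return 0;
}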
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 196e0181b2ed..81b976e45880 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -550,6 +550,9 @@ void xhci_stop(struct usb_hcd *hcd)
550 del_timer_sync(&xhci->event_ring_timer); 550 del_timer_sync(&xhci->event_ring_timer);
551#endif 551#endif
552 552
553 if (xhci->quirks & XHCI_AMD_PLL_FIX)
554 usb_amd_dev_put();
555
553 xhci_dbg(xhci, "// Disabling event ring interrupts\n"); 556 xhci_dbg(xhci, "// Disabling event ring interrupts\n");
554 temp = xhci_readl(xhci, &xhci->op_regs->status); 557 temp = xhci_readl(xhci, &xhci->op_regs->status);
555 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status); 558 xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@ -771,7 +774,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
771 774
772 /* If restore operation fails, re-initialize the HC during resume */ 775 /* If restore operation fails, re-initialize the HC during resume */
773 if ((temp & STS_SRE) || hibernated) { 776 if ((temp & STS_SRE) || hibernated) {
774 usb_root_hub_lost_power(hcd->self.root_hub); 777 /* Let the USB core know _both_ roothubs lost power. */
778 usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
779 usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
775 780
776 xhci_dbg(xhci, "Stop HCD\n"); 781 xhci_dbg(xhci, "Stop HCD\n");
777 xhci_halt(xhci); 782 xhci_halt(xhci);
@@ -2386,10 +2391,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2386 /* Everything but endpoint 0 is disabled, so free or cache the rings. */ 2391 /* Everything but endpoint 0 is disabled, so free or cache the rings. */
2387 last_freed_endpoint = 1; 2392 last_freed_endpoint = 1;
2388 for (i = 1; i < 31; ++i) { 2393 for (i = 1; i < 31; ++i) {
2389 if (!virt_dev->eps[i].ring) 2394 struct xhci_virt_ep *ep = &virt_dev->eps[i];
2390 continue; 2395
2391 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2396 if (ep->ep_state & EP_HAS_STREAMS) {
2392 last_freed_endpoint = i; 2397 xhci_free_stream_info(xhci, ep->stream_info);
2398 ep->stream_info = NULL;
2399 ep->ep_state &= ~EP_HAS_STREAMS;
2400 }
2401
2402 if (ep->ring) {
2403 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2404 last_freed_endpoint = i;
2405 }
2393 } 2406 }
2394 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); 2407 xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
2395 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); 2408 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
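In the xhci.c reset-device hunk, every endpoint except endpoint 0 is walked and any leftover stream bookkeeping is released (with EP_HAS_STREAMS cleared) before the transfer ring itself is freed or cached, so nothing is left pointing at freed stream data. The loop below is a schematic version with simplified, hypothetical types; free() stands in for xhci_free_stream_info() and the ring free/cache call.

#include <stdio.h>
#include <stdlib.h>

#define EP_HAS_STREAMS (1 << 0)

struct endpoint {
        void *stream_info;
        void *ring;
        unsigned int flags;
};

static void reset_device_cleanup(struct endpoint *eps, int n)
{
        int i;

        for (i = 1; i < n; i++) {               /* endpoint 0 is left alone */
                struct endpoint *ep = &eps[i];

                if (ep->flags & EP_HAS_STREAMS) {
                        free(ep->stream_info);  /* drop stream bookkeeping first */
                        ep->stream_info = NULL;
                        ep->flags &= ~EP_HAS_STREAMS;
                }
                if (ep->ring) {
                        free(ep->ring);         /* then free (or cache) the ring */
                        ep->ring = NULL;
                        printf("freed ring for endpoint %d\n", i);
                }
        }
}

int main(void)
{
        struct endpoint eps[3] = { { 0 } };

        eps[1].ring = malloc(16);
        eps[2].ring = malloc(16);
        eps[2].stream_info = malloc(16);
        eps[2].flags = EP_HAS_STREAMS;

        reset_device_cleanup(eps, 3);
        return 0;
}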
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 07e263063e37..ba1be6b7cc6d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -30,6 +30,7 @@
30 30
31/* Code sharing between pci-quirks and xhci hcd */ 31/* Code sharing between pci-quirks and xhci hcd */
32#include "xhci-ext-caps.h" 32#include "xhci-ext-caps.h"
33#include "pci-quirks.h"
33 34
34/* xHCI PCI Configuration Registers */ 35/* xHCI PCI Configuration Registers */
35#define XHCI_SBRN_OFFSET (0x60) 36#define XHCI_SBRN_OFFSET (0x60)
@@ -232,7 +233,7 @@ struct xhci_op_regs {
232 * notification type that matches a bit set in this bit field. 233 * notification type that matches a bit set in this bit field.
233 */ 234 */
234#define DEV_NOTE_MASK (0xffff) 235#define DEV_NOTE_MASK (0xffff)
235#define ENABLE_DEV_NOTE(x) (1 << x) 236#define ENABLE_DEV_NOTE(x) (1 << (x))
236/* Most of the device notification types should only be used for debug. 237/* Most of the device notification types should only be used for debug.
237 * SW does need to pay attention to function wake notifications. 238 * SW does need to pay attention to function wake notifications.
238 */ 239 */
@@ -348,6 +349,9 @@ struct xhci_op_regs {
348/* Initiate a warm port reset - complete when PORT_WRC is '1' */ 349/* Initiate a warm port reset - complete when PORT_WRC is '1' */
349#define PORT_WR (1 << 31) 350#define PORT_WR (1 << 31)
350 351
352/* We mark duplicate entries with -1 */
353#define DUPLICATE_ENTRY ((u8)(-1))
354
351/* Port Power Management Status and Control - port_power_base bitmasks */ 355/* Port Power Management Status and Control - port_power_base bitmasks */
352/* Inactivity timer value for transitions into U1, in microseconds. 356/* Inactivity timer value for transitions into U1, in microseconds.
353 * Timeout can be up to 127us. 0xFF means an infinite timeout. 357 * Timeout can be up to 127us. 0xFF means an infinite timeout.
@@ -601,11 +605,11 @@ struct xhci_ep_ctx {
601#define EP_STATE_STOPPED 3 605#define EP_STATE_STOPPED 3
602#define EP_STATE_ERROR 4 606#define EP_STATE_ERROR 4
603/* Mult - Max number of bursts within an interval, in EP companion desc. */ 607/* Mult - Max number of bursts within an interval, in EP companion desc. */
604#define EP_MULT(p) ((p & 0x3) << 8) 608#define EP_MULT(p) (((p) & 0x3) << 8)
605/* bits 10:14 are Max Primary Streams */ 609/* bits 10:14 are Max Primary Streams */
606/* bit 15 is Linear Stream Array */ 610/* bit 15 is Linear Stream Array */
607/* Interval - period between requests to an endpoint - 125u increments. */ 611/* Interval - period between requests to an endpoint - 125u increments. */
608#define EP_INTERVAL(p) ((p & 0xff) << 16) 612#define EP_INTERVAL(p) (((p) & 0xff) << 16)
609#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) 613#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
610#define EP_MAXPSTREAMS_MASK (0x1f << 10) 614#define EP_MAXPSTREAMS_MASK (0x1f << 10)
611#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) 615#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
@@ -1276,6 +1280,7 @@ struct xhci_hcd {
1276#define XHCI_LINK_TRB_QUIRK (1 << 0) 1280#define XHCI_LINK_TRB_QUIRK (1 << 0)
1277#define XHCI_RESET_EP_QUIRK (1 << 1) 1281#define XHCI_RESET_EP_QUIRK (1 << 1)
1278#define XHCI_NEC_HOST (1 << 2) 1282#define XHCI_NEC_HOST (1 << 2)
1283#define XHCI_AMD_PLL_FIX (1 << 3)
1279 /* There are two roothubs to keep track of bus suspend info for */ 1284 /* There are two roothubs to keep track of bus suspend info for */
1280 struct xhci_bus_state bus_state[2]; 1285 struct xhci_bus_state bus_state[2];
1281 /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ 1286 /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
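Besides introducing DUPLICATE_ENTRY (which, as (u8)(-1), evaluates to 0xff), the xhci.h hunks parenthesize the macro parameters in ENABLE_DEV_NOTE(), EP_MULT() and EP_INTERVAL(). The small program below shows the failure mode the extra parentheses prevent when the macro argument is an expression rather than a plain identifier; the *_BAD/*_GOOD names are illustrative, not the real macros.

#include <stdio.h>

/* Without parentheses around the parameter... */
#define EP_MULT_BAD(p)  ((p & 0x3) << 8)
/* ...and with them, as in the patched xhci.h. */
#define EP_MULT_GOOD(p) (((p) & 0x3) << 8)

int main(void)
{
        unsigned int a = 0x4, b = 0x2;

        /* EP_MULT_BAD(a | b) expands to ((a | b & 0x3) << 8).  Since & binds
         * tighter than |, only b is masked and the 0x4 bit leaks through:
         * the result is 0x600 instead of the intended 0x200. */
        printf("bad:  0x%x\n", EP_MULT_BAD(a | b));     /* prints 0x600 */
        printf("good: 0x%x\n", EP_MULT_GOOD(a | b));    /* prints 0x200 */
        return 0;
}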
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 4cbb7e4b368d..74073b363c30 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -14,7 +14,7 @@ config USB_MUSB_HDRC
14 select TWL4030_USB if MACH_OMAP_3430SDP 14 select TWL4030_USB if MACH_OMAP_3430SDP
15 select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA 15 select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
16 select USB_OTG_UTILS 16 select USB_OTG_UTILS
17 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' 17 bool 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
18 help 18 help
19 Say Y here if your system has a dual role high speed USB 19 Say Y here if your system has a dual role high speed USB
20 controller based on the Mentor Graphics silicon IP. Then 20 controller based on the Mentor Graphics silicon IP. Then
@@ -30,8 +30,8 @@ config USB_MUSB_HDRC
30 30
31 If you do not know what this is, please say N. 31 If you do not know what this is, please say N.
32 32
33 To compile this driver as a module, choose M here; the 33# To compile this driver as a module, choose M here; the
34 module will be called "musb-hdrc". 34# module will be called "musb-hdrc".
35 35
36choice 36choice
37 prompt "Platform Glue Layer" 37 prompt "Platform Glue Layer"
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 52312e8af213..8e2a1ff8a35a 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -21,6 +21,7 @@
21#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
22 22
23#include "musb_core.h" 23#include "musb_core.h"
24#include "musbhsdma.h"
24#include "blackfin.h" 25#include "blackfin.h"
25 26
26struct bfin_glue { 27struct bfin_glue {
@@ -332,6 +333,27 @@ static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode)
332 return -EIO; 333 return -EIO;
333} 334}
334 335
336static int bfin_musb_adjust_channel_params(struct dma_channel *channel,
337 u16 packet_sz, u8 *mode,
338 dma_addr_t *dma_addr, u32 *len)
339{
340 struct musb_dma_channel *musb_channel = channel->private_data;
341
342 /*
343 * Anomaly 05000450 might cause data corruption when using DMA
344 * MODE 1 transmits with short packet. So to work around this,
345 * we truncate all MODE 1 transfers down to a multiple of the
346 * max packet size, and then do the last short packet transfer
347 * (if there is any) using MODE 0.
348 */
349 if (ANOMALY_05000450) {
350 if (musb_channel->transmit && *mode == 1)
351 *len = *len - (*len % packet_sz);
352 }
353
354 return 0;
355}
356
335static void bfin_musb_reg_init(struct musb *musb) 357static void bfin_musb_reg_init(struct musb *musb)
336{ 358{
337 if (ANOMALY_05000346) { 359 if (ANOMALY_05000346) {
@@ -430,6 +452,8 @@ static const struct musb_platform_ops bfin_ops = {
430 452
431 .vbus_status = bfin_musb_vbus_status, 453 .vbus_status = bfin_musb_vbus_status,
432 .set_vbus = bfin_musb_set_vbus, 454 .set_vbus = bfin_musb_set_vbus,
455
456 .adjust_channel_params = bfin_musb_adjust_channel_params,
433}; 457};
434 458
435static u64 bfin_dmamask = DMA_BIT_MASK(32); 459static u64 bfin_dmamask = DMA_BIT_MASK(32);
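The new bfin_musb_adjust_channel_params() hook works around Anomaly 05000450 by trimming DMA MODE 1 transmits to a whole number of max-size packets; any trailing short packet is then sent in MODE 0. The arithmetic is just len -= len % packet_sz, illustrated by the small worked example below (the values are made up).

#include <stdio.h>

/* Trim a MODE 1 DMA transmit to a multiple of the endpoint's max packet size,
 * mirroring the workaround's *len = *len - (*len % packet_sz). */
static unsigned int trim_to_full_packets(unsigned int len, unsigned int packet_sz)
{
        return len - (len % packet_sz);
}

int main(void)
{
        unsigned int len = 1000, packet_sz = 512;
        unsigned int dma_len = trim_to_full_packets(len, packet_sz);

        /* 1000 bytes with 512-byte packets: 512 go out via DMA MODE 1,
         * the remaining 488-byte short packet is sent afterwards in MODE 0. */
        printf("MODE 1: %u bytes, MODE 0 tail: %u bytes\n", dma_len, len - dma_len);
        return 0;
}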
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index de55a3c3259a..ab434fbd8c35 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -597,12 +597,12 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
597 length = min(n_bds * maxpacket, length); 597 length = min(n_bds * maxpacket, length);
598 } 598 }
599 599
600 DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n", 600 DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
601 tx->index, 601 tx->index,
602 maxpacket, 602 maxpacket,
603 rndis ? "rndis" : "transparent", 603 rndis ? "rndis" : "transparent",
604 n_bds, 604 n_bds,
605 addr, length); 605 (unsigned long long)addr, length);
606 606
607 cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); 607 cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
608 608
@@ -820,7 +820,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
820 length = min(n_bds * maxpacket, length); 820 length = min(n_bds * maxpacket, length);
821 821
822 DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) " 822 DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
823 "dma 0x%x len %u %u/%u\n", 823 "dma 0x%llx len %u %u/%u\n",
824 rx->index, maxpacket, 824 rx->index, maxpacket,
825 onepacket 825 onepacket
826 ? (is_rndis ? "rndis" : "onepacket") 826 ? (is_rndis ? "rndis" : "onepacket")
@@ -829,7 +829,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
829 musb_readl(tibase, 829 musb_readl(tibase,
830 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4)) 830 DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
831 & 0xffff, 831 & 0xffff,
832 addr, length, rx->channel.actual_len, rx->buf_len); 832 (unsigned long long)addr, length,
833 rx->channel.actual_len, rx->buf_len);
833 834
834 /* only queue one segment at a time, since the hardware prevents 835 /* only queue one segment at a time, since the hardware prevents
835 * correct queue shutdown after unexpected short packets 836 * correct queue shutdown after unexpected short packets
@@ -1039,9 +1040,9 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
1039 if (!completed && (bd->hw_options & CPPI_OWN_SET)) 1040 if (!completed && (bd->hw_options & CPPI_OWN_SET))
1040 break; 1041 break;
1041 1042
1042 DBG(5, "C/RXBD %08x: nxt %08x buf %08x " 1043 DBG(5, "C/RXBD %llx: nxt %08x buf %08x "
1043 "off.len %08x opt.len %08x (%d)\n", 1044 "off.len %08x opt.len %08x (%d)\n",
1044 bd->dma, bd->hw_next, bd->hw_bufp, 1045 (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
1045 bd->hw_off_len, bd->hw_options, 1046 bd->hw_off_len, bd->hw_options,
1046 rx->channel.actual_len); 1047 rx->channel.actual_len);
1047 1048
@@ -1111,11 +1112,12 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
1111 musb_ep_select(cppi->mregs, rx->index + 1); 1112 musb_ep_select(cppi->mregs, rx->index + 1);
1112 csr = musb_readw(regs, MUSB_RXCSR); 1113 csr = musb_readw(regs, MUSB_RXCSR);
1113 if (csr & MUSB_RXCSR_DMAENAB) { 1114 if (csr & MUSB_RXCSR_DMAENAB) {
1114 DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n", 1115 DBG(4, "list%d %p/%p, last %llx%s, csr %04x\n",
1115 rx->index, 1116 rx->index,
1116 rx->head, rx->tail, 1117 rx->head, rx->tail,
1117 rx->last_processed 1118 rx->last_processed
1118 ? rx->last_processed->dma 1119 ? (unsigned long long)
1120 rx->last_processed->dma
1119 : 0, 1121 : 0,
1120 completed ? ", completed" : "", 1122 completed ? ", completed" : "",
1121 csr); 1123 csr);
@@ -1167,8 +1169,11 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
1167 tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); 1169 tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
1168 rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG); 1170 rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
1169 1171
1170 if (!tx && !rx) 1172 if (!tx && !rx) {
1173 if (cppi->irq)
1174 spin_unlock_irqrestore(&musb->lock, flags);
1171 return IRQ_NONE; 1175 return IRQ_NONE;
1176 }
1172 1177
1173 DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx); 1178 DBG(4, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
1174 1179
@@ -1199,7 +1204,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
1199 */ 1204 */
1200 if (NULL == bd) { 1205 if (NULL == bd) {
1201 DBG(1, "null BD\n"); 1206 DBG(1, "null BD\n");
1202 tx_ram->tx_complete = 0; 1207 musb_writel(&tx_ram->tx_complete, 0, 0);
1203 continue; 1208 continue;
1204 } 1209 }
1205 1210
@@ -1452,7 +1457,7 @@ static int cppi_channel_abort(struct dma_channel *channel)
1452 * compare mode by writing 1 to the tx_complete register. 1457 * compare mode by writing 1 to the tx_complete register.
1453 */ 1458 */
1454 cppi_reset_tx(tx_ram, 1); 1459 cppi_reset_tx(tx_ram, 1);
1455 cppi_ch->head = 0; 1460 cppi_ch->head = NULL;
1456 musb_writel(&tx_ram->tx_complete, 0, 1); 1461 musb_writel(&tx_ram->tx_complete, 0, 1);
1457 cppi_dump_tx(5, cppi_ch, " (done teardown)"); 1462 cppi_dump_tx(5, cppi_ch, " (done teardown)");
1458 1463
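The cppi_dma.c debug-print changes widen the format to %llx and cast the value to unsigned long long because dma_addr_t may be 32 or 64 bits wide depending on the configuration. The snippet below shows the same portable-printing idiom, with uint64_t standing in for a wide dma_addr_t.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Stand-in for a dma_addr_t whose width depends on the kernel config. */
        uint64_t dma = 0x1ffff0000ULL;

        /* Casting to unsigned long long and printing with %llx works for either
         * width; handing the raw value to a %x conversion would be undefined
         * when the type is 64-bit. */
        printf("dma 0x%llx\n", (unsigned long long)dma);
        return 0;
}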
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 630ae7f3cd4c..f10ff00ca09e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1030,6 +1030,7 @@ static void musb_shutdown(struct platform_device *pdev)
1030 struct musb *musb = dev_to_musb(&pdev->dev); 1030 struct musb *musb = dev_to_musb(&pdev->dev);
1031 unsigned long flags; 1031 unsigned long flags;
1032 1032
1033 pm_runtime_get_sync(musb->controller);
1033 spin_lock_irqsave(&musb->lock, flags); 1034 spin_lock_irqsave(&musb->lock, flags);
1034 musb_platform_disable(musb); 1035 musb_platform_disable(musb);
1035 musb_generic_disable(musb); 1036 musb_generic_disable(musb);
@@ -1040,6 +1041,7 @@ static void musb_shutdown(struct platform_device *pdev)
1040 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 1041 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1041 musb_platform_exit(musb); 1042 musb_platform_exit(musb);
1042 1043
1044 pm_runtime_put(musb->controller);
1043 /* FIXME power down */ 1045 /* FIXME power down */
1044} 1046}
1045 1047
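The musb_core.c change brackets musb_shutdown() with pm_runtime_get_sync()/pm_runtime_put(), guaranteeing the controller is powered while its registers are written and releasing that guarantee afterwards. A toy sketch of that get/put balance follows; the counter and the printed steps are illustrative, not the runtime-PM implementation.

#include <stdio.h>

static int usage_count;

/* pm_runtime_get_sync()-style helper: powers the device up on 0 -> 1. */
static void pm_get(void)
{
        if (usage_count++ == 0)
                puts("resume hardware");
}

/* pm_runtime_put()-style helper: allows suspend again on 1 -> 0. */
static void pm_put(void)
{
        if (--usage_count == 0)
                puts("allow suspend");
}

static void shutdown(void)
{
        pm_get();                /* make sure the registers are reachable */
        puts("mask interrupts, clear DEVCTL, run platform exit");
        pm_put();                /* drop the reference taken above */
}

int main(void)
{
        shutdown();
        return 0;
}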
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 4bd9e2145ee4..0e053b587960 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -261,6 +261,7 @@ enum musb_g_ep0_state {
261 * @try_idle: tries to idle the IP 261 * @try_idle: tries to idle the IP
262 * @vbus_status: returns vbus status if possible 262 * @vbus_status: returns vbus status if possible
263 * @set_vbus: forces vbus status 263 * @set_vbus: forces vbus status
264 * @adjust_channel_params: pre check for standard dma channel_program func
264 */ 265 */
265struct musb_platform_ops { 266struct musb_platform_ops {
266 int (*init)(struct musb *musb); 267 int (*init)(struct musb *musb);
@@ -274,6 +275,10 @@ struct musb_platform_ops {
274 275
275 int (*vbus_status)(struct musb *musb); 276 int (*vbus_status)(struct musb *musb);
276 void (*set_vbus)(struct musb *musb, int on); 277 void (*set_vbus)(struct musb *musb, int on);
278
279 int (*adjust_channel_params)(struct dma_channel *channel,
280 u16 packet_sz, u8 *mode,
281 dma_addr_t *dma_addr, u32 *len);
277}; 282};
278 283
279/* 284/*
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 98519c5d8b5c..6dfbf9ffd7a6 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -535,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
535 is_dma = 1; 535 is_dma = 1;
536 csr |= MUSB_TXCSR_P_WZC_BITS; 536 csr |= MUSB_TXCSR_P_WZC_BITS;
537 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | 537 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
538 MUSB_TXCSR_TXPKTRDY); 538 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
539 musb_writew(epio, MUSB_TXCSR, csr); 539 musb_writew(epio, MUSB_TXCSR, csr);
540 /* Ensure writebuffer is empty. */ 540 /* Ensure writebuffer is empty. */
541 csr = musb_readw(epio, MUSB_TXCSR); 541 csr = musb_readw(epio, MUSB_TXCSR);
@@ -1296,7 +1296,7 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1296 } 1296 }
1297 1297
1298 /* if the hardware doesn't have the request, easy ... */ 1298 /* if the hardware doesn't have the request, easy ... */
1299 if (musb_ep->req_list.next != &request->list || musb_ep->busy) 1299 if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1300 musb_g_giveback(musb_ep, request, -ECONNRESET); 1300 musb_g_giveback(musb_ep, request, -ECONNRESET);
1301 1301
1302 /* ... else abort the dma transfer ... */ 1302 /* ... else abort the dma transfer ... */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 0144a2d481fd..d281792db05c 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -169,6 +169,14 @@ static int dma_channel_program(struct dma_channel *channel,
169 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || 169 BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
170 channel->status == MUSB_DMA_STATUS_BUSY); 170 channel->status == MUSB_DMA_STATUS_BUSY);
171 171
172 /* Let targets check/tweak the arguments */
173 if (musb->ops->adjust_channel_params) {
174 int ret = musb->ops->adjust_channel_params(channel,
175 packet_sz, &mode, &dma_addr, &len);
176 if (ret)
177 return ret;
178 }
179
172 /* 180 /*
173 * The DMA engine in RTL1.8 and above cannot handle 181 * The DMA engine in RTL1.8 and above cannot handle
174 * DMA addresses that are not aligned to a 4 byte boundary. 182 * DMA addresses that are not aligned to a 4 byte boundary.
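musb_core.h adds an optional adjust_channel_params() callback to musb_platform_ops, and the musbhsdma.c hunk calls it only when the glue layer supplies one, letting a platform veto or tweak the transfer parameters before the generic channel programming runs. The sketch below shows that optional-hook pattern with simplified, hypothetical types; my_adjust() plays the role of the Blackfin hook shown earlier.

#include <stdio.h>

struct platform_ops {
        /* Optional: left NULL when the platform has nothing to adjust. */
        int (*adjust_params)(unsigned int packet_sz, unsigned int *len);
};

static int generic_program(const struct platform_ops *ops,
                           unsigned int packet_sz, unsigned int len)
{
        /* Let the platform check or tweak the arguments first, if it cares. */
        if (ops->adjust_params) {
                int ret = ops->adjust_params(packet_sz, &len);

                if (ret)
                        return ret;     /* the platform rejected the transfer */
        }
        printf("programming DMA for %u bytes\n", len);
        return 0;
}

/* Example glue-layer hook: round the length down to whole packets. */
static int my_adjust(unsigned int packet_sz, unsigned int *len)
{
        *len -= *len % packet_sz;
        return 0;
}

int main(void)
{
        struct platform_ops plain = { .adjust_params = NULL };
        struct platform_ops tweaked = { .adjust_params = my_adjust };

        generic_program(&plain, 512, 1000);     /* programs 1000 bytes */
        generic_program(&tweaked, 512, 1000);   /* programs 512 bytes */
        return 0;
}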
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 25cb8b0003b1..57a27fa954b4 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -259,9 +259,10 @@ static int musb_otg_notifications(struct notifier_block *nb,
259 case USB_EVENT_VBUS: 259 case USB_EVENT_VBUS:
260 DBG(4, "VBUS Connect\n"); 260 DBG(4, "VBUS Connect\n");
261 261
262#ifdef CONFIG_USB_GADGET_MUSB_HDRC
262 if (musb->gadget_driver) 263 if (musb->gadget_driver)
263 pm_runtime_get_sync(musb->controller); 264 pm_runtime_get_sync(musb->controller);
264 265#endif
265 otg_init(musb->xceiv); 266 otg_init(musb->xceiv);
266 break; 267 break;
267 268
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index d6384e4aeef9..f7e04bf34a13 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -93,6 +93,8 @@ static int __init ux500_probe(struct platform_device *pdev)
93 } 93 }
94 94
95 musb->dev.parent = &pdev->dev; 95 musb->dev.parent = &pdev->dev;
96 musb->dev.dma_mask = pdev->dev.dma_mask;
97 musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
96 98
97 glue->dev = &pdev->dev; 99 glue->dev = &pdev->dev;
98 glue->musb = musb; 100 glue->musb = musb;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a973c7a29d6e..4de6ef0ae52a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -151,6 +151,8 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
151 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! 151 * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
152 */ 152 */
153static struct usb_device_id id_table_combined [] = { 153static struct usb_device_id id_table_combined [] = {
154 { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
155 { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
154 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, 156 { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
155 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, 157 { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
156 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, 158 { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
@@ -525,6 +527,7 @@ static struct usb_device_id id_table_combined [] = {
525 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, 527 { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) },
526 { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, 528 { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) },
527 { USB_DEVICE(OCT_VID, OCT_US101_PID) }, 529 { USB_DEVICE(OCT_VID, OCT_US101_PID) },
530 { USB_DEVICE(OCT_VID, OCT_DK201_PID) },
528 { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID), 531 { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID),
529 .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk }, 532 .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk },
530 { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID), 533 { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID),
@@ -787,6 +790,8 @@ static struct usb_device_id id_table_combined [] = {
787 { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), 790 { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
788 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 791 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
789 { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, 792 { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
793 { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
794 { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
790 { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, 795 { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
791 { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, 796 { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
792 { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, 797 { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c543e55bafba..efffc23723bd 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -300,6 +300,8 @@
300 * Hameg HO820 and HO870 interface (using VID 0x0403) 300 * Hameg HO820 and HO870 interface (using VID 0x0403)
301 */ 301 */
302#define HAMEG_HO820_PID 0xed74 302#define HAMEG_HO820_PID 0xed74
303#define HAMEG_HO730_PID 0xed73
304#define HAMEG_HO720_PID 0xed72
303#define HAMEG_HO870_PID 0xed71 305#define HAMEG_HO870_PID 0xed71
304 306
305/* 307/*
@@ -572,6 +574,7 @@
572/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */ 574/* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */
573/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */ 575/* Also rebadged as Dick Smith Electronics (Aus) XH6451 */
574/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */ 576/* Also rebadged as SIIG Inc. model US2308 hardware version 1 */
577#define OCT_DK201_PID 0x0103 /* OCT DK201 USB docking station */
575#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ 578#define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */
576 579
577/* 580/*
@@ -1141,3 +1144,12 @@
1141#define QIHARDWARE_VID 0x20B7 1144#define QIHARDWARE_VID 0x20B7
1142#define MILKYMISTONE_JTAGSERIAL_PID 0x0713 1145#define MILKYMISTONE_JTAGSERIAL_PID 0x0713
1143 1146
1147/*
1148 * CTI GmbH RS485 Converter http://www.cti-lean.com/
1149 */
1150/* USB-485-Mini*/
1151#define FTDI_CTI_MINI_PID 0xF608
1152/* USB-Nano-485*/
1153#define FTDI_CTI_NANO_PID 0xF60B
1154
1155
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 75c7f456eed5..d77ff0435896 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -407,6 +407,10 @@ static void option_instat_callback(struct urb *urb);
407/* ONDA MT825UP HSDPA 14.2 modem */ 407/* ONDA MT825UP HSDPA 14.2 modem */
408#define ONDA_MT825UP 0x000b 408#define ONDA_MT825UP 0x000b
409 409
410/* Samsung products */
411#define SAMSUNG_VENDOR_ID 0x04e8
412#define SAMSUNG_PRODUCT_GT_B3730 0x6889
413
410/* some devices interfaces need special handling due to a number of reasons */ 414/* some devices interfaces need special handling due to a number of reasons */
411enum option_blacklist_reason { 415enum option_blacklist_reason {
412 OPTION_BLACKLIST_NONE = 0, 416 OPTION_BLACKLIST_NONE = 0,
@@ -968,6 +972,7 @@ static const struct usb_device_id option_ids[] = {
968 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 972 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
969 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 973 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
970 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ 974 { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
975 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/
971 { } /* Terminating entry */ 976 { } /* Terminating entry */
972}; 977};
973MODULE_DEVICE_TABLE(usb, option_ids); 978MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 8858201eb1d3..54a9dab1f33b 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -111,7 +111,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
111 ifnum = intf->desc.bInterfaceNumber; 111 ifnum = intf->desc.bInterfaceNumber;
112 dbg("This Interface = %d", ifnum); 112 dbg("This Interface = %d", ifnum);
113 113
114 data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), 114 data = kzalloc(sizeof(struct usb_wwan_intf_private),
115 GFP_KERNEL); 115 GFP_KERNEL);
116 if (!data) 116 if (!data)
117 return -ENOMEM; 117 return -ENOMEM;
@@ -134,8 +134,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
134 usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) { 134 usb_endpoint_is_bulk_out(&intf->endpoint[1].desc)) {
135 dbg("QDL port found"); 135 dbg("QDL port found");
136 136
137 if (serial->interface->num_altsetting == 1) 137 if (serial->interface->num_altsetting == 1) {
138 return 0; 138 retval = 0; /* Success */
139 break;
140 }
139 141
140 retval = usb_set_interface(serial->dev, ifnum, 1); 142 retval = usb_set_interface(serial->dev, ifnum, 1);
141 if (retval < 0) { 143 if (retval < 0) {
@@ -145,7 +147,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
145 retval = -ENODEV; 147 retval = -ENODEV;
146 kfree(data); 148 kfree(data);
147 } 149 }
148 return retval;
149 } 150 }
150 break; 151 break;
151 152
@@ -166,6 +167,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
166 "Could not set interface, error %d\n", 167 "Could not set interface, error %d\n",
167 retval); 168 retval);
168 retval = -ENODEV; 169 retval = -ENODEV;
170 kfree(data);
169 } 171 }
170 } else if (ifnum == 2) { 172 } else if (ifnum == 2) {
171 dbg("Modem port found"); 173 dbg("Modem port found");
@@ -177,7 +179,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
177 retval = -ENODEV; 179 retval = -ENODEV;
178 kfree(data); 180 kfree(data);
179 } 181 }
180 return retval;
181 } else if (ifnum==3) { 182 } else if (ifnum==3) {
182 /* 183 /*
183 * NMEA (serial line 9600 8N1) 184 * NMEA (serial line 9600 8N1)
@@ -191,6 +192,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
191 "Could not set interface, error %d\n", 192 "Could not set interface, error %d\n",
192 retval); 193 retval);
193 retval = -ENODEV; 194 retval = -ENODEV;
195 kfree(data);
194 } 196 }
195 } 197 }
196 break; 198 break;
@@ -199,12 +201,27 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
199 dev_err(&serial->dev->dev, 201 dev_err(&serial->dev->dev,
200 "unknown number of interfaces: %d\n", nintf); 202 "unknown number of interfaces: %d\n", nintf);
201 kfree(data); 203 kfree(data);
202 return -ENODEV; 204 retval = -ENODEV;
203 } 205 }
204 206
207 /* Set serial->private if not returning -ENODEV */
208 if (retval != -ENODEV)
209 usb_set_serial_data(serial, data);
205 return retval; 210 return retval;
206} 211}
207 212
213static void qc_release(struct usb_serial *serial)
214{
215 struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
216
217 dbg("%s", __func__);
218
219 /* Call usb_wwan release & free the private data allocated in qcprobe */
220 usb_wwan_release(serial);
221 usb_set_serial_data(serial, NULL);
222 kfree(priv);
223}
224
208static struct usb_serial_driver qcdevice = { 225static struct usb_serial_driver qcdevice = {
209 .driver = { 226 .driver = {
210 .owner = THIS_MODULE, 227 .owner = THIS_MODULE,
@@ -222,7 +239,7 @@ static struct usb_serial_driver qcdevice = {
222 .chars_in_buffer = usb_wwan_chars_in_buffer, 239 .chars_in_buffer = usb_wwan_chars_in_buffer,
223 .attach = usb_wwan_startup, 240 .attach = usb_wwan_startup,
224 .disconnect = usb_wwan_disconnect, 241 .disconnect = usb_wwan_disconnect,
225 .release = usb_wwan_release, 242 .release = qc_release,
226#ifdef CONFIG_PM 243#ifdef CONFIG_PM
227 .suspend = usb_wwan_suspend, 244 .suspend = usb_wwan_suspend,
228 .resume = usb_wwan_resume, 245 .resume = usb_wwan_resume,
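The qcserial changes fix the lifetime of the per-interface private data: it is allocated in probe, freed on every error path, published with usb_set_serial_data() only on success, and freed again in the new qc_release(). The sketch below shows that allocate-publish-release discipline with hypothetical probe()/release() helpers; it is not the serial-core API.

#include <stdio.h>
#include <stdlib.h>

struct device { void *private; };

/* Allocate per-device state in probe and publish it only when probe succeeds;
 * every error path frees it, and release() undoes exactly what probe() did. */
static int probe(struct device *dev, int config_ok)
{
        int *priv = calloc(1, sizeof(*priv));

        if (!priv)
                return -1;

        if (!config_ok) {
                free(priv);             /* error path: nothing was published */
                return -1;
        }

        dev->private = priv;            /* publish only on success */
        return 0;
}

static void release(struct device *dev)
{
        free(dev->private);
        dev->private = NULL;
}

int main(void)
{
        struct device dev = { 0 };

        if (probe(&dev, 1) == 0) {
                puts("probed");
                release(&dev);
        }
        return 0;
}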
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index a2e5b5100ab4..0f4e8c942f9e 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1648,7 +1648,9 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
1648 1648
1649 switch (val) { 1649 switch (val) {
1650 case CPUFREQ_PRECHANGE: 1650 case CPUFREQ_PRECHANGE:
1651 if (!fbi->overlay[0].usage && !fbi->overlay[1].usage) 1651#ifdef CONFIG_FB_PXA_OVERLAY
1652 if (!(fbi->overlay[0].usage || fbi->overlay[1].usage))
1653#endif
1652 set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); 1654 set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE);
1653 break; 1655 break;
1654 1656
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 4fb5b2bf2348..4bcc8b82640b 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -590,15 +590,10 @@ static struct virtio_config_ops virtio_pci_config_ops = {
590 590
591static void virtio_pci_release_dev(struct device *_d) 591static void virtio_pci_release_dev(struct device *_d)
592{ 592{
593 struct virtio_device *dev = container_of(_d, struct virtio_device, dev); 593 struct virtio_device *dev = container_of(_d, struct virtio_device,
594 dev);
594 struct virtio_pci_device *vp_dev = to_vp_device(dev); 595 struct virtio_pci_device *vp_dev = to_vp_device(dev);
595 struct pci_dev *pci_dev = vp_dev->pci_dev;
596 596
597 vp_del_vqs(dev);
598 pci_set_drvdata(pci_dev, NULL);
599 pci_iounmap(pci_dev, vp_dev->ioaddr);
600 pci_release_regions(pci_dev);
601 pci_disable_device(pci_dev);
602 kfree(vp_dev); 597 kfree(vp_dev);
603} 598}
604 599
@@ -681,6 +676,12 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
681 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); 676 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
682 677
683 unregister_virtio_device(&vp_dev->vdev); 678 unregister_virtio_device(&vp_dev->vdev);
679
680 vp_del_vqs(&vp_dev->vdev);
681 pci_set_drvdata(pci_dev, NULL);
682 pci_iounmap(pci_dev, vp_dev->ioaddr);
683 pci_release_regions(pci_dev);
684 pci_disable_device(pci_dev);
684} 685}
685 686
686#ifdef CONFIG_PM 687#ifdef CONFIG_PM
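The virtio_pci change moves the resource teardown (deleting virtqueues, unmapping and releasing the PCI regions, disabling the device) from the device release callback into virtio_pci_remove(), leaving release() to do nothing but free the memory once the last reference is gone. The schematic sketch below mirrors that split; the refcounting and the printed steps are illustrative stand-ins, not the driver core.

#include <stdio.h>
#include <stdlib.h>

struct vp_device { int refs; };

/* The release callback runs when the last reference is dropped: it must only
 * free memory, never touch hardware or bus resources. */
static void vp_release(struct vp_device *vp)
{
        puts("release: kfree(vp_dev)");
        free(vp);
}

static void vp_put(struct vp_device *vp)
{
        if (--vp->refs == 0)
                vp_release(vp);
}

/* remove() unwinds the probe-time setup itself, in reverse order, before the
 * last reference goes away. */
static void vp_remove(struct vp_device *vp)
{
        puts("remove: unregister virtio device");
        puts("remove: delete virtqueues");
        puts("remove: iounmap / release regions / disable device");
        vp_put(vp);
}

int main(void)
{
        struct vp_device *vp = malloc(sizeof(*vp));

        vp->refs = 1;
        vp_remove(vp);
        return 0;
}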
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cc2f73e03475..b0043fb26a4d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -371,6 +371,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
371 /* detach_buf clears data, so grab it now. */ 371 /* detach_buf clears data, so grab it now. */
372 buf = vq->data[i]; 372 buf = vq->data[i];
373 detach_buf(vq, i); 373 detach_buf(vq, i);
374 vq->vring.avail->idx--;
374 END_USE(vq); 375 END_USE(vq);
375 return buf; 376 return buf;
376 } 377 }
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 35a0d12dad73..5fd020da7c55 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -35,6 +35,7 @@
35 * document number 324645-001, 324646-001: Cougar Point (CPT) 35 * document number 324645-001, 324646-001: Cougar Point (CPT)
36 * document number TBD : Patsburg (PBG) 36 * document number TBD : Patsburg (PBG)
37 * document number TBD : DH89xxCC 37 * document number TBD : DH89xxCC
38 * document number TBD : Panther Point
38 */ 39 */
39 40
40/* 41/*
@@ -153,6 +154,38 @@ enum iTCO_chipsets {
153 TCO_PBG1, /* Patsburg */ 154 TCO_PBG1, /* Patsburg */
154 TCO_PBG2, /* Patsburg */ 155 TCO_PBG2, /* Patsburg */
155 TCO_DH89XXCC, /* DH89xxCC */ 156 TCO_DH89XXCC, /* DH89xxCC */
157 TCO_PPT0, /* Panther Point */
158 TCO_PPT1, /* Panther Point */
159 TCO_PPT2, /* Panther Point */
160 TCO_PPT3, /* Panther Point */
161 TCO_PPT4, /* Panther Point */
162 TCO_PPT5, /* Panther Point */
163 TCO_PPT6, /* Panther Point */
164 TCO_PPT7, /* Panther Point */
165 TCO_PPT8, /* Panther Point */
166 TCO_PPT9, /* Panther Point */
167 TCO_PPT10, /* Panther Point */
168 TCO_PPT11, /* Panther Point */
169 TCO_PPT12, /* Panther Point */
170 TCO_PPT13, /* Panther Point */
171 TCO_PPT14, /* Panther Point */
172 TCO_PPT15, /* Panther Point */
173 TCO_PPT16, /* Panther Point */
174 TCO_PPT17, /* Panther Point */
175 TCO_PPT18, /* Panther Point */
176 TCO_PPT19, /* Panther Point */
177 TCO_PPT20, /* Panther Point */
178 TCO_PPT21, /* Panther Point */
179 TCO_PPT22, /* Panther Point */
180 TCO_PPT23, /* Panther Point */
181 TCO_PPT24, /* Panther Point */
182 TCO_PPT25, /* Panther Point */
183 TCO_PPT26, /* Panther Point */
184 TCO_PPT27, /* Panther Point */
185 TCO_PPT28, /* Panther Point */
186 TCO_PPT29, /* Panther Point */
187 TCO_PPT30, /* Panther Point */
188 TCO_PPT31, /* Panther Point */
156}; 189};
157 190
158static struct { 191static struct {
@@ -244,6 +277,38 @@ static struct {
244 {"Patsburg", 2}, 277 {"Patsburg", 2},
245 {"Patsburg", 2}, 278 {"Patsburg", 2},
246 {"DH89xxCC", 2}, 279 {"DH89xxCC", 2},
280 {"Panther Point", 2},
281 {"Panther Point", 2},
282 {"Panther Point", 2},
283 {"Panther Point", 2},
284 {"Panther Point", 2},
285 {"Panther Point", 2},
286 {"Panther Point", 2},
287 {"Panther Point", 2},
288 {"Panther Point", 2},
289 {"Panther Point", 2},
290 {"Panther Point", 2},
291 {"Panther Point", 2},
292 {"Panther Point", 2},
293 {"Panther Point", 2},
294 {"Panther Point", 2},
295 {"Panther Point", 2},
296 {"Panther Point", 2},
297 {"Panther Point", 2},
298 {"Panther Point", 2},
299 {"Panther Point", 2},
300 {"Panther Point", 2},
301 {"Panther Point", 2},
302 {"Panther Point", 2},
303 {"Panther Point", 2},
304 {"Panther Point", 2},
305 {"Panther Point", 2},
306 {"Panther Point", 2},
307 {"Panther Point", 2},
308 {"Panther Point", 2},
309 {"Panther Point", 2},
310 {"Panther Point", 2},
311 {"Panther Point", 2},
247 {NULL, 0} 312 {NULL, 0}
248}; 313};
249 314
@@ -363,6 +428,38 @@ static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
363 { ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)}, 428 { ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)},
364 { ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)}, 429 { ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)},
365 { ITCO_PCI_DEVICE(0x2310, TCO_DH89XXCC)}, 430 { ITCO_PCI_DEVICE(0x2310, TCO_DH89XXCC)},
431 { ITCO_PCI_DEVICE(0x1e40, TCO_PPT0)},
432 { ITCO_PCI_DEVICE(0x1e41, TCO_PPT1)},
433 { ITCO_PCI_DEVICE(0x1e42, TCO_PPT2)},
434 { ITCO_PCI_DEVICE(0x1e43, TCO_PPT3)},
435 { ITCO_PCI_DEVICE(0x1e44, TCO_PPT4)},
436 { ITCO_PCI_DEVICE(0x1e45, TCO_PPT5)},
437 { ITCO_PCI_DEVICE(0x1e46, TCO_PPT6)},
438 { ITCO_PCI_DEVICE(0x1e47, TCO_PPT7)},
439 { ITCO_PCI_DEVICE(0x1e48, TCO_PPT8)},
440 { ITCO_PCI_DEVICE(0x1e49, TCO_PPT9)},
441 { ITCO_PCI_DEVICE(0x1e4a, TCO_PPT10)},
442 { ITCO_PCI_DEVICE(0x1e4b, TCO_PPT11)},
443 { ITCO_PCI_DEVICE(0x1e4c, TCO_PPT12)},
444 { ITCO_PCI_DEVICE(0x1e4d, TCO_PPT13)},
445 { ITCO_PCI_DEVICE(0x1e4e, TCO_PPT14)},
446 { ITCO_PCI_DEVICE(0x1e4f, TCO_PPT15)},
447 { ITCO_PCI_DEVICE(0x1e50, TCO_PPT16)},
448 { ITCO_PCI_DEVICE(0x1e51, TCO_PPT17)},
449 { ITCO_PCI_DEVICE(0x1e52, TCO_PPT18)},
450 { ITCO_PCI_DEVICE(0x1e53, TCO_PPT19)},
451 { ITCO_PCI_DEVICE(0x1e54, TCO_PPT20)},
452 { ITCO_PCI_DEVICE(0x1e55, TCO_PPT21)},
453 { ITCO_PCI_DEVICE(0x1e56, TCO_PPT22)},
454 { ITCO_PCI_DEVICE(0x1e57, TCO_PPT23)},
455 { ITCO_PCI_DEVICE(0x1e58, TCO_PPT24)},
456 { ITCO_PCI_DEVICE(0x1e59, TCO_PPT25)},
457 { ITCO_PCI_DEVICE(0x1e5a, TCO_PPT26)},
458 { ITCO_PCI_DEVICE(0x1e5b, TCO_PPT27)},
459 { ITCO_PCI_DEVICE(0x1e5c, TCO_PPT28)},
460 { ITCO_PCI_DEVICE(0x1e5d, TCO_PPT29)},
461 { ITCO_PCI_DEVICE(0x1e5e, TCO_PPT30)},
462 { ITCO_PCI_DEVICE(0x1e5f, TCO_PPT31)},
366 { 0, }, /* End of list */ 463 { 0, }, /* End of list */
367}; 464};
368MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); 465MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
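Editor's note (not part of the patch): the 32 Panther Point entries added above cover the contiguous PCI device ID range 0x1e40-0x1e5f, one enum slot per ID. A small sketch of that mapping, assuming only the range matters (ppt_index is an illustrative helper, not a kernel function):

    #include <stdio.h>

    #define PPT_ID_FIRST 0x1e40
    #define PPT_ID_LAST  0x1e5f

    /* map a Panther Point LPC device id to its index (TCO_PPT0..TCO_PPT31),
     * or -1 if the id falls outside the range added above */
    static int ppt_index(unsigned int pci_id)
    {
        if (pci_id < PPT_ID_FIRST || pci_id > PPT_ID_LAST)
            return -1;
        return (int)(pci_id - PPT_ID_FIRST);
    }

    int main(void)
    {
        printf("%d %d %d\n", ppt_index(0x1e40), ppt_index(0x1e5f), ppt_index(0x2310));
        return 0;   /* prints: 0 31 -1 */
    }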
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 1ac94125bf93..a2eee574784e 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -8,6 +8,7 @@
8#include <linux/sysrq.h> 8#include <linux/sysrq.h>
9#include <linux/stop_machine.h> 9#include <linux/stop_machine.h>
10#include <linux/freezer.h> 10#include <linux/freezer.h>
11#include <linux/syscore_ops.h>
11 12
12#include <xen/xen.h> 13#include <xen/xen.h>
13#include <xen/xenbus.h> 14#include <xen/xenbus.h>
@@ -70,8 +71,13 @@ static int xen_suspend(void *data)
70 BUG_ON(!irqs_disabled()); 71 BUG_ON(!irqs_disabled());
71 72
72 err = sysdev_suspend(PMSG_FREEZE); 73 err = sysdev_suspend(PMSG_FREEZE);
74 if (!err) {
75 err = syscore_suspend();
76 if (err)
77 sysdev_resume();
78 }
73 if (err) { 79 if (err) {
74 printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", 80 printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n",
75 err); 81 err);
76 return err; 82 return err;
77 } 83 }
@@ -95,6 +101,7 @@ static int xen_suspend(void *data)
95 xen_timer_resume(); 101 xen_timer_resume();
96 } 102 }
97 103
104 syscore_resume();
98 sysdev_resume(); 105 sysdev_resume();
99 106
100 return 0; 107 return 0;
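Editor's note (not part of the patch): the hunk nests syscore_suspend() inside a successful sysdev_suspend(), rolls the sysdev step back if the inner step fails, and resumes in reverse order. A standalone sketch of that suspend-with-rollback ordering, with invented stand-in functions for the two layers:

    #include <stdio.h>

    static int  step_a_suspend(void) { return 0; }   /* stand-in for sysdev_suspend() */
    static void step_a_resume(void)  { }
    static int  step_b_suspend(void) { return -1; }  /* stand-in for syscore_suspend(); fails here */
    static void step_b_resume(void)  { }

    static int suspend_both(void)
    {
        int err = step_a_suspend();
        if (!err) {
            err = step_b_suspend();
            if (err)
                step_a_resume();   /* undo the step that already succeeded */
        }
        return err;
    }

    static void resume_both(void)
    {
        step_b_resume();           /* reverse order of suspend */
        step_a_resume();
    }

    int main(void)
    {
        if (suspend_both()) {
            printf("suspend failed, state rolled back\n");
            return 1;
        }
        resume_both();
        return 0;
    }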
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 0ee594569dcc..85b67ffa2a43 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -286,11 +286,9 @@ static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid)
286 286
287struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) 287struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
288{ 288{
289 int err, flags; 289 int err;
290 struct p9_fid *fid; 290 struct p9_fid *fid;
291 struct v9fs_session_info *v9ses;
292 291
293 v9ses = v9fs_dentry2v9ses(dentry);
294 fid = v9fs_fid_clone_with_uid(dentry, 0); 292 fid = v9fs_fid_clone_with_uid(dentry, 0);
295 if (IS_ERR(fid)) 293 if (IS_ERR(fid))
296 goto error_out; 294 goto error_out;
@@ -299,17 +297,8 @@ struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
299 * dirty pages. We always request for the open fid in read-write 297 * dirty pages. We always request for the open fid in read-write
300 * mode so that a partial page write which result in page 298 * mode so that a partial page write which result in page
301 * read can work. 299 * read can work.
302 *
303 * we don't have a tsyncfs operation for older version
304 * of protocol. So make sure the write back fid is
305 * opened in O_SYNC mode.
306 */ 300 */
307 if (!v9fs_proto_dotl(v9ses)) 301 err = p9_client_open(fid, O_RDWR);
308 flags = O_RDWR | O_SYNC;
309 else
310 flags = O_RDWR;
311
312 err = p9_client_open(fid, flags);
313 if (err < 0) { 302 if (err < 0) {
314 p9_client_clunk(fid); 303 p9_client_clunk(fid);
315 fid = ERR_PTR(err); 304 fid = ERR_PTR(err);
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 9665c2b840e6..e5ebedfc5ed8 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -116,7 +116,6 @@ struct v9fs_session_info {
116 struct list_head slist; /* list of sessions registered with v9fs */ 116 struct list_head slist; /* list of sessions registered with v9fs */
117 struct backing_dev_info bdi; 117 struct backing_dev_info bdi;
118 struct rw_semaphore rename_sem; 118 struct rw_semaphore rename_sem;
119 struct p9_fid *root_fid; /* Used for file system sync */
120}; 119};
121 120
122/* cache_validity flags */ 121/* cache_validity flags */
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index b6a3b9f7fe4d..e022890c6f40 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -126,7 +126,9 @@ static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
126 retval = v9fs_refresh_inode_dotl(fid, inode); 126 retval = v9fs_refresh_inode_dotl(fid, inode);
127 else 127 else
128 retval = v9fs_refresh_inode(fid, inode); 128 retval = v9fs_refresh_inode(fid, inode);
129 if (retval <= 0) 129 if (retval == -ENOENT)
130 return 0;
131 if (retval < 0)
130 return retval; 132 return retval;
131 } 133 }
132out_valid: 134out_valid:
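Editor's note (not part of the patch): the revalidate change distinguishes "the object vanished on the server" (-ENOENT, report the dentry as invalid) from genuine errors (propagate). A small sketch of that mapping onto the usual d_revalidate return convention, using plain ints rather than the VFS types:

    #include <errno.h>
    #include <stdio.h>

    /* d_revalidate convention: 1 = still valid, 0 = invalid, <0 = real error */
    static int map_refresh_result(int refresh_ret)
    {
        if (refresh_ret == -ENOENT)
            return 0;              /* gone on the server: just invalidate the dentry */
        if (refresh_ret < 0)
            return refresh_ret;    /* transport or protocol error: propagate */
        return 1;                  /* refreshed fine, dentry stays valid */
    }

    int main(void)
    {
        printf("%d %d %d\n", map_refresh_result(0),
               map_refresh_result(-ENOENT), map_refresh_result(-EIO));
        return 0;   /* prints: 1 0 -5 */
    }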
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index ffbb113d5f33..82a7c38ddad0 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -811,7 +811,7 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
811 fid = v9fs_fid_lookup(dentry); 811 fid = v9fs_fid_lookup(dentry);
812 if (IS_ERR(fid)) { 812 if (IS_ERR(fid)) {
813 __putname(link); 813 __putname(link);
814 link = ERR_PTR(PTR_ERR(fid)); 814 link = ERR_CAST(fid);
815 goto ndset; 815 goto ndset;
816 } 816 }
817 retval = p9_client_readlink(fid, &target); 817 retval = p9_client_readlink(fid, &target);
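Editor's note (not part of the patch): ERR_CAST(fid) produces the same encoded error pointer as ERR_PTR(PTR_ERR(fid)) without the decode/re-encode round trip. A tiny sketch of the idea with simplified local stand-ins for the kernel's err.h helpers:

    #include <stdio.h>

    /* simplified stand-ins for the kernel's ERR_PTR/PTR_ERR/ERR_CAST */
    static inline void *ERR_PTR(long err)       { return (void *)err; }
    static inline long  PTR_ERR(const void *p)  { return (long)p; }
    static inline void *ERR_CAST(const void *p) { return (void *)p; }

    int main(void)
    {
        void *fid = ERR_PTR(-2);            /* pretend the lookup failed with -ENOENT */
        void *a = ERR_PTR(PTR_ERR(fid));    /* old style: decode, then re-encode */
        void *b = ERR_CAST(fid);            /* new style: reinterpret in place */
        printf("%ld %ld\n", PTR_ERR(a), PTR_ERR(b));   /* both print -2 */
        return 0;
    }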
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index f3eed3383e4f..feef6cdc1fd2 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -154,6 +154,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
154 retval = PTR_ERR(inode); 154 retval = PTR_ERR(inode);
155 goto release_sb; 155 goto release_sb;
156 } 156 }
157
157 root = d_alloc_root(inode); 158 root = d_alloc_root(inode);
158 if (!root) { 159 if (!root) {
159 iput(inode); 160 iput(inode);
@@ -185,21 +186,10 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
185 p9stat_free(st); 186 p9stat_free(st);
186 kfree(st); 187 kfree(st);
187 } 188 }
188 v9fs_fid_add(root, fid);
189 retval = v9fs_get_acl(inode, fid); 189 retval = v9fs_get_acl(inode, fid);
190 if (retval) 190 if (retval)
191 goto release_sb; 191 goto release_sb;
192 /* 192 v9fs_fid_add(root, fid);
193 * Add the root fid to session info. This is used
194 * for file system sync. We want a cloned fid here
195 * so that we can do a sync_filesystem after a
196 * shrink_dcache_for_umount
197 */
198 v9ses->root_fid = v9fs_fid_clone(root);
199 if (IS_ERR(v9ses->root_fid)) {
200 retval = PTR_ERR(v9ses->root_fid);
201 goto release_sb;
202 }
203 193
204 P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n"); 194 P9_DPRINTK(P9_DEBUG_VFS, " simple set mount, return 0\n");
205 return dget(sb->s_root); 195 return dget(sb->s_root);
@@ -210,11 +200,15 @@ close_session:
210 v9fs_session_close(v9ses); 200 v9fs_session_close(v9ses);
211 kfree(v9ses); 201 kfree(v9ses);
212 return ERR_PTR(retval); 202 return ERR_PTR(retval);
203
213release_sb: 204release_sb:
214 /* 205 /*
215 * we will do the session_close and root dentry 206 * we will do the session_close and root dentry release
216 * release in the below call. 207 * in the below call. But we need to clunk fid, because we haven't
208 * attached the fid to dentry so it won't get clunked
209 * automatically.
217 */ 210 */
211 p9_client_clunk(fid);
218 deactivate_locked_super(sb); 212 deactivate_locked_super(sb);
219 return ERR_PTR(retval); 213 return ERR_PTR(retval);
220} 214}
@@ -232,7 +226,7 @@ static void v9fs_kill_super(struct super_block *s)
232 P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); 226 P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
233 227
234 kill_anon_super(s); 228 kill_anon_super(s);
235 p9_client_clunk(v9ses->root_fid); 229
236 v9fs_session_cancel(v9ses); 230 v9fs_session_cancel(v9ses);
237 v9fs_session_close(v9ses); 231 v9fs_session_close(v9ses);
238 kfree(v9ses); 232 kfree(v9ses);
@@ -285,14 +279,6 @@ done:
285 return res; 279 return res;
286} 280}
287 281
288static int v9fs_sync_fs(struct super_block *sb, int wait)
289{
290 struct v9fs_session_info *v9ses = sb->s_fs_info;
291
292 P9_DPRINTK(P9_DEBUG_VFS, "v9fs_sync_fs: super_block %p\n", sb);
293 return p9_client_sync_fs(v9ses->root_fid);
294}
295
296static int v9fs_drop_inode(struct inode *inode) 282static int v9fs_drop_inode(struct inode *inode)
297{ 283{
298 struct v9fs_session_info *v9ses; 284 struct v9fs_session_info *v9ses;
@@ -307,6 +293,51 @@ static int v9fs_drop_inode(struct inode *inode)
307 return 1; 293 return 1;
308} 294}
309 295
296static int v9fs_write_inode(struct inode *inode,
297 struct writeback_control *wbc)
298{
299 int ret;
300 struct p9_wstat wstat;
301 struct v9fs_inode *v9inode;
302 /*
303 * send an fsync request to server irrespective of
304 * wbc->sync_mode.
305 */
306 P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
307 v9inode = V9FS_I(inode);
308 if (!v9inode->writeback_fid)
309 return 0;
310 v9fs_blank_wstat(&wstat);
311
312 ret = p9_client_wstat(v9inode->writeback_fid, &wstat);
313 if (ret < 0) {
314 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
315 return ret;
316 }
317 return 0;
318}
319
320static int v9fs_write_inode_dotl(struct inode *inode,
321 struct writeback_control *wbc)
322{
323 int ret;
324 struct v9fs_inode *v9inode;
325 /*
326 * send an fsync request to server irrespective of
327 * wbc->sync_mode.
328 */
329 P9_DPRINTK(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
330 v9inode = V9FS_I(inode);
331 if (!v9inode->writeback_fid)
332 return 0;
333 ret = p9_client_fsync(v9inode->writeback_fid, 0);
334 if (ret < 0) {
335 __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
336 return ret;
337 }
338 return 0;
339}
340
310static const struct super_operations v9fs_super_ops = { 341static const struct super_operations v9fs_super_ops = {
311 .alloc_inode = v9fs_alloc_inode, 342 .alloc_inode = v9fs_alloc_inode,
312 .destroy_inode = v9fs_destroy_inode, 343 .destroy_inode = v9fs_destroy_inode,
@@ -314,17 +345,18 @@ static const struct super_operations v9fs_super_ops = {
314 .evict_inode = v9fs_evict_inode, 345 .evict_inode = v9fs_evict_inode,
315 .show_options = generic_show_options, 346 .show_options = generic_show_options,
316 .umount_begin = v9fs_umount_begin, 347 .umount_begin = v9fs_umount_begin,
348 .write_inode = v9fs_write_inode,
317}; 349};
318 350
319static const struct super_operations v9fs_super_ops_dotl = { 351static const struct super_operations v9fs_super_ops_dotl = {
320 .alloc_inode = v9fs_alloc_inode, 352 .alloc_inode = v9fs_alloc_inode,
321 .destroy_inode = v9fs_destroy_inode, 353 .destroy_inode = v9fs_destroy_inode,
322 .sync_fs = v9fs_sync_fs,
323 .statfs = v9fs_statfs, 354 .statfs = v9fs_statfs,
324 .drop_inode = v9fs_drop_inode, 355 .drop_inode = v9fs_drop_inode,
325 .evict_inode = v9fs_evict_inode, 356 .evict_inode = v9fs_evict_inode,
326 .show_options = generic_show_options, 357 .show_options = generic_show_options,
327 .umount_begin = v9fs_umount_begin, 358 .umount_begin = v9fs_umount_begin,
359 .write_inode = v9fs_write_inode_dotl,
328}; 360};
329 361
330struct file_system_type v9fs_fs_type = { 362struct file_system_type v9fs_fs_type = {
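Editor's note (not part of the patch): the sync_fs-via-root-fid path is replaced by per-inode ->write_inode callbacks: legacy 9P flushes with a blank wstat, 9P2000.L issues an fsync on the writeback fid, and both re-mark the inode dirty when the server call fails. A standalone sketch of that "flush or re-dirty" shape, with an invented toy_inode and a stand-in for the server call:

    #include <stdio.h>

    struct toy_inode { int has_writeback_fid; int dirty; };

    static int server_flush(struct toy_inode *in) { (void)in; return 0; } /* stand-in for wstat/fsync */

    static int toy_write_inode(struct toy_inode *in)
    {
        int ret;

        if (!in->has_writeback_fid)
            return 0;               /* nothing was ever written back, nothing to flush */

        ret = server_flush(in);
        if (ret < 0) {
            in->dirty = 1;          /* keep the inode dirty so writeback retries later */
            return ret;
        }
        in->dirty = 0;
        return 0;
    }

    int main(void)
    {
        struct toy_inode in = { 1, 1 };
        printf("%d dirty=%d\n", toy_write_inode(&in), in.dirty);
        return 0;
    }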
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index de34bfad9ec3..5d505aaa72fb 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
178 178
179 if (value) { 179 if (value) {
180 acl = posix_acl_from_xattr(value, size); 180 acl = posix_acl_from_xattr(value, size);
181 if (acl == NULL) { 181 if (acl) {
182 value = NULL; 182 ret = posix_acl_valid(acl);
183 size = 0; 183 if (ret)
184 goto out;
184 } else if (IS_ERR(acl)) { 185 } else if (IS_ERR(acl)) {
185 return PTR_ERR(acl); 186 return PTR_ERR(acl);
186 } 187 }
187 } 188 }
188 189
189 ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type); 190 ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type);
190 191out:
191 posix_acl_release(acl); 192 posix_acl_release(acl);
192 193
193 return ret; 194 return ret;
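Editor's note (not part of the patch): the hunk validates the ACL parsed from the user-supplied xattr value before applying it. A minimal parse-validate-apply sketch with invented stand-ins (toy_acl, toy_acl_valid) in place of posix_acl_from_xattr()/posix_acl_valid():

    #include <errno.h>
    #include <stdio.h>

    struct toy_acl { int nentries; };

    static int toy_acl_valid(const struct toy_acl *acl)
    {
        return acl->nentries > 0 ? 0 : -EINVAL;
    }

    static int set_acl_from_value(const struct toy_acl *acl)
    {
        int ret = 0;

        if (acl) {
            ret = toy_acl_valid(acl);
            if (ret)
                goto out;           /* reject malformed input before touching disk */
        }
        /* ... apply acl here (or clear it when acl == NULL) ... */
    out:
        return ret;
    }

    int main(void)
    {
        struct toy_acl good = { 1 }, bad = { 0 };
        printf("%d %d\n", set_acl_from_value(&good), set_acl_from_value(&bad));
        return 0;
    }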
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 3458b5725540..8f4b81de3ae2 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -718,7 +718,7 @@ struct btrfs_space_info {
718 u64 total_bytes; /* total bytes in the space, 718 u64 total_bytes; /* total bytes in the space,
719 this doesn't take mirrors into account */ 719 this doesn't take mirrors into account */
720 u64 bytes_used; /* total bytes used, 720 u64 bytes_used; /* total bytes used,
721 this does't take mirrors into account */ 721 this doesn't take mirrors into account */
722 u64 bytes_pinned; /* total bytes pinned, will be freed when the 722 u64 bytes_pinned; /* total bytes pinned, will be freed when the
723 transaction finishes */ 723 transaction finishes */
724 u64 bytes_reserved; /* total bytes the allocator has reserved for 724 u64 bytes_reserved; /* total bytes the allocator has reserved for
@@ -740,8 +740,10 @@ struct btrfs_space_info {
740 */ 740 */
741 unsigned long reservation_progress; 741 unsigned long reservation_progress;
742 742
743 int full; /* indicates that we cannot allocate any more 743 int full:1; /* indicates that we cannot allocate any more
744 chunks for this space */ 744 chunks for this space */
745 int chunk_alloc:1; /* set if we are allocating a chunk */
746
745 int force_alloc; /* set if we need to force a chunk alloc for 747 int force_alloc; /* set if we need to force a chunk alloc for
746 this space */ 748 this space */
747 749
@@ -2576,6 +2578,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
2576int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, 2578int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
2577 struct inode *inode, u64 start, u64 end); 2579 struct inode *inode, u64 start, u64 end);
2578int btrfs_release_file(struct inode *inode, struct file *file); 2580int btrfs_release_file(struct inode *inode, struct file *file);
2581void btrfs_drop_pages(struct page **pages, size_t num_pages);
2582int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
2583 struct page **pages, size_t num_pages,
2584 loff_t pos, size_t write_bytes,
2585 struct extent_state **cached);
2579 2586
2580/* tree-defrag.c */ 2587/* tree-defrag.c */
2581int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, 2588int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8f1d44ba332f..228cf36ece83 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2824,6 +2824,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
2824 2824
2825 spin_lock(&delayed_refs->lock); 2825 spin_lock(&delayed_refs->lock);
2826 if (delayed_refs->num_entries == 0) { 2826 if (delayed_refs->num_entries == 0) {
2827 spin_unlock(&delayed_refs->lock);
2827 printk(KERN_INFO "delayed_refs has NO entry\n"); 2828 printk(KERN_INFO "delayed_refs has NO entry\n");
2828 return ret; 2829 return ret;
2829 } 2830 }
@@ -3057,7 +3058,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3057 btrfs_destroy_pinned_extent(root, 3058 btrfs_destroy_pinned_extent(root,
3058 root->fs_info->pinned_extents); 3059 root->fs_info->pinned_extents);
3059 3060
3060 t->use_count = 0; 3061 atomic_set(&t->use_count, 0);
3061 list_del_init(&t->list); 3062 list_del_init(&t->list);
3062 memset(t, 0, sizeof(*t)); 3063 memset(t, 0, sizeof(*t));
3063 kmem_cache_free(btrfs_transaction_cachep, t); 3064 kmem_cache_free(btrfs_transaction_cachep, t);
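Editor's note (not part of the patch): the first hunk above fixes an early return that left delayed_refs->lock held; the second switches use_count to atomic_set() to match its new atomic type. A tiny standalone sketch of the unlock-before-early-return rule, using a pthread mutex as a stand-in for the spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int num_entries;

    static int destroy_entries(void)
    {
        pthread_mutex_lock(&lock);
        if (num_entries == 0) {
            pthread_mutex_unlock(&lock);  /* never take an early return with the lock held */
            printf("nothing to do\n");
            return 0;
        }
        /* ... drain the entries ... */
        num_entries = 0;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        return destroy_entries();
    }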
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f619c3cb13b7..cd52f7f556ef 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -33,6 +33,25 @@
33#include "locking.h" 33#include "locking.h"
34#include "free-space-cache.h" 34#include "free-space-cache.h"
35 35
36/* control flags for do_chunk_alloc's force field
37 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
38 * if we really need one.
39 *
40 * CHUNK_ALLOC_FORCE means it must try to allocate one
41 *
42 * CHUNK_ALLOC_LIMITED means to only try and allocate one
43 * if we have very few chunks already allocated. This is
44 * used as part of the clustering code to help make sure
45 * we have a good pool of storage to cluster in, without
46 * filling the FS with empty chunks
47 *
48 */
49enum {
50 CHUNK_ALLOC_NO_FORCE = 0,
51 CHUNK_ALLOC_FORCE = 1,
52 CHUNK_ALLOC_LIMITED = 2,
53};
54
36static int update_block_group(struct btrfs_trans_handle *trans, 55static int update_block_group(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root, 56 struct btrfs_root *root,
38 u64 bytenr, u64 num_bytes, int alloc); 57 u64 bytenr, u64 num_bytes, int alloc);
@@ -3019,7 +3038,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3019 found->bytes_readonly = 0; 3038 found->bytes_readonly = 0;
3020 found->bytes_may_use = 0; 3039 found->bytes_may_use = 0;
3021 found->full = 0; 3040 found->full = 0;
3022 found->force_alloc = 0; 3041 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3042 found->chunk_alloc = 0;
3023 *space_info = found; 3043 *space_info = found;
3024 list_add_rcu(&found->list, &info->space_info); 3044 list_add_rcu(&found->list, &info->space_info);
3025 atomic_set(&found->caching_threads, 0); 3045 atomic_set(&found->caching_threads, 0);
@@ -3150,7 +3170,7 @@ again:
3150 if (!data_sinfo->full && alloc_chunk) { 3170 if (!data_sinfo->full && alloc_chunk) {
3151 u64 alloc_target; 3171 u64 alloc_target;
3152 3172
3153 data_sinfo->force_alloc = 1; 3173 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3154 spin_unlock(&data_sinfo->lock); 3174 spin_unlock(&data_sinfo->lock);
3155alloc: 3175alloc:
3156 alloc_target = btrfs_get_alloc_profile(root, 1); 3176 alloc_target = btrfs_get_alloc_profile(root, 1);
@@ -3160,7 +3180,8 @@ alloc:
3160 3180
3161 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 3181 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3162 bytes + 2 * 1024 * 1024, 3182 bytes + 2 * 1024 * 1024,
3163 alloc_target, 0); 3183 alloc_target,
3184 CHUNK_ALLOC_NO_FORCE);
3164 btrfs_end_transaction(trans, root); 3185 btrfs_end_transaction(trans, root);
3165 if (ret < 0) { 3186 if (ret < 0) {
3166 if (ret != -ENOSPC) 3187 if (ret != -ENOSPC)
@@ -3239,31 +3260,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
3239 rcu_read_lock(); 3260 rcu_read_lock();
3240 list_for_each_entry_rcu(found, head, list) { 3261 list_for_each_entry_rcu(found, head, list) {
3241 if (found->flags & BTRFS_BLOCK_GROUP_METADATA) 3262 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3242 found->force_alloc = 1; 3263 found->force_alloc = CHUNK_ALLOC_FORCE;
3243 } 3264 }
3244 rcu_read_unlock(); 3265 rcu_read_unlock();
3245} 3266}
3246 3267
3247static int should_alloc_chunk(struct btrfs_root *root, 3268static int should_alloc_chunk(struct btrfs_root *root,
3248 struct btrfs_space_info *sinfo, u64 alloc_bytes) 3269 struct btrfs_space_info *sinfo, u64 alloc_bytes,
3270 int force)
3249{ 3271{
3250 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; 3272 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3273 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3251 u64 thresh; 3274 u64 thresh;
3252 3275
3253 if (sinfo->bytes_used + sinfo->bytes_reserved + 3276 if (force == CHUNK_ALLOC_FORCE)
3254 alloc_bytes + 256 * 1024 * 1024 < num_bytes) 3277 return 1;
3278
3279 /*
3280 * in limited mode, we want to have some free space up to
3281 * about 1% of the FS size.
3282 */
3283 if (force == CHUNK_ALLOC_LIMITED) {
3284 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3285 thresh = max_t(u64, 64 * 1024 * 1024,
3286 div_factor_fine(thresh, 1));
3287
3288 if (num_bytes - num_allocated < thresh)
3289 return 1;
3290 }
3291
3292 /*
3293 * we have two similar checks here, one based on percentage
3294 * and once based on a hard number of 256MB. The idea
3295 * is that if we have a good amount of free
3296 * room, don't allocate a chunk. A good mount is
3297 * less than 80% utilized of the chunks we have allocated,
3298 * or more than 256MB free
3299 */
3300 if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3255 return 0; 3301 return 0;
3256 3302
3257 if (sinfo->bytes_used + sinfo->bytes_reserved + 3303 if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
3258 alloc_bytes < div_factor(num_bytes, 8))
3259 return 0; 3304 return 0;
3260 3305
3261 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); 3306 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3307
3308 /* 256MB or 5% of the FS */
3262 thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5)); 3309 thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
3263 3310
3264 if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3)) 3311 if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
3265 return 0; 3312 return 0;
3266
3267 return 1; 3313 return 1;
3268} 3314}
3269 3315
@@ -3273,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3273{ 3319{
3274 struct btrfs_space_info *space_info; 3320 struct btrfs_space_info *space_info;
3275 struct btrfs_fs_info *fs_info = extent_root->fs_info; 3321 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3322 int wait_for_alloc = 0;
3276 int ret = 0; 3323 int ret = 0;
3277 3324
3278 mutex_lock(&fs_info->chunk_mutex);
3279
3280 flags = btrfs_reduce_alloc_profile(extent_root, flags); 3325 flags = btrfs_reduce_alloc_profile(extent_root, flags);
3281 3326
3282 space_info = __find_space_info(extent_root->fs_info, flags); 3327 space_info = __find_space_info(extent_root->fs_info, flags);
@@ -3287,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3287 } 3332 }
3288 BUG_ON(!space_info); 3333 BUG_ON(!space_info);
3289 3334
3335again:
3290 spin_lock(&space_info->lock); 3336 spin_lock(&space_info->lock);
3291 if (space_info->force_alloc) 3337 if (space_info->force_alloc)
3292 force = 1; 3338 force = space_info->force_alloc;
3293 if (space_info->full) { 3339 if (space_info->full) {
3294 spin_unlock(&space_info->lock); 3340 spin_unlock(&space_info->lock);
3295 goto out; 3341 return 0;
3296 } 3342 }
3297 3343
3298 if (!force && !should_alloc_chunk(extent_root, space_info, 3344 if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3299 alloc_bytes)) {
3300 spin_unlock(&space_info->lock); 3345 spin_unlock(&space_info->lock);
3301 goto out; 3346 return 0;
3347 } else if (space_info->chunk_alloc) {
3348 wait_for_alloc = 1;
3349 } else {
3350 space_info->chunk_alloc = 1;
3302 } 3351 }
3352
3303 spin_unlock(&space_info->lock); 3353 spin_unlock(&space_info->lock);
3304 3354
3355 mutex_lock(&fs_info->chunk_mutex);
3356
3357 /*
3358 * The chunk_mutex is held throughout the entirety of a chunk
3359 * allocation, so once we've acquired the chunk_mutex we know that the
3360 * other guy is done and we need to recheck and see if we should
3361 * allocate.
3362 */
3363 if (wait_for_alloc) {
3364 mutex_unlock(&fs_info->chunk_mutex);
3365 wait_for_alloc = 0;
3366 goto again;
3367 }
3368
3305 /* 3369 /*
3306 * If we have mixed data/metadata chunks we want to make sure we keep 3370 * If we have mixed data/metadata chunks we want to make sure we keep
3307 * allocating mixed chunks instead of individual chunks. 3371 * allocating mixed chunks instead of individual chunks.
@@ -3327,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3327 space_info->full = 1; 3391 space_info->full = 1;
3328 else 3392 else
3329 ret = 1; 3393 ret = 1;
3330 space_info->force_alloc = 0; 3394
3395 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3396 space_info->chunk_alloc = 0;
3331 spin_unlock(&space_info->lock); 3397 spin_unlock(&space_info->lock);
3332out:
3333 mutex_unlock(&extent_root->fs_info->chunk_mutex); 3398 mutex_unlock(&extent_root->fs_info->chunk_mutex);
3334 return ret; 3399 return ret;
3335} 3400}
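Editor's note (not part of the patch): the rework above lets only one task allocate a chunk at a time: the chunk_alloc flag is claimed under the space_info spinlock, and latecomers take and drop chunk_mutex purely to wait for the allocator to finish, then loop back and re-evaluate. A standalone sketch of that flag-plus-mutex "wait and recheck" pattern using pthreads as stand-ins for the kernel locks:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t state_lock  = PTHREAD_MUTEX_INITIALIZER;
    static int full;           /* no more chunks can be added          */
    static int chunk_alloc;    /* somebody is allocating right now     */

    static int do_chunk_alloc(void)
    {
        int wait_for_alloc;

    again:
        wait_for_alloc = 0;
        pthread_mutex_lock(&state_lock);
        if (full) {
            pthread_mutex_unlock(&state_lock);
            return 0;
        }
        if (chunk_alloc)
            wait_for_alloc = 1;     /* someone else got here first */
        else
            chunk_alloc = 1;        /* we are the allocator */
        pthread_mutex_unlock(&state_lock);

        pthread_mutex_lock(&chunk_mutex);
        if (wait_for_alloc) {
            /* the allocator holds chunk_mutex for the whole allocation and
             * clears chunk_alloc before dropping it, so acquiring the mutex
             * means they are done; recheck from the top */
            pthread_mutex_unlock(&chunk_mutex);
            goto again;
        }

        /* ... allocate the chunk here ... */
        pthread_mutex_lock(&state_lock);
        chunk_alloc = 0;
        pthread_mutex_unlock(&state_lock);
        pthread_mutex_unlock(&chunk_mutex);
        return 1;
    }

    int main(void)
    {
        printf("%d\n", do_chunk_alloc());
        return 0;
    }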
@@ -5303,11 +5368,13 @@ loop:
5303 5368
5304 if (allowed_chunk_alloc) { 5369 if (allowed_chunk_alloc) {
5305 ret = do_chunk_alloc(trans, root, num_bytes + 5370 ret = do_chunk_alloc(trans, root, num_bytes +
5306 2 * 1024 * 1024, data, 1); 5371 2 * 1024 * 1024, data,
5372 CHUNK_ALLOC_LIMITED);
5307 allowed_chunk_alloc = 0; 5373 allowed_chunk_alloc = 0;
5308 done_chunk_alloc = 1; 5374 done_chunk_alloc = 1;
5309 } else if (!done_chunk_alloc) { 5375 } else if (!done_chunk_alloc &&
5310 space_info->force_alloc = 1; 5376 space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
5377 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5311 } 5378 }
5312 5379
5313 if (loop < LOOP_NO_EMPTY_SIZE) { 5380 if (loop < LOOP_NO_EMPTY_SIZE) {
@@ -5393,7 +5460,8 @@ again:
5393 */ 5460 */
5394 if (empty_size || root->ref_cows) 5461 if (empty_size || root->ref_cows)
5395 ret = do_chunk_alloc(trans, root->fs_info->extent_root, 5462 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5396 num_bytes + 2 * 1024 * 1024, data, 0); 5463 num_bytes + 2 * 1024 * 1024, data,
5464 CHUNK_ALLOC_NO_FORCE);
5397 5465
5398 WARN_ON(num_bytes < root->sectorsize); 5466 WARN_ON(num_bytes < root->sectorsize);
5399 ret = find_free_extent(trans, root, num_bytes, empty_size, 5467 ret = find_free_extent(trans, root, num_bytes, empty_size,
@@ -5405,7 +5473,7 @@ again:
5405 num_bytes = num_bytes & ~(root->sectorsize - 1); 5473 num_bytes = num_bytes & ~(root->sectorsize - 1);
5406 num_bytes = max(num_bytes, min_alloc_size); 5474 num_bytes = max(num_bytes, min_alloc_size);
5407 do_chunk_alloc(trans, root->fs_info->extent_root, 5475 do_chunk_alloc(trans, root->fs_info->extent_root,
5408 num_bytes, data, 1); 5476 num_bytes, data, CHUNK_ALLOC_FORCE);
5409 goto again; 5477 goto again;
5410 } 5478 }
5411 if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { 5479 if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
@@ -7991,6 +8059,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
7991 u64 group_start = group->key.objectid; 8059 u64 group_start = group->key.objectid;
7992 new_extents = kmalloc(sizeof(*new_extents), 8060 new_extents = kmalloc(sizeof(*new_extents),
7993 GFP_NOFS); 8061 GFP_NOFS);
8062 if (!new_extents) {
8063 ret = -ENOMEM;
8064 goto out;
8065 }
7994 nr_extents = 1; 8066 nr_extents = 1;
7995 ret = get_new_locations(reloc_inode, 8067 ret = get_new_locations(reloc_inode,
7996 extent_key, 8068 extent_key,
@@ -8109,13 +8181,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
8109 8181
8110 alloc_flags = update_block_group_flags(root, cache->flags); 8182 alloc_flags = update_block_group_flags(root, cache->flags);
8111 if (alloc_flags != cache->flags) 8183 if (alloc_flags != cache->flags)
8112 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); 8184 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
8185 CHUNK_ALLOC_FORCE);
8113 8186
8114 ret = set_block_group_ro(cache); 8187 ret = set_block_group_ro(cache);
8115 if (!ret) 8188 if (!ret)
8116 goto out; 8189 goto out;
8117 alloc_flags = get_alloc_profile(root, cache->space_info->flags); 8190 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8118 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); 8191 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
8192 CHUNK_ALLOC_FORCE);
8119 if (ret < 0) 8193 if (ret < 0)
8120 goto out; 8194 goto out;
8121 ret = set_block_group_ro(cache); 8195 ret = set_block_group_ro(cache);
@@ -8128,7 +8202,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8128 struct btrfs_root *root, u64 type) 8202 struct btrfs_root *root, u64 type)
8129{ 8203{
8130 u64 alloc_flags = get_alloc_profile(root, type); 8204 u64 alloc_flags = get_alloc_profile(root, type);
8131 return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); 8205 return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
8206 CHUNK_ALLOC_FORCE);
8132} 8207}
8133 8208
8134/* 8209/*
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 20ddb28602a8..ba41da59e31b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -690,6 +690,15 @@ static void cache_state(struct extent_state *state,
690 } 690 }
691} 691}
692 692
693static void uncache_state(struct extent_state **cached_ptr)
694{
695 if (cached_ptr && (*cached_ptr)) {
696 struct extent_state *state = *cached_ptr;
697 *cached_ptr = NULL;
698 free_extent_state(state);
699 }
700}
701
693/* 702/*
694 * set some bits on a range in the tree. This may require allocations or 703 * set some bits on a range in the tree. This may require allocations or
695 * sleeping, so the gfp mask is used to indicate what is allowed. 704 * sleeping, so the gfp mask is used to indicate what is allowed.
@@ -940,10 +949,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
940} 949}
941 950
942int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 951int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
943 gfp_t mask) 952 struct extent_state **cached_state, gfp_t mask)
944{ 953{
945 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL, 954 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
946 NULL, mask); 955 NULL, cached_state, mask);
947} 956}
948 957
949static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, 958static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1012,8 +1021,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
1012 mask); 1021 mask);
1013} 1022}
1014 1023
1015int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, 1024int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
1016 gfp_t mask)
1017{ 1025{
1018 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, 1026 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
1019 mask); 1027 mask);
@@ -1735,6 +1743,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
1735 1743
1736 do { 1744 do {
1737 struct page *page = bvec->bv_page; 1745 struct page *page = bvec->bv_page;
1746 struct extent_state *cached = NULL;
1747 struct extent_state *state;
1748
1738 tree = &BTRFS_I(page->mapping->host)->io_tree; 1749 tree = &BTRFS_I(page->mapping->host)->io_tree;
1739 1750
1740 start = ((u64)page->index << PAGE_CACHE_SHIFT) + 1751 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1749,9 +1760,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
1749 if (++bvec <= bvec_end) 1760 if (++bvec <= bvec_end)
1750 prefetchw(&bvec->bv_page->flags); 1761 prefetchw(&bvec->bv_page->flags);
1751 1762
1763 spin_lock(&tree->lock);
1764 state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
1765 if (state && state->start == start) {
1766 /*
1767 * take a reference on the state, unlock will drop
1768 * the ref
1769 */
1770 cache_state(state, &cached);
1771 }
1772 spin_unlock(&tree->lock);
1773
1752 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 1774 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1753 ret = tree->ops->readpage_end_io_hook(page, start, end, 1775 ret = tree->ops->readpage_end_io_hook(page, start, end,
1754 NULL); 1776 state);
1755 if (ret) 1777 if (ret)
1756 uptodate = 0; 1778 uptodate = 0;
1757 } 1779 }
@@ -1764,15 +1786,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
1764 test_bit(BIO_UPTODATE, &bio->bi_flags); 1786 test_bit(BIO_UPTODATE, &bio->bi_flags);
1765 if (err) 1787 if (err)
1766 uptodate = 0; 1788 uptodate = 0;
1789 uncache_state(&cached);
1767 continue; 1790 continue;
1768 } 1791 }
1769 } 1792 }
1770 1793
1771 if (uptodate) { 1794 if (uptodate) {
1772 set_extent_uptodate(tree, start, end, 1795 set_extent_uptodate(tree, start, end, &cached,
1773 GFP_ATOMIC); 1796 GFP_ATOMIC);
1774 } 1797 }
1775 unlock_extent(tree, start, end, GFP_ATOMIC); 1798 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1776 1799
1777 if (whole_page) { 1800 if (whole_page) {
1778 if (uptodate) { 1801 if (uptodate) {
@@ -1811,6 +1834,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
1811 1834
1812 do { 1835 do {
1813 struct page *page = bvec->bv_page; 1836 struct page *page = bvec->bv_page;
1837 struct extent_state *cached = NULL;
1814 tree = &BTRFS_I(page->mapping->host)->io_tree; 1838 tree = &BTRFS_I(page->mapping->host)->io_tree;
1815 1839
1816 start = ((u64)page->index << PAGE_CACHE_SHIFT) + 1840 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1821,13 +1845,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
1821 prefetchw(&bvec->bv_page->flags); 1845 prefetchw(&bvec->bv_page->flags);
1822 1846
1823 if (uptodate) { 1847 if (uptodate) {
1824 set_extent_uptodate(tree, start, end, GFP_ATOMIC); 1848 set_extent_uptodate(tree, start, end, &cached,
1849 GFP_ATOMIC);
1825 } else { 1850 } else {
1826 ClearPageUptodate(page); 1851 ClearPageUptodate(page);
1827 SetPageError(page); 1852 SetPageError(page);
1828 } 1853 }
1829 1854
1830 unlock_extent(tree, start, end, GFP_ATOMIC); 1855 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
1831 1856
1832 } while (bvec >= bio->bi_io_vec); 1857 } while (bvec >= bio->bi_io_vec);
1833 1858
@@ -2016,14 +2041,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2016 while (cur <= end) { 2041 while (cur <= end) {
2017 if (cur >= last_byte) { 2042 if (cur >= last_byte) {
2018 char *userpage; 2043 char *userpage;
2044 struct extent_state *cached = NULL;
2045
2019 iosize = PAGE_CACHE_SIZE - page_offset; 2046 iosize = PAGE_CACHE_SIZE - page_offset;
2020 userpage = kmap_atomic(page, KM_USER0); 2047 userpage = kmap_atomic(page, KM_USER0);
2021 memset(userpage + page_offset, 0, iosize); 2048 memset(userpage + page_offset, 0, iosize);
2022 flush_dcache_page(page); 2049 flush_dcache_page(page);
2023 kunmap_atomic(userpage, KM_USER0); 2050 kunmap_atomic(userpage, KM_USER0);
2024 set_extent_uptodate(tree, cur, cur + iosize - 1, 2051 set_extent_uptodate(tree, cur, cur + iosize - 1,
2025 GFP_NOFS); 2052 &cached, GFP_NOFS);
2026 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2053 unlock_extent_cached(tree, cur, cur + iosize - 1,
2054 &cached, GFP_NOFS);
2027 break; 2055 break;
2028 } 2056 }
2029 em = get_extent(inode, page, page_offset, cur, 2057 em = get_extent(inode, page, page_offset, cur,
@@ -2063,14 +2091,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2063 /* we've found a hole, just zero and go on */ 2091 /* we've found a hole, just zero and go on */
2064 if (block_start == EXTENT_MAP_HOLE) { 2092 if (block_start == EXTENT_MAP_HOLE) {
2065 char *userpage; 2093 char *userpage;
2094 struct extent_state *cached = NULL;
2095
2066 userpage = kmap_atomic(page, KM_USER0); 2096 userpage = kmap_atomic(page, KM_USER0);
2067 memset(userpage + page_offset, 0, iosize); 2097 memset(userpage + page_offset, 0, iosize);
2068 flush_dcache_page(page); 2098 flush_dcache_page(page);
2069 kunmap_atomic(userpage, KM_USER0); 2099 kunmap_atomic(userpage, KM_USER0);
2070 2100
2071 set_extent_uptodate(tree, cur, cur + iosize - 1, 2101 set_extent_uptodate(tree, cur, cur + iosize - 1,
2072 GFP_NOFS); 2102 &cached, GFP_NOFS);
2073 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); 2103 unlock_extent_cached(tree, cur, cur + iosize - 1,
2104 &cached, GFP_NOFS);
2074 cur = cur + iosize; 2105 cur = cur + iosize;
2075 page_offset += iosize; 2106 page_offset += iosize;
2076 continue; 2107 continue;
@@ -2650,7 +2681,7 @@ int extent_readpages(struct extent_io_tree *tree,
2650 prefetchw(&page->flags); 2681 prefetchw(&page->flags);
2651 list_del(&page->lru); 2682 list_del(&page->lru);
2652 if (!add_to_page_cache_lru(page, mapping, 2683 if (!add_to_page_cache_lru(page, mapping,
2653 page->index, GFP_KERNEL)) { 2684 page->index, GFP_NOFS)) {
2654 __extent_read_full_page(tree, page, get_extent, 2685 __extent_read_full_page(tree, page, get_extent,
2655 &bio, 0, &bio_flags); 2686 &bio, 0, &bio_flags);
2656 } 2687 }
@@ -2789,9 +2820,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
2789 iocount++; 2820 iocount++;
2790 block_start = block_start + iosize; 2821 block_start = block_start + iosize;
2791 } else { 2822 } else {
2792 set_extent_uptodate(tree, block_start, cur_end, 2823 struct extent_state *cached = NULL;
2824
2825 set_extent_uptodate(tree, block_start, cur_end, &cached,
2793 GFP_NOFS); 2826 GFP_NOFS);
2794 unlock_extent(tree, block_start, cur_end, GFP_NOFS); 2827 unlock_extent_cached(tree, block_start, cur_end,
2828 &cached, GFP_NOFS);
2795 block_start = cur_end + 1; 2829 block_start = cur_end + 1;
2796 } 2830 }
2797 page_offset = block_start & (PAGE_CACHE_SIZE - 1); 2831 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
@@ -3457,7 +3491,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3457 num_pages = num_extent_pages(eb->start, eb->len); 3491 num_pages = num_extent_pages(eb->start, eb->len);
3458 3492
3459 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, 3493 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3460 GFP_NOFS); 3494 NULL, GFP_NOFS);
3461 for (i = 0; i < num_pages; i++) { 3495 for (i = 0; i < num_pages; i++) {
3462 page = extent_buffer_page(eb, i); 3496 page = extent_buffer_page(eb, i);
3463 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || 3497 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3885,6 +3919,12 @@ static void move_pages(struct page *dst_page, struct page *src_page,
3885 kunmap_atomic(dst_kaddr, KM_USER0); 3919 kunmap_atomic(dst_kaddr, KM_USER0);
3886} 3920}
3887 3921
3922static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
3923{
3924 unsigned long distance = (src > dst) ? src - dst : dst - src;
3925 return distance < len;
3926}
3927
3888static void copy_pages(struct page *dst_page, struct page *src_page, 3928static void copy_pages(struct page *dst_page, struct page *src_page,
3889 unsigned long dst_off, unsigned long src_off, 3929 unsigned long dst_off, unsigned long src_off,
3890 unsigned long len) 3930 unsigned long len)
@@ -3892,10 +3932,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
3892 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); 3932 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3893 char *src_kaddr; 3933 char *src_kaddr;
3894 3934
3895 if (dst_page != src_page) 3935 if (dst_page != src_page) {
3896 src_kaddr = kmap_atomic(src_page, KM_USER1); 3936 src_kaddr = kmap_atomic(src_page, KM_USER1);
3897 else 3937 } else {
3898 src_kaddr = dst_kaddr; 3938 src_kaddr = dst_kaddr;
3939 BUG_ON(areas_overlap(src_off, dst_off, len));
3940 }
3899 3941
3900 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); 3942 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3901 kunmap_atomic(dst_kaddr, KM_USER0); 3943 kunmap_atomic(dst_kaddr, KM_USER0);
@@ -3970,7 +4012,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3970 "len %lu len %lu\n", dst_offset, len, dst->len); 4012 "len %lu len %lu\n", dst_offset, len, dst->len);
3971 BUG_ON(1); 4013 BUG_ON(1);
3972 } 4014 }
3973 if (dst_offset < src_offset) { 4015 if (!areas_overlap(src_offset, dst_offset, len)) {
3974 memcpy_extent_buffer(dst, dst_offset, src_offset, len); 4016 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3975 return; 4017 return;
3976 } 4018 }
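Editor's note (not part of the patch): with areas_overlap() added, the buffer move falls back to the slower overlap-safe path only when the source and destination ranges truly overlap, instead of whenever dst > src. A standalone sketch of the same check driving a memcpy-vs-memmove choice on an ordinary byte buffer:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
    {
        unsigned long distance = (src > dst) ? src - dst : dst - src;
        return distance < len;
    }

    static void move_range(char *buf, unsigned long dst, unsigned long src, unsigned long len)
    {
        if (!areas_overlap(src, dst, len))
            memcpy(buf + dst, buf + src, len);   /* fast path: disjoint ranges */
        else
            memmove(buf + dst, buf + src, len);  /* overlapping: copy safely */
    }

    int main(void)
    {
        char buf[16] = "abcdefgh";
        move_range(buf, 2, 0, 4);                /* overlapping move of "abcd" */
        printf("%s overlap=%d\n", buf, areas_overlap(0, 2, 4));
        return 0;
    }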
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index f62c5442835d..af2d7179c372 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -208,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
208 int bits, int exclusive_bits, u64 *failed_start, 208 int bits, int exclusive_bits, u64 *failed_start,
209 struct extent_state **cached_state, gfp_t mask); 209 struct extent_state **cached_state, gfp_t mask);
210int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 210int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
211 gfp_t mask); 211 struct extent_state **cached_state, gfp_t mask);
212int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, 212int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
213 gfp_t mask); 213 gfp_t mask);
214int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 214int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e621ea54a3fd..75899a01dded 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -104,7 +104,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
104/* 104/*
105 * unlocks pages after btrfs_file_write is done with them 105 * unlocks pages after btrfs_file_write is done with them
106 */ 106 */
107static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) 107void btrfs_drop_pages(struct page **pages, size_t num_pages)
108{ 108{
109 size_t i; 109 size_t i;
110 for (i = 0; i < num_pages; i++) { 110 for (i = 0; i < num_pages; i++) {
@@ -127,16 +127,13 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
127 * this also makes the decision about creating an inline extent vs 127 * this also makes the decision about creating an inline extent vs
128 * doing real data extents, marking pages dirty and delalloc as required. 128 * doing real data extents, marking pages dirty and delalloc as required.
129 */ 129 */
130static noinline int dirty_and_release_pages(struct btrfs_root *root, 130int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
131 struct file *file, 131 struct page **pages, size_t num_pages,
132 struct page **pages, 132 loff_t pos, size_t write_bytes,
133 size_t num_pages, 133 struct extent_state **cached)
134 loff_t pos,
135 size_t write_bytes)
136{ 134{
137 int err = 0; 135 int err = 0;
138 int i; 136 int i;
139 struct inode *inode = fdentry(file)->d_inode;
140 u64 num_bytes; 137 u64 num_bytes;
141 u64 start_pos; 138 u64 start_pos;
142 u64 end_of_last_block; 139 u64 end_of_last_block;
@@ -149,7 +146,7 @@ static noinline int dirty_and_release_pages(struct btrfs_root *root,
149 146
150 end_of_last_block = start_pos + num_bytes - 1; 147 end_of_last_block = start_pos + num_bytes - 1;
151 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, 148 err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
152 NULL); 149 cached);
153 if (err) 150 if (err)
154 return err; 151 return err;
155 152
@@ -992,9 +989,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
992 } 989 }
993 990
994 if (copied > 0) { 991 if (copied > 0) {
995 ret = dirty_and_release_pages(root, file, pages, 992 ret = btrfs_dirty_pages(root, inode, pages,
996 dirty_pages, pos, 993 dirty_pages, pos, copied,
997 copied); 994 NULL);
998 if (ret) { 995 if (ret) {
999 btrfs_delalloc_release_space(inode, 996 btrfs_delalloc_release_space(inode,
1000 dirty_pages << PAGE_CACHE_SHIFT); 997 dirty_pages << PAGE_CACHE_SHIFT);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index f561c953205b..63731a1fb0a1 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -508,6 +508,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
508 struct inode *inode; 508 struct inode *inode;
509 struct rb_node *node; 509 struct rb_node *node;
510 struct list_head *pos, *n; 510 struct list_head *pos, *n;
511 struct page **pages;
511 struct page *page; 512 struct page *page;
512 struct extent_state *cached_state = NULL; 513 struct extent_state *cached_state = NULL;
513 struct btrfs_free_cluster *cluster = NULL; 514 struct btrfs_free_cluster *cluster = NULL;
@@ -517,13 +518,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
517 u64 start, end, len; 518 u64 start, end, len;
518 u64 bytes = 0; 519 u64 bytes = 0;
519 u32 *crc, *checksums; 520 u32 *crc, *checksums;
520 pgoff_t index = 0, last_index = 0;
521 unsigned long first_page_offset; 521 unsigned long first_page_offset;
522 int num_checksums; 522 int index = 0, num_pages = 0;
523 int entries = 0; 523 int entries = 0;
524 int bitmaps = 0; 524 int bitmaps = 0;
525 int ret = 0; 525 int ret = 0;
526 bool next_page = false; 526 bool next_page = false;
527 bool out_of_space = false;
527 528
528 root = root->fs_info->tree_root; 529 root = root->fs_info->tree_root;
529 530
@@ -551,24 +552,31 @@ int btrfs_write_out_cache(struct btrfs_root *root,
551 return 0; 552 return 0;
552 } 553 }
553 554
554 last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; 555 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
556 PAGE_CACHE_SHIFT;
555 filemap_write_and_wait(inode->i_mapping); 557 filemap_write_and_wait(inode->i_mapping);
556 btrfs_wait_ordered_range(inode, inode->i_size & 558 btrfs_wait_ordered_range(inode, inode->i_size &
557 ~(root->sectorsize - 1), (u64)-1); 559 ~(root->sectorsize - 1), (u64)-1);
558 560
559 /* We need a checksum per page. */ 561 /* We need a checksum per page. */
560 num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE; 562 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
561 crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS);
562 if (!crc) { 563 if (!crc) {
563 iput(inode); 564 iput(inode);
564 return 0; 565 return 0;
565 } 566 }
566 567
568 pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
569 if (!pages) {
570 kfree(crc);
571 iput(inode);
572 return 0;
573 }
574
567 /* Since the first page has all of our checksums and our generation we 575 /* Since the first page has all of our checksums and our generation we
568 * need to calculate the offset into the page that we can start writing 576 * need to calculate the offset into the page that we can start writing
569 * our entries. 577 * our entries.
570 */ 578 */
571 first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); 579 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
572 580
573 /* Get the cluster for this block_group if it exists */ 581 /* Get the cluster for this block_group if it exists */
574 if (!list_empty(&block_group->cluster_list)) 582 if (!list_empty(&block_group->cluster_list))
@@ -590,20 +598,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
590 * after find_get_page at this point. Just putting this here so people 598 * after find_get_page at this point. Just putting this here so people
591 * know and don't freak out. 599 * know and don't freak out.
592 */ 600 */
593 while (index <= last_index) { 601 while (index < num_pages) {
594 page = grab_cache_page(inode->i_mapping, index); 602 page = grab_cache_page(inode->i_mapping, index);
595 if (!page) { 603 if (!page) {
596 pgoff_t i = 0; 604 int i;
597 605
598 while (i < index) { 606 for (i = 0; i < num_pages; i++) {
599 page = find_get_page(inode->i_mapping, i); 607 unlock_page(pages[i]);
600 unlock_page(page); 608 page_cache_release(pages[i]);
601 page_cache_release(page);
602 page_cache_release(page);
603 i++;
604 } 609 }
605 goto out_free; 610 goto out_free;
606 } 611 }
612 pages[index] = page;
607 index++; 613 index++;
608 } 614 }
609 615
@@ -631,7 +637,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
631 offset = start_offset; 637 offset = start_offset;
632 } 638 }
633 639
634 page = find_get_page(inode->i_mapping, index); 640 if (index >= num_pages) {
641 out_of_space = true;
642 break;
643 }
644
645 page = pages[index];
635 646
636 addr = kmap(page); 647 addr = kmap(page);
637 entry = addr + start_offset; 648 entry = addr + start_offset;
@@ -708,23 +719,6 @@ int btrfs_write_out_cache(struct btrfs_root *root,
708 719
709 bytes += PAGE_CACHE_SIZE; 720 bytes += PAGE_CACHE_SIZE;
710 721
711 ClearPageChecked(page);
712 set_page_extent_mapped(page);
713 SetPageUptodate(page);
714 set_page_dirty(page);
715
716 /*
717 * We need to release our reference we got for grab_cache_page,
718 * except for the first page which will hold our checksums, we
719 * do that below.
720 */
721 if (index != 0) {
722 unlock_page(page);
723 page_cache_release(page);
724 }
725
726 page_cache_release(page);
727
728 index++; 722 index++;
729 } while (node || next_page); 723 } while (node || next_page);
730 724
@@ -734,7 +728,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
734 struct btrfs_free_space *entry = 728 struct btrfs_free_space *entry =
735 list_entry(pos, struct btrfs_free_space, list); 729 list_entry(pos, struct btrfs_free_space, list);
736 730
737 page = find_get_page(inode->i_mapping, index); 731 if (index >= num_pages) {
732 out_of_space = true;
733 break;
734 }
735 page = pages[index];
738 736
739 addr = kmap(page); 737 addr = kmap(page);
740 memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); 738 memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
@@ -745,64 +743,58 @@ int btrfs_write_out_cache(struct btrfs_root *root,
745 crc++; 743 crc++;
746 bytes += PAGE_CACHE_SIZE; 744 bytes += PAGE_CACHE_SIZE;
747 745
748 ClearPageChecked(page);
749 set_page_extent_mapped(page);
750 SetPageUptodate(page);
751 set_page_dirty(page);
752 unlock_page(page);
753 page_cache_release(page);
754 page_cache_release(page);
755 list_del_init(&entry->list); 746 list_del_init(&entry->list);
756 index++; 747 index++;
757 } 748 }
758 749
750 if (out_of_space) {
751 btrfs_drop_pages(pages, num_pages);
752 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
753 i_size_read(inode) - 1, &cached_state,
754 GFP_NOFS);
755 ret = 0;
756 goto out_free;
757 }
758
759 /* Zero out the rest of the pages just to make sure */ 759 /* Zero out the rest of the pages just to make sure */
760 while (index <= last_index) { 760 while (index < num_pages) {
761 void *addr; 761 void *addr;
762 762
763 page = find_get_page(inode->i_mapping, index); 763 page = pages[index];
764
765 addr = kmap(page); 764 addr = kmap(page);
766 memset(addr, 0, PAGE_CACHE_SIZE); 765 memset(addr, 0, PAGE_CACHE_SIZE);
767 kunmap(page); 766 kunmap(page);
768 ClearPageChecked(page);
769 set_page_extent_mapped(page);
770 SetPageUptodate(page);
771 set_page_dirty(page);
772 unlock_page(page);
773 page_cache_release(page);
774 page_cache_release(page);
775 bytes += PAGE_CACHE_SIZE; 767 bytes += PAGE_CACHE_SIZE;
776 index++; 768 index++;
777 } 769 }
778 770
779 btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state);
780
781 /* Write the checksums and trans id to the first page */ 771 /* Write the checksums and trans id to the first page */
782 { 772 {
783 void *addr; 773 void *addr;
784 u64 *gen; 774 u64 *gen;
785 775
786 page = find_get_page(inode->i_mapping, 0); 776 page = pages[0];
787 777
788 addr = kmap(page); 778 addr = kmap(page);
789 memcpy(addr, checksums, sizeof(u32) * num_checksums); 779 memcpy(addr, checksums, sizeof(u32) * num_pages);
790 gen = addr + (sizeof(u32) * num_checksums); 780 gen = addr + (sizeof(u32) * num_pages);
791 *gen = trans->transid; 781 *gen = trans->transid;
792 kunmap(page); 782 kunmap(page);
793 ClearPageChecked(page);
794 set_page_extent_mapped(page);
795 SetPageUptodate(page);
796 set_page_dirty(page);
797 unlock_page(page);
798 page_cache_release(page);
799 page_cache_release(page);
800 } 783 }
801 BTRFS_I(inode)->generation = trans->transid;
802 784
785 ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
786 bytes, &cached_state);
787 btrfs_drop_pages(pages, num_pages);
803 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, 788 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
804 i_size_read(inode) - 1, &cached_state, GFP_NOFS); 789 i_size_read(inode) - 1, &cached_state, GFP_NOFS);
805 790
791 if (ret) {
792 ret = 0;
793 goto out_free;
794 }
795
796 BTRFS_I(inode)->generation = trans->transid;
797
806 filemap_write_and_wait(inode->i_mapping); 798 filemap_write_and_wait(inode->i_mapping);
807 799
808 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 800 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -853,6 +845,7 @@ out_free:
853 BTRFS_I(inode)->generation = 0; 845 BTRFS_I(inode)->generation = 0;
854 } 846 }
855 kfree(checksums); 847 kfree(checksums);
848 kfree(pages);
856 btrfs_update_inode(trans, root, inode); 849 btrfs_update_inode(trans, root, inode);
857 iput(inode); 850 iput(inode);
858 return ret; 851 return ret;
@@ -1775,10 +1768,13 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
1775 1768
1776 while ((node = rb_last(&block_group->free_space_offset)) != NULL) { 1769 while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
1777 info = rb_entry(node, struct btrfs_free_space, offset_index); 1770 info = rb_entry(node, struct btrfs_free_space, offset_index);
1778 unlink_free_space(block_group, info); 1771 if (!info->bitmap) {
1779 if (info->bitmap) 1772 unlink_free_space(block_group, info);
1780 kfree(info->bitmap); 1773 kmem_cache_free(btrfs_free_space_cachep, info);
1781 kmem_cache_free(btrfs_free_space_cachep, info); 1774 } else {
1775 free_bitmap(block_group, info);
1776 }
1777
1782 if (need_resched()) { 1778 if (need_resched()) {
1783 spin_unlock(&block_group->tree_lock); 1779 spin_unlock(&block_group->tree_lock);
1784 cond_resched(); 1780 cond_resched();
@@ -2308,7 +2304,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2308 start = entry->offset; 2304 start = entry->offset;
2309 bytes = min(entry->bytes, end - start); 2305 bytes = min(entry->bytes, end - start);
2310 unlink_free_space(block_group, entry); 2306 unlink_free_space(block_group, entry);
2311 kfree(entry); 2307 kmem_cache_free(btrfs_free_space_cachep, entry);
2312 } 2308 }
2313 2309
2314 spin_unlock(&block_group->tree_lock); 2310 spin_unlock(&block_group->tree_lock);
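Editor's note (not part of the patch): the cache writer above now rounds the inode size up to whole pages, keeps one page pointer and one CRC slot per page, and reserves room at the start of the first page for all checksums plus the generation. A small sketch of that sizing arithmetic (PAGE_SIZE fixed at 4096 for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        uint64_t isize = 10000;                                        /* example inode size */
        unsigned int num_pages = (isize + PAGE_SIZE - 1) / PAGE_SIZE;  /* round up to pages */
        size_t first_page_offset = sizeof(uint32_t) * num_pages        /* one crc per page  */
                                 + sizeof(uint64_t);                   /* plus the generation */

        printf("pages=%u header=%zu bytes\n", num_pages, first_page_offset);
        return 0;   /* prints: pages=3 header=20 bytes */
    }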
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5cc64ab9c485..7cd8ab0ef04d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -954,6 +954,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
954 1, 0, NULL, GFP_NOFS); 954 1, 0, NULL, GFP_NOFS);
955 while (start < end) { 955 while (start < end) {
956 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 956 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
957 BUG_ON(!async_cow);
957 async_cow->inode = inode; 958 async_cow->inode = inode;
958 async_cow->root = root; 959 async_cow->root = root;
959 async_cow->locked_page = locked_page; 960 async_cow->locked_page = locked_page;
@@ -1770,9 +1771,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1770 add_pending_csums(trans, inode, ordered_extent->file_offset, 1771 add_pending_csums(trans, inode, ordered_extent->file_offset,
1771 &ordered_extent->list); 1772 &ordered_extent->list);
1772 1773
1773 btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1774 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1774 ret = btrfs_update_inode(trans, root, inode); 1775 if (!ret) {
1775 BUG_ON(ret); 1776 ret = btrfs_update_inode(trans, root, inode);
1777 BUG_ON(ret);
1778 }
1779 ret = 0;
1776out: 1780out:
1777 if (nolock) { 1781 if (nolock) {
1778 if (trans) 1782 if (trans)
@@ -2590,6 +2594,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2590 struct btrfs_inode_item *item, 2594 struct btrfs_inode_item *item,
2591 struct inode *inode) 2595 struct inode *inode)
2592{ 2596{
2597 if (!leaf->map_token)
2598 map_private_extent_buffer(leaf, (unsigned long)item,
2599 sizeof(struct btrfs_inode_item),
2600 &leaf->map_token, &leaf->kaddr,
2601 &leaf->map_start, &leaf->map_len,
2602 KM_USER1);
2603
2593 btrfs_set_inode_uid(leaf, item, inode->i_uid); 2604 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2594 btrfs_set_inode_gid(leaf, item, inode->i_gid); 2605 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2595 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); 2606 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
@@ -2618,6 +2629,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2618 btrfs_set_inode_rdev(leaf, item, inode->i_rdev); 2629 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2619 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); 2630 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2620 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); 2631 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2632
2633 if (leaf->map_token) {
2634 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2635 leaf->map_token = NULL;
2636 }
2621} 2637}
2622 2638
2623/* 2639/*
@@ -4207,10 +4223,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4207 struct btrfs_key found_key; 4223 struct btrfs_key found_key;
4208 struct btrfs_path *path; 4224 struct btrfs_path *path;
4209 int ret; 4225 int ret;
4210 u32 nritems;
4211 struct extent_buffer *leaf; 4226 struct extent_buffer *leaf;
4212 int slot; 4227 int slot;
4213 int advance;
4214 unsigned char d_type; 4228 unsigned char d_type;
4215 int over = 0; 4229 int over = 0;
4216 u32 di_cur; 4230 u32 di_cur;
@@ -4253,27 +4267,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4253 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4267 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4254 if (ret < 0) 4268 if (ret < 0)
4255 goto err; 4269 goto err;
4256 advance = 0;
4257 4270
4258 while (1) { 4271 while (1) {
4259 leaf = path->nodes[0]; 4272 leaf = path->nodes[0];
4260 nritems = btrfs_header_nritems(leaf);
4261 slot = path->slots[0]; 4273 slot = path->slots[0];
4262 if (advance || slot >= nritems) { 4274 if (slot >= btrfs_header_nritems(leaf)) {
4263 if (slot >= nritems - 1) { 4275 ret = btrfs_next_leaf(root, path);
4264 ret = btrfs_next_leaf(root, path); 4276 if (ret < 0)
4265 if (ret) 4277 goto err;
4266 break; 4278 else if (ret > 0)
4267 leaf = path->nodes[0]; 4279 break;
4268 nritems = btrfs_header_nritems(leaf); 4280 continue;
4269 slot = path->slots[0];
4270 } else {
4271 slot++;
4272 path->slots[0]++;
4273 }
4274 } 4281 }
4275 4282
4276 advance = 1;
4277 item = btrfs_item_nr(leaf, slot); 4283 item = btrfs_item_nr(leaf, slot);
4278 btrfs_item_key_to_cpu(leaf, &found_key, slot); 4284 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4279 4285
@@ -4282,7 +4288,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4282 if (btrfs_key_type(&found_key) != key_type) 4288 if (btrfs_key_type(&found_key) != key_type)
4283 break; 4289 break;
4284 if (found_key.offset < filp->f_pos) 4290 if (found_key.offset < filp->f_pos)
4285 continue; 4291 goto next;
4286 4292
4287 filp->f_pos = found_key.offset; 4293 filp->f_pos = found_key.offset;
4288 4294
@@ -4335,6 +4341,8 @@ skip:
4335 di_cur += di_len; 4341 di_cur += di_len;
4336 di = (struct btrfs_dir_item *)((char *)di + di_len); 4342 di = (struct btrfs_dir_item *)((char *)di + di_len);
4337 } 4343 }
4344next:
4345 path->slots[0]++;
4338 } 4346 }
4339 4347
4340 /* Reached end of directory/root. Bump pos past the last item. */ 4348 /* Reached end of directory/root. Bump pos past the last item. */
@@ -4527,14 +4535,17 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4527 BUG_ON(!path); 4535 BUG_ON(!path);
4528 4536
4529 inode = new_inode(root->fs_info->sb); 4537 inode = new_inode(root->fs_info->sb);
4530 if (!inode) 4538 if (!inode) {
4539 btrfs_free_path(path);
4531 return ERR_PTR(-ENOMEM); 4540 return ERR_PTR(-ENOMEM);
4541 }
4532 4542
4533 if (dir) { 4543 if (dir) {
4534 trace_btrfs_inode_request(dir); 4544 trace_btrfs_inode_request(dir);
4535 4545
4536 ret = btrfs_set_inode_index(dir, index); 4546 ret = btrfs_set_inode_index(dir, index);
4537 if (ret) { 4547 if (ret) {
4548 btrfs_free_path(path);
4538 iput(inode); 4549 iput(inode);
4539 return ERR_PTR(ret); 4550 return ERR_PTR(ret);
4540 } 4551 }
@@ -4721,9 +4732,10 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4721 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4732 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4722 dentry->d_name.len, dir->i_ino, objectid, 4733 dentry->d_name.len, dir->i_ino, objectid,
4723 BTRFS_I(dir)->block_group, mode, &index); 4734 BTRFS_I(dir)->block_group, mode, &index);
4724 err = PTR_ERR(inode); 4735 if (IS_ERR(inode)) {
4725 if (IS_ERR(inode)) 4736 err = PTR_ERR(inode);
4726 goto out_unlock; 4737 goto out_unlock;
4738 }
4727 4739
4728 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 4740 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4729 if (err) { 4741 if (err) {
@@ -4782,9 +4794,10 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4782 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4794 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4783 dentry->d_name.len, dir->i_ino, objectid, 4795 dentry->d_name.len, dir->i_ino, objectid,
4784 BTRFS_I(dir)->block_group, mode, &index); 4796 BTRFS_I(dir)->block_group, mode, &index);
4785 err = PTR_ERR(inode); 4797 if (IS_ERR(inode)) {
4786 if (IS_ERR(inode)) 4798 err = PTR_ERR(inode);
4787 goto out_unlock; 4799 goto out_unlock;
4800 }
4788 4801
4789 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 4802 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
4790 if (err) { 4803 if (err) {
@@ -4834,9 +4847,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4834 if (inode->i_nlink == ~0U) 4847 if (inode->i_nlink == ~0U)
4835 return -EMLINK; 4848 return -EMLINK;
4836 4849
4837 btrfs_inc_nlink(inode);
4838 inode->i_ctime = CURRENT_TIME;
4839
4840 err = btrfs_set_inode_index(dir, &index); 4850 err = btrfs_set_inode_index(dir, &index);
4841 if (err) 4851 if (err)
4842 goto fail; 4852 goto fail;
@@ -4852,6 +4862,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4852 goto fail; 4862 goto fail;
4853 } 4863 }
4854 4864
4865 btrfs_inc_nlink(inode);
4866 inode->i_ctime = CURRENT_TIME;
4867
4855 btrfs_set_trans_block_group(trans, dir); 4868 btrfs_set_trans_block_group(trans, dir);
4856 ihold(inode); 4869 ihold(inode);
4857 4870
@@ -4989,6 +5002,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
4989 inline_size = btrfs_file_extent_inline_item_len(leaf, 5002 inline_size = btrfs_file_extent_inline_item_len(leaf,
4990 btrfs_item_nr(leaf, path->slots[0])); 5003 btrfs_item_nr(leaf, path->slots[0]));
4991 tmp = kmalloc(inline_size, GFP_NOFS); 5004 tmp = kmalloc(inline_size, GFP_NOFS);
5005 if (!tmp)
5006 return -ENOMEM;
4992 ptr = btrfs_file_extent_inline_start(item); 5007 ptr = btrfs_file_extent_inline_start(item);
4993 5008
4994 read_extent_buffer(leaf, tmp, ptr, inline_size); 5009 read_extent_buffer(leaf, tmp, ptr, inline_size);
@@ -5221,7 +5236,7 @@ again:
5221 btrfs_mark_buffer_dirty(leaf); 5236 btrfs_mark_buffer_dirty(leaf);
5222 } 5237 }
5223 set_extent_uptodate(io_tree, em->start, 5238 set_extent_uptodate(io_tree, em->start,
5224 extent_map_end(em) - 1, GFP_NOFS); 5239 extent_map_end(em) - 1, NULL, GFP_NOFS);
5225 goto insert; 5240 goto insert;
5226 } else { 5241 } else {
5227 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); 5242 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
@@ -5428,17 +5443,30 @@ out:
5428} 5443}
5429 5444
5430static struct extent_map *btrfs_new_extent_direct(struct inode *inode, 5445static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5446 struct extent_map *em,
5431 u64 start, u64 len) 5447 u64 start, u64 len)
5432{ 5448{
5433 struct btrfs_root *root = BTRFS_I(inode)->root; 5449 struct btrfs_root *root = BTRFS_I(inode)->root;
5434 struct btrfs_trans_handle *trans; 5450 struct btrfs_trans_handle *trans;
5435 struct extent_map *em;
5436 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 5451 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
5437 struct btrfs_key ins; 5452 struct btrfs_key ins;
5438 u64 alloc_hint; 5453 u64 alloc_hint;
5439 int ret; 5454 int ret;
5455 bool insert = false;
5440 5456
5441 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); 5457 /*
5458 * Ok if the extent map we looked up is a hole and is for the exact
5459 * range we want, there is no reason to allocate a new one, however if
5460 * it is not right then we need to free this one and drop the cache for
5461 * our range.
5462 */
5463 if (em->block_start != EXTENT_MAP_HOLE || em->start != start ||
5464 em->len != len) {
5465 free_extent_map(em);
5466 em = NULL;
5467 insert = true;
5468 btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5469 }
5442 5470
5443 trans = btrfs_join_transaction(root, 0); 5471 trans = btrfs_join_transaction(root, 0);
5444 if (IS_ERR(trans)) 5472 if (IS_ERR(trans))
@@ -5454,10 +5482,12 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5454 goto out; 5482 goto out;
5455 } 5483 }
5456 5484
5457 em = alloc_extent_map(GFP_NOFS);
5458 if (!em) { 5485 if (!em) {
5459 em = ERR_PTR(-ENOMEM); 5486 em = alloc_extent_map(GFP_NOFS);
5460 goto out; 5487 if (!em) {
5488 em = ERR_PTR(-ENOMEM);
5489 goto out;
5490 }
5461 } 5491 }
5462 5492
5463 em->start = start; 5493 em->start = start;
@@ -5467,9 +5497,15 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5467 em->block_start = ins.objectid; 5497 em->block_start = ins.objectid;
5468 em->block_len = ins.offset; 5498 em->block_len = ins.offset;
5469 em->bdev = root->fs_info->fs_devices->latest_bdev; 5499 em->bdev = root->fs_info->fs_devices->latest_bdev;
5500
5501 /*
5502 * We need to do this because if we're using the original em we searched
5503 * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that.
5504 */
5505 em->flags = 0;
5470 set_bit(EXTENT_FLAG_PINNED, &em->flags); 5506 set_bit(EXTENT_FLAG_PINNED, &em->flags);
5471 5507
5472 while (1) { 5508 while (insert) {
5473 write_lock(&em_tree->lock); 5509 write_lock(&em_tree->lock);
5474 ret = add_extent_mapping(em_tree, em); 5510 ret = add_extent_mapping(em_tree, em);
5475 write_unlock(&em_tree->lock); 5511 write_unlock(&em_tree->lock);
@@ -5687,8 +5723,7 @@ must_cow:
5687 * it above 5723 * it above
5688 */ 5724 */
5689 len = bh_result->b_size; 5725 len = bh_result->b_size;
5690 free_extent_map(em); 5726 em = btrfs_new_extent_direct(inode, em, start, len);
5691 em = btrfs_new_extent_direct(inode, start, len);
5692 if (IS_ERR(em)) 5727 if (IS_ERR(em))
5693 return PTR_ERR(em); 5728 return PTR_ERR(em);
5694 len = min(len, em->len - (start - em->start)); 5729 len = min(len, em->len - (start - em->start));
@@ -5851,8 +5886,10 @@ again:
5851 } 5886 }
5852 5887
5853 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); 5888 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5854 btrfs_ordered_update_i_size(inode, 0, ordered); 5889 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5855 btrfs_update_inode(trans, root, inode); 5890 if (!ret)
5891 btrfs_update_inode(trans, root, inode);
5892 ret = 0;
5856out_unlock: 5893out_unlock:
5857 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, 5894 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5858 ordered->file_offset + ordered->len - 1, 5895 ordered->file_offset + ordered->len - 1,
@@ -5938,7 +5975,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
5938 5975
5939static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, 5976static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
5940 int rw, u64 file_offset, int skip_sum, 5977 int rw, u64 file_offset, int skip_sum,
5941 u32 *csums) 5978 u32 *csums, int async_submit)
5942{ 5979{
5943 int write = rw & REQ_WRITE; 5980 int write = rw & REQ_WRITE;
5944 struct btrfs_root *root = BTRFS_I(inode)->root; 5981 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5949,13 +5986,24 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
5949 if (ret) 5986 if (ret)
5950 goto err; 5987 goto err;
5951 5988
5952 if (write && !skip_sum) { 5989 if (skip_sum)
5990 goto map;
5991
5992 if (write && async_submit) {
5953 ret = btrfs_wq_submit_bio(root->fs_info, 5993 ret = btrfs_wq_submit_bio(root->fs_info,
5954 inode, rw, bio, 0, 0, 5994 inode, rw, bio, 0, 0,
5955 file_offset, 5995 file_offset,
5956 __btrfs_submit_bio_start_direct_io, 5996 __btrfs_submit_bio_start_direct_io,
5957 __btrfs_submit_bio_done); 5997 __btrfs_submit_bio_done);
5958 goto err; 5998 goto err;
5999 } else if (write) {
6000 /*
6001 * If we aren't doing async submit, calculate the csum of the
6002 * bio now.
6003 */
6004 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
6005 if (ret)
6006 goto err;
5959 } else if (!skip_sum) { 6007 } else if (!skip_sum) {
5960 ret = btrfs_lookup_bio_sums_dio(root, inode, bio, 6008 ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
5961 file_offset, csums); 6009 file_offset, csums);
@@ -5963,7 +6011,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
5963 goto err; 6011 goto err;
5964 } 6012 }
5965 6013
5966 ret = btrfs_map_bio(root, rw, bio, 0, 1); 6014map:
6015 ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
5967err: 6016err:
5968 bio_put(bio); 6017 bio_put(bio);
5969 return ret; 6018 return ret;
@@ -5985,23 +6034,30 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
5985 int nr_pages = 0; 6034 int nr_pages = 0;
5986 u32 *csums = dip->csums; 6035 u32 *csums = dip->csums;
5987 int ret = 0; 6036 int ret = 0;
6037 int async_submit = 0;
5988 int write = rw & REQ_WRITE; 6038 int write = rw & REQ_WRITE;
5989 6039
5990 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
5991 if (!bio)
5992 return -ENOMEM;
5993 bio->bi_private = dip;
5994 bio->bi_end_io = btrfs_end_dio_bio;
5995 atomic_inc(&dip->pending_bios);
5996
5997 map_length = orig_bio->bi_size; 6040 map_length = orig_bio->bi_size;
5998 ret = btrfs_map_block(map_tree, READ, start_sector << 9, 6041 ret = btrfs_map_block(map_tree, READ, start_sector << 9,
5999 &map_length, NULL, 0); 6042 &map_length, NULL, 0);
6000 if (ret) { 6043 if (ret) {
6001 bio_put(bio); 6044 bio_put(orig_bio);
6002 return -EIO; 6045 return -EIO;
6003 } 6046 }
6004 6047
6048 if (map_length >= orig_bio->bi_size) {
6049 bio = orig_bio;
6050 goto submit;
6051 }
6052
6053 async_submit = 1;
6054 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
6055 if (!bio)
6056 return -ENOMEM;
6057 bio->bi_private = dip;
6058 bio->bi_end_io = btrfs_end_dio_bio;
6059 atomic_inc(&dip->pending_bios);
6060
6005 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { 6061 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
6006 if (unlikely(map_length < submit_len + bvec->bv_len || 6062 if (unlikely(map_length < submit_len + bvec->bv_len ||
6007 bio_add_page(bio, bvec->bv_page, bvec->bv_len, 6063 bio_add_page(bio, bvec->bv_page, bvec->bv_len,
@@ -6015,7 +6071,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6015 atomic_inc(&dip->pending_bios); 6071 atomic_inc(&dip->pending_bios);
6016 ret = __btrfs_submit_dio_bio(bio, inode, rw, 6072 ret = __btrfs_submit_dio_bio(bio, inode, rw,
6017 file_offset, skip_sum, 6073 file_offset, skip_sum,
6018 csums); 6074 csums, async_submit);
6019 if (ret) { 6075 if (ret) {
6020 bio_put(bio); 6076 bio_put(bio);
6021 atomic_dec(&dip->pending_bios); 6077 atomic_dec(&dip->pending_bios);
@@ -6052,8 +6108,9 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6052 } 6108 }
6053 } 6109 }
6054 6110
6111submit:
6055 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, 6112 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
6056 csums); 6113 csums, async_submit);
6057 if (!ret) 6114 if (!ret)
6058 return 0; 6115 return 0;
6059 6116
@@ -6148,6 +6205,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
6148 unsigned long nr_segs) 6205 unsigned long nr_segs)
6149{ 6206{
6150 int seg; 6207 int seg;
6208 int i;
6151 size_t size; 6209 size_t size;
6152 unsigned long addr; 6210 unsigned long addr;
6153 unsigned blocksize_mask = root->sectorsize - 1; 6211 unsigned blocksize_mask = root->sectorsize - 1;
@@ -6162,8 +6220,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
6162 addr = (unsigned long)iov[seg].iov_base; 6220 addr = (unsigned long)iov[seg].iov_base;
6163 size = iov[seg].iov_len; 6221 size = iov[seg].iov_len;
6164 end += size; 6222 end += size;
6165 if ((addr & blocksize_mask) || (size & blocksize_mask)) 6223 if ((addr & blocksize_mask) || (size & blocksize_mask))
6166 goto out; 6224 goto out;
6225
6226 /* If this is a write we don't need to check anymore */
6227 if (rw & WRITE)
6228 continue;
6229
6230 /*
6231 * Check to make sure we don't have duplicate iov_base's in this
6232 * iovec, if so return EINVAL, otherwise we'll get csum errors
6233 * when reading back.
6234 */
6235 for (i = seg + 1; i < nr_segs; i++) {
6236 if (iov[seg].iov_base == iov[i].iov_base)
6237 goto out;
6238 }
6167 } 6239 }
6168 retval = 0; 6240 retval = 0;
6169out: 6241out:
@@ -7206,9 +7278,10 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7206 dentry->d_name.len, dir->i_ino, objectid, 7278 dentry->d_name.len, dir->i_ino, objectid,
7207 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, 7279 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
7208 &index); 7280 &index);
7209 err = PTR_ERR(inode); 7281 if (IS_ERR(inode)) {
7210 if (IS_ERR(inode)) 7282 err = PTR_ERR(inode);
7211 goto out_unlock; 7283 goto out_unlock;
7284 }
7212 7285
7213 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); 7286 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
7214 if (err) { 7287 if (err) {
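
The check_direct_IO() hunk above rejects read iovecs that reuse an iov_base, since two segments landing in the same user buffer would later trip checksum verification on read-back. A standalone sketch of that duplicate scan in plain C (illustrative only, not the btrfs function itself):

#include <stdbool.h>
#include <sys/uio.h>

/* O(n^2) is fine here: nr_segs is small, bounded by UIO_MAXIOV */
static bool iov_has_duplicate_base(const struct iovec *iov,
                                   unsigned long nr_segs)
{
        unsigned long i, j;

        for (i = 0; i < nr_segs; i++)
                for (j = i + 1; j < nr_segs; j++)
                        if (iov[i].iov_base == iov[j].iov_base)
                                return true;
        return false;
}
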
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index cfc264fefdb0..ffb48d6c5433 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2287,7 +2287,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
2287 struct btrfs_ioctl_space_info space; 2287 struct btrfs_ioctl_space_info space;
2288 struct btrfs_ioctl_space_info *dest; 2288 struct btrfs_ioctl_space_info *dest;
2289 struct btrfs_ioctl_space_info *dest_orig; 2289 struct btrfs_ioctl_space_info *dest_orig;
2290 struct btrfs_ioctl_space_info *user_dest; 2290 struct btrfs_ioctl_space_info __user *user_dest;
2291 struct btrfs_space_info *info; 2291 struct btrfs_space_info *info;
2292 u64 types[] = {BTRFS_BLOCK_GROUP_DATA, 2292 u64 types[] = {BTRFS_BLOCK_GROUP_DATA,
2293 BTRFS_BLOCK_GROUP_SYSTEM, 2293 BTRFS_BLOCK_GROUP_SYSTEM,
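
The ioctl.c hunk only adds the __user annotation, so sparse can check that user_dest is handed to copy_to_user() rather than dereferenced directly. A hedged sketch of the pattern the annotation enforces; the struct and helper names here are made up:

#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_info {
        u64 flags;
};

static long demo_copy_out(struct demo_info __user *udest)
{
        struct demo_info info = { .flags = 0 };

        /* the __user pointer is only ever passed to copy_to_user(), never dereferenced */
        if (copy_to_user(udest, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}
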
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 58e7de9cc90c..0ac712efcdf2 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -159,7 +159,7 @@ enum {
159 Opt_compress_type, Opt_compress_force, Opt_compress_force_type, 159 Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
160 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, 160 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
161 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, 161 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
162 Opt_enospc_debug, Opt_err, 162 Opt_enospc_debug, Opt_subvolrootid, Opt_err,
163}; 163};
164 164
165static match_table_t tokens = { 165static match_table_t tokens = {
@@ -189,6 +189,7 @@ static match_table_t tokens = {
189 {Opt_clear_cache, "clear_cache"}, 189 {Opt_clear_cache, "clear_cache"},
190 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, 190 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
191 {Opt_enospc_debug, "enospc_debug"}, 191 {Opt_enospc_debug, "enospc_debug"},
192 {Opt_subvolrootid, "subvolrootid=%d"},
192 {Opt_err, NULL}, 193 {Opt_err, NULL},
193}; 194};
194 195
@@ -232,6 +233,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
232 break; 233 break;
233 case Opt_subvol: 234 case Opt_subvol:
234 case Opt_subvolid: 235 case Opt_subvolid:
236 case Opt_subvolrootid:
235 case Opt_device: 237 case Opt_device:
236 /* 238 /*
237 * These are parsed by btrfs_parse_early_options 239 * These are parsed by btrfs_parse_early_options
@@ -388,7 +390,7 @@ out:
388 */ 390 */
389static int btrfs_parse_early_options(const char *options, fmode_t flags, 391static int btrfs_parse_early_options(const char *options, fmode_t flags,
390 void *holder, char **subvol_name, u64 *subvol_objectid, 392 void *holder, char **subvol_name, u64 *subvol_objectid,
391 struct btrfs_fs_devices **fs_devices) 393 u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices)
392{ 394{
393 substring_t args[MAX_OPT_ARGS]; 395 substring_t args[MAX_OPT_ARGS];
394 char *opts, *orig, *p; 396 char *opts, *orig, *p;
@@ -429,6 +431,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
429 *subvol_objectid = intarg; 431 *subvol_objectid = intarg;
430 } 432 }
431 break; 433 break;
434 case Opt_subvolrootid:
435 intarg = 0;
436 error = match_int(&args[0], &intarg);
437 if (!error) {
438 /* we want the original fs_tree */
439 if (!intarg)
440 *subvol_rootid =
441 BTRFS_FS_TREE_OBJECTID;
442 else
443 *subvol_rootid = intarg;
444 }
445 break;
432 case Opt_device: 446 case Opt_device:
433 error = btrfs_scan_one_device(match_strdup(&args[0]), 447 error = btrfs_scan_one_device(match_strdup(&args[0]),
434 flags, holder, fs_devices); 448 flags, holder, fs_devices);
@@ -736,6 +750,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
736 fmode_t mode = FMODE_READ; 750 fmode_t mode = FMODE_READ;
737 char *subvol_name = NULL; 751 char *subvol_name = NULL;
738 u64 subvol_objectid = 0; 752 u64 subvol_objectid = 0;
753 u64 subvol_rootid = 0;
739 int error = 0; 754 int error = 0;
740 755
741 if (!(flags & MS_RDONLY)) 756 if (!(flags & MS_RDONLY))
@@ -743,7 +758,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
743 758
744 error = btrfs_parse_early_options(data, mode, fs_type, 759 error = btrfs_parse_early_options(data, mode, fs_type,
745 &subvol_name, &subvol_objectid, 760 &subvol_name, &subvol_objectid,
746 &fs_devices); 761 &subvol_rootid, &fs_devices);
747 if (error) 762 if (error)
748 return ERR_PTR(error); 763 return ERR_PTR(error);
749 764
@@ -807,15 +822,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
807 s->s_flags |= MS_ACTIVE; 822 s->s_flags |= MS_ACTIVE;
808 } 823 }
809 824
810 root = get_default_root(s, subvol_objectid);
811 if (IS_ERR(root)) {
812 error = PTR_ERR(root);
813 deactivate_locked_super(s);
814 goto error_free_subvol_name;
815 }
816 /* if they gave us a subvolume name bind mount into that */ 825 /* if they gave us a subvolume name bind mount into that */
817 if (strcmp(subvol_name, ".")) { 826 if (strcmp(subvol_name, ".")) {
818 struct dentry *new_root; 827 struct dentry *new_root;
828
829 root = get_default_root(s, subvol_rootid);
830 if (IS_ERR(root)) {
831 error = PTR_ERR(root);
832 deactivate_locked_super(s);
833 goto error_free_subvol_name;
834 }
835
819 mutex_lock(&root->d_inode->i_mutex); 836 mutex_lock(&root->d_inode->i_mutex);
820 new_root = lookup_one_len(subvol_name, root, 837 new_root = lookup_one_len(subvol_name, root,
821 strlen(subvol_name)); 838 strlen(subvol_name));
@@ -836,6 +853,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
836 } 853 }
837 dput(root); 854 dput(root);
838 root = new_root; 855 root = new_root;
856 } else {
857 root = get_default_root(s, subvol_objectid);
858 if (IS_ERR(root)) {
859 error = PTR_ERR(root);
860 deactivate_locked_super(s);
861 goto error_free_subvol_name;
862 }
839 } 863 }
840 864
841 kfree(subvol_name); 865 kfree(subvol_name);
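
The super.c hunks register a new subvolrootid=%d token in the existing match_table and read it in btrfs_parse_early_options(). A stripped-down sketch of how such a token is matched and converted with the linux/parser.h helpers; demo_parse_one() is an invented wrapper and BTRFS_FS_TREE_OBJECTID comes from btrfs's ctree.h:

#include <linux/parser.h>
#include "ctree.h"      /* btrfs-internal: BTRFS_FS_TREE_OBJECTID */

enum { Opt_subvolrootid, Opt_err };

static match_table_t demo_tokens = {
        {Opt_subvolrootid, "subvolrootid=%d"},
        {Opt_err, NULL},
};

static int demo_parse_one(char *p, u64 *subvol_rootid)
{
        substring_t args[MAX_OPT_ARGS];
        int intarg = 0;

        switch (match_token(p, demo_tokens, args)) {
        case Opt_subvolrootid:
                if (match_int(&args[0], &intarg))
                        break;
                /* 0 means "use the original fs tree", as in the hunk above */
                *subvol_rootid = intarg ? intarg : BTRFS_FS_TREE_OBJECTID;
                break;
        default:
                break;
        }
        return 0;
}
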
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5b158da7e0bb..c571734d5e5a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -32,10 +32,8 @@
32 32
33static noinline void put_transaction(struct btrfs_transaction *transaction) 33static noinline void put_transaction(struct btrfs_transaction *transaction)
34{ 34{
35 WARN_ON(transaction->use_count == 0); 35 WARN_ON(atomic_read(&transaction->use_count) == 0);
36 transaction->use_count--; 36 if (atomic_dec_and_test(&transaction->use_count)) {
37 if (transaction->use_count == 0) {
38 list_del_init(&transaction->list);
39 memset(transaction, 0, sizeof(*transaction)); 37 memset(transaction, 0, sizeof(*transaction));
40 kmem_cache_free(btrfs_transaction_cachep, transaction); 38 kmem_cache_free(btrfs_transaction_cachep, transaction);
41 } 39 }
@@ -60,14 +58,14 @@ static noinline int join_transaction(struct btrfs_root *root)
60 if (!cur_trans) 58 if (!cur_trans)
61 return -ENOMEM; 59 return -ENOMEM;
62 root->fs_info->generation++; 60 root->fs_info->generation++;
63 cur_trans->num_writers = 1; 61 atomic_set(&cur_trans->num_writers, 1);
64 cur_trans->num_joined = 0; 62 cur_trans->num_joined = 0;
65 cur_trans->transid = root->fs_info->generation; 63 cur_trans->transid = root->fs_info->generation;
66 init_waitqueue_head(&cur_trans->writer_wait); 64 init_waitqueue_head(&cur_trans->writer_wait);
67 init_waitqueue_head(&cur_trans->commit_wait); 65 init_waitqueue_head(&cur_trans->commit_wait);
68 cur_trans->in_commit = 0; 66 cur_trans->in_commit = 0;
69 cur_trans->blocked = 0; 67 cur_trans->blocked = 0;
70 cur_trans->use_count = 1; 68 atomic_set(&cur_trans->use_count, 1);
71 cur_trans->commit_done = 0; 69 cur_trans->commit_done = 0;
72 cur_trans->start_time = get_seconds(); 70 cur_trans->start_time = get_seconds();
73 71
@@ -88,7 +86,7 @@ static noinline int join_transaction(struct btrfs_root *root)
88 root->fs_info->running_transaction = cur_trans; 86 root->fs_info->running_transaction = cur_trans;
89 spin_unlock(&root->fs_info->new_trans_lock); 87 spin_unlock(&root->fs_info->new_trans_lock);
90 } else { 88 } else {
91 cur_trans->num_writers++; 89 atomic_inc(&cur_trans->num_writers);
92 cur_trans->num_joined++; 90 cur_trans->num_joined++;
93 } 91 }
94 92
@@ -145,7 +143,7 @@ static void wait_current_trans(struct btrfs_root *root)
145 cur_trans = root->fs_info->running_transaction; 143 cur_trans = root->fs_info->running_transaction;
146 if (cur_trans && cur_trans->blocked) { 144 if (cur_trans && cur_trans->blocked) {
147 DEFINE_WAIT(wait); 145 DEFINE_WAIT(wait);
148 cur_trans->use_count++; 146 atomic_inc(&cur_trans->use_count);
149 while (1) { 147 while (1) {
150 prepare_to_wait(&root->fs_info->transaction_wait, &wait, 148 prepare_to_wait(&root->fs_info->transaction_wait, &wait,
151 TASK_UNINTERRUPTIBLE); 149 TASK_UNINTERRUPTIBLE);
@@ -181,6 +179,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
181{ 179{
182 struct btrfs_trans_handle *h; 180 struct btrfs_trans_handle *h;
183 struct btrfs_transaction *cur_trans; 181 struct btrfs_transaction *cur_trans;
182 int retries = 0;
184 int ret; 183 int ret;
185 184
186 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 185 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
@@ -204,7 +203,7 @@ again:
204 } 203 }
205 204
206 cur_trans = root->fs_info->running_transaction; 205 cur_trans = root->fs_info->running_transaction;
207 cur_trans->use_count++; 206 atomic_inc(&cur_trans->use_count);
208 if (type != TRANS_JOIN_NOLOCK) 207 if (type != TRANS_JOIN_NOLOCK)
209 mutex_unlock(&root->fs_info->trans_mutex); 208 mutex_unlock(&root->fs_info->trans_mutex);
210 209
@@ -224,10 +223,18 @@ again:
224 223
225 if (num_items > 0) { 224 if (num_items > 0) {
226 ret = btrfs_trans_reserve_metadata(h, root, num_items); 225 ret = btrfs_trans_reserve_metadata(h, root, num_items);
227 if (ret == -EAGAIN) { 226 if (ret == -EAGAIN && !retries) {
227 retries++;
228 btrfs_commit_transaction(h, root); 228 btrfs_commit_transaction(h, root);
229 goto again; 229 goto again;
230 } else if (ret == -EAGAIN) {
231 /*
232 * We have already retried and got EAGAIN, so really we
233 * don't have space, so set ret to -ENOSPC.
234 */
235 ret = -ENOSPC;
230 } 236 }
237
231 if (ret < 0) { 238 if (ret < 0) {
232 btrfs_end_transaction(h, root); 239 btrfs_end_transaction(h, root);
233 return ERR_PTR(ret); 240 return ERR_PTR(ret);
@@ -327,7 +334,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
327 goto out_unlock; /* nothing committing|committed */ 334 goto out_unlock; /* nothing committing|committed */
328 } 335 }
329 336
330 cur_trans->use_count++; 337 atomic_inc(&cur_trans->use_count);
331 mutex_unlock(&root->fs_info->trans_mutex); 338 mutex_unlock(&root->fs_info->trans_mutex);
332 339
333 wait_for_commit(root, cur_trans); 340 wait_for_commit(root, cur_trans);
@@ -457,18 +464,14 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
457 wake_up_process(info->transaction_kthread); 464 wake_up_process(info->transaction_kthread);
458 } 465 }
459 466
460 if (lock)
461 mutex_lock(&info->trans_mutex);
462 WARN_ON(cur_trans != info->running_transaction); 467 WARN_ON(cur_trans != info->running_transaction);
463 WARN_ON(cur_trans->num_writers < 1); 468 WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
464 cur_trans->num_writers--; 469 atomic_dec(&cur_trans->num_writers);
465 470
466 smp_mb(); 471 smp_mb();
467 if (waitqueue_active(&cur_trans->writer_wait)) 472 if (waitqueue_active(&cur_trans->writer_wait))
468 wake_up(&cur_trans->writer_wait); 473 wake_up(&cur_trans->writer_wait);
469 put_transaction(cur_trans); 474 put_transaction(cur_trans);
470 if (lock)
471 mutex_unlock(&info->trans_mutex);
472 475
473 if (current->journal_info == trans) 476 if (current->journal_info == trans)
474 current->journal_info = NULL; 477 current->journal_info = NULL;
@@ -1178,7 +1181,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1178 /* take transaction reference */ 1181 /* take transaction reference */
1179 mutex_lock(&root->fs_info->trans_mutex); 1182 mutex_lock(&root->fs_info->trans_mutex);
1180 cur_trans = trans->transaction; 1183 cur_trans = trans->transaction;
1181 cur_trans->use_count++; 1184 atomic_inc(&cur_trans->use_count);
1182 mutex_unlock(&root->fs_info->trans_mutex); 1185 mutex_unlock(&root->fs_info->trans_mutex);
1183 1186
1184 btrfs_end_transaction(trans, root); 1187 btrfs_end_transaction(trans, root);
@@ -1237,7 +1240,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1237 1240
1238 mutex_lock(&root->fs_info->trans_mutex); 1241 mutex_lock(&root->fs_info->trans_mutex);
1239 if (cur_trans->in_commit) { 1242 if (cur_trans->in_commit) {
1240 cur_trans->use_count++; 1243 atomic_inc(&cur_trans->use_count);
1241 mutex_unlock(&root->fs_info->trans_mutex); 1244 mutex_unlock(&root->fs_info->trans_mutex);
1242 btrfs_end_transaction(trans, root); 1245 btrfs_end_transaction(trans, root);
1243 1246
@@ -1259,7 +1262,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1259 prev_trans = list_entry(cur_trans->list.prev, 1262 prev_trans = list_entry(cur_trans->list.prev,
1260 struct btrfs_transaction, list); 1263 struct btrfs_transaction, list);
1261 if (!prev_trans->commit_done) { 1264 if (!prev_trans->commit_done) {
1262 prev_trans->use_count++; 1265 atomic_inc(&prev_trans->use_count);
1263 mutex_unlock(&root->fs_info->trans_mutex); 1266 mutex_unlock(&root->fs_info->trans_mutex);
1264 1267
1265 wait_for_commit(root, prev_trans); 1268 wait_for_commit(root, prev_trans);
@@ -1300,14 +1303,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1300 TASK_UNINTERRUPTIBLE); 1303 TASK_UNINTERRUPTIBLE);
1301 1304
1302 smp_mb(); 1305 smp_mb();
1303 if (cur_trans->num_writers > 1) 1306 if (atomic_read(&cur_trans->num_writers) > 1)
1304 schedule_timeout(MAX_SCHEDULE_TIMEOUT); 1307 schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1305 else if (should_grow) 1308 else if (should_grow)
1306 schedule_timeout(1); 1309 schedule_timeout(1);
1307 1310
1308 mutex_lock(&root->fs_info->trans_mutex); 1311 mutex_lock(&root->fs_info->trans_mutex);
1309 finish_wait(&cur_trans->writer_wait, &wait); 1312 finish_wait(&cur_trans->writer_wait, &wait);
1310 } while (cur_trans->num_writers > 1 || 1313 } while (atomic_read(&cur_trans->num_writers) > 1 ||
1311 (should_grow && cur_trans->num_joined != joined)); 1314 (should_grow && cur_trans->num_joined != joined));
1312 1315
1313 ret = create_pending_snapshots(trans, root->fs_info); 1316 ret = create_pending_snapshots(trans, root->fs_info);
@@ -1394,6 +1397,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1394 1397
1395 wake_up(&cur_trans->commit_wait); 1398 wake_up(&cur_trans->commit_wait);
1396 1399
1400 list_del_init(&cur_trans->list);
1397 put_transaction(cur_trans); 1401 put_transaction(cur_trans);
1398 put_transaction(cur_trans); 1402 put_transaction(cur_trans);
1399 1403
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 229a594cacd5..e441acc6c584 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -27,11 +27,11 @@ struct btrfs_transaction {
27 * total writers in this transaction, it must be zero before the 27 * total writers in this transaction, it must be zero before the
28 * transaction can end 28 * transaction can end
29 */ 29 */
30 unsigned long num_writers; 30 atomic_t num_writers;
31 31
32 unsigned long num_joined; 32 unsigned long num_joined;
33 int in_commit; 33 int in_commit;
34 int use_count; 34 atomic_t use_count;
35 int commit_done; 35 int commit_done;
36 int blocked; 36 int blocked;
37 struct list_head list; 37 struct list_head list;
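
The transaction.c and transaction.h hunks turn use_count and num_writers into atomic_t so references can be taken and dropped without holding trans_mutex, and move the list removal out to the commit path. A minimal sketch of the get/put shape this moves to, using generic names rather than the btrfs ones; the struct and helpers are invented for illustration:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct demo_txn {
        atomic_t use_count;
};

static void demo_txn_init(struct demo_txn *t)
{
        atomic_set(&t->use_count, 1);           /* creator holds the first reference */
}

static void demo_txn_get(struct demo_txn *t)
{
        atomic_inc(&t->use_count);
}

static void demo_txn_put(struct demo_txn *t)
{
        WARN_ON(atomic_read(&t->use_count) == 0);
        /*
         * atomic_dec_and_test() makes the 1 -> 0 transition visible to
         * exactly one caller, so only that caller performs the free and
         * no mutex is needed around the refcount itself.
         */
        if (atomic_dec_and_test(&t->use_count))
                kfree(t);
}
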
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c50271ad3157..f997ec0c1ba4 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2209,8 +2209,10 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2209 2209
2210 log = root->log_root; 2210 log = root->log_root;
2211 path = btrfs_alloc_path(); 2211 path = btrfs_alloc_path();
2212 if (!path) 2212 if (!path) {
2213 return -ENOMEM; 2213 err = -ENOMEM;
2214 goto out_unlock;
2215 }
2214 2216
2215 di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, 2217 di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
2216 name, name_len, -1); 2218 name, name_len, -1);
@@ -2271,6 +2273,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2271 } 2273 }
2272fail: 2274fail:
2273 btrfs_free_path(path); 2275 btrfs_free_path(path);
2276out_unlock:
2274 mutex_unlock(&BTRFS_I(dir)->log_mutex); 2277 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2275 if (ret == -ENOSPC) { 2278 if (ret == -ENOSPC) {
2276 root->fs_info->last_trans_log_full_commit = trans->transid; 2279 root->fs_info->last_trans_log_full_commit = trans->transid;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 309a57b9fc85..c7367ae5a3e6 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -155,6 +155,15 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
155 unsigned long limit; 155 unsigned long limit;
156 unsigned long last_waited = 0; 156 unsigned long last_waited = 0;
157 int force_reg = 0; 157 int force_reg = 0;
158 struct blk_plug plug;
159
160 /*
161 * this function runs all the bios we've collected for
162 * a particular device. We don't want to wander off to
163 * another device without first sending all of these down.
164 * So, setup a plug here and finish it off before we return
165 */
166 blk_start_plug(&plug);
158 167
159 bdi = blk_get_backing_dev_info(device->bdev); 168 bdi = blk_get_backing_dev_info(device->bdev);
160 fs_info = device->dev_root->fs_info; 169 fs_info = device->dev_root->fs_info;
@@ -294,6 +303,7 @@ loop_lock:
294 spin_unlock(&device->io_lock); 303 spin_unlock(&device->io_lock);
295 304
296done: 305done:
306 blk_finish_plug(&plug);
297 return 0; 307 return 0;
298} 308}
299 309
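
run_scheduled_bios() now brackets its submission loop with a block plug, so the bios collected for one device are flushed to it as a batch when the plug is finished. A sketch of the pattern on its own; the bio_list argument is a stand-in for the driver's pending lists, and submit_bio() is used in its (rw, bio) form from this era:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

static void demo_flush_pending(struct bio_list *pending, int rw)
{
        struct blk_plug plug;
        struct bio *bio;

        /*
         * bios submitted between blk_start_plug() and blk_finish_plug()
         * are held back per task and dispatched as one batch, instead of
         * trickling out to the device one request at a time.
         */
        blk_start_plug(&plug);
        while ((bio = bio_list_pop(pending)) != NULL)
                submit_bio(rw, bio);
        blk_finish_plug(&plug);
}
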
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index a5303b871b13..cfd660550ded 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -180,11 +180,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
180 struct btrfs_path *path; 180 struct btrfs_path *path;
181 struct extent_buffer *leaf; 181 struct extent_buffer *leaf;
182 struct btrfs_dir_item *di; 182 struct btrfs_dir_item *di;
183 int ret = 0, slot, advance; 183 int ret = 0, slot;
184 size_t total_size = 0, size_left = size; 184 size_t total_size = 0, size_left = size;
185 unsigned long name_ptr; 185 unsigned long name_ptr;
186 size_t name_len; 186 size_t name_len;
187 u32 nritems;
188 187
189 /* 188 /*
190 * ok we want all objects associated with this id. 189 * ok we want all objects associated with this id.
@@ -204,34 +203,24 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
204 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 203 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
205 if (ret < 0) 204 if (ret < 0)
206 goto err; 205 goto err;
207 advance = 0; 206
208 while (1) { 207 while (1) {
209 leaf = path->nodes[0]; 208 leaf = path->nodes[0];
210 nritems = btrfs_header_nritems(leaf);
211 slot = path->slots[0]; 209 slot = path->slots[0];
212 210
213 /* this is where we start walking through the path */ 211 /* this is where we start walking through the path */
214 if (advance || slot >= nritems) { 212 if (slot >= btrfs_header_nritems(leaf)) {
215 /* 213 /*
216 * if we've reached the last slot in this leaf we need 214 * if we've reached the last slot in this leaf we need
217 * to go to the next leaf and reset everything 215 * to go to the next leaf and reset everything
218 */ 216 */
219 if (slot >= nritems-1) { 217 ret = btrfs_next_leaf(root, path);
220 ret = btrfs_next_leaf(root, path); 218 if (ret < 0)
221 if (ret) 219 goto err;
222 break; 220 else if (ret > 0)
223 leaf = path->nodes[0]; 221 break;
224 nritems = btrfs_header_nritems(leaf); 222 continue;
225 slot = path->slots[0];
226 } else {
227 /*
228 * just walking through the slots on this leaf
229 */
230 slot++;
231 path->slots[0]++;
232 }
233 } 223 }
234 advance = 1;
235 224
236 btrfs_item_key_to_cpu(leaf, &found_key, slot); 225 btrfs_item_key_to_cpu(leaf, &found_key, slot);
237 226
@@ -250,7 +239,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
250 239
251 /* we are just looking for how big our buffer needs to be */ 240 /* we are just looking for how big our buffer needs to be */
252 if (!size) 241 if (!size)
253 continue; 242 goto next;
254 243
255 if (!buffer || (name_len + 1) > size_left) { 244 if (!buffer || (name_len + 1) > size_left) {
256 ret = -ERANGE; 245 ret = -ERANGE;
@@ -263,6 +252,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
263 252
264 size_left -= name_len + 1; 253 size_left -= name_len + 1;
265 buffer += name_len + 1; 254 buffer += name_len + 1;
255next:
256 path->slots[0]++;
266 } 257 }
267 ret = total_size; 258 ret = total_size;
268 259
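
The readdir and listxattr hunks above replace the hand-rolled advance/nritems bookkeeping with the same simpler walk: advance path->slots[0] once at the bottom of the loop and let btrfs_next_leaf() handle stepping into the next leaf. A skeleton of that loop using only helpers visible in the hunks; demo_walk_items() itself is invented and the item processing is elided:

#include "ctree.h"      /* btrfs-internal: btrfs_path, btrfs_key, btrfs_next_leaf, ... */

static int demo_walk_items(struct btrfs_root *root, struct btrfs_path *path)
{
        struct extent_buffer *leaf;
        struct btrfs_key found_key;
        int slot;
        int ret;

        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        /* ran off this leaf; step into the next one */
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                return ret;     /* search error */
                        if (ret > 0)
                                break;          /* no more leaves */
                        continue;               /* re-read nodes[0]/slots[0] */
                }

                btrfs_item_key_to_cpu(leaf, &found_key, slot);
                /* ... examine found_key / the item here, break when done ... */

                path->slots[0]++;               /* single advance point */
        }
        return 0;
}
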
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index db9d55b507d0..4bc862a80efa 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -807,8 +807,7 @@ static int
807cifs_parse_mount_options(char *options, const char *devname, 807cifs_parse_mount_options(char *options, const char *devname,
808 struct smb_vol *vol) 808 struct smb_vol *vol)
809{ 809{
810 char *value; 810 char *value, *data, *end;
811 char *data;
812 unsigned int temp_len, i, j; 811 unsigned int temp_len, i, j;
813 char separator[2]; 812 char separator[2];
814 short int override_uid = -1; 813 short int override_uid = -1;
@@ -851,6 +850,7 @@ cifs_parse_mount_options(char *options, const char *devname,
851 if (!options) 850 if (!options)
852 return 1; 851 return 1;
853 852
853 end = options + strlen(options);
854 if (strncmp(options, "sep=", 4) == 0) { 854 if (strncmp(options, "sep=", 4) == 0) {
855 if (options[4] != 0) { 855 if (options[4] != 0) {
856 separator[0] = options[4]; 856 separator[0] = options[4];
@@ -916,6 +916,7 @@ cifs_parse_mount_options(char *options, const char *devname,
916 the only illegal character in a password is null */ 916 the only illegal character in a password is null */
917 917
918 if ((value[temp_len] == 0) && 918 if ((value[temp_len] == 0) &&
919 (value + temp_len < end) &&
919 (value[temp_len+1] == separator[0])) { 920 (value[temp_len+1] == separator[0])) {
920 /* reinsert comma */ 921 /* reinsert comma */
921 value[temp_len] = separator[0]; 922 value[temp_len] = separator[0];
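
The connect.c change records end = options + strlen(options) and requires value + temp_len < end before peeking at value[temp_len + 1], so the comma-in-password heuristic can no longer read past the option string. A tiny standalone sketch of that bounded lookahead in plain C; the helper name is invented:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/*
 * end points at the terminating NUL of the whole string, so s[i + 1] is
 * only inspected while it still lies within the buffer.
 * Caller computes end once, e.g.: const char *end = opts + strlen(opts);
 */
static bool next_char_is(const char *s, const char *end, size_t i, char c)
{
        return s + i < end && s[i + 1] == c;
}
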
diff --git a/fs/dcache.c b/fs/dcache.c
index ad25c4cec7d5..22a0ef41bad1 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -99,12 +99,9 @@ static struct kmem_cache *dentry_cache __read_mostly;
99static unsigned int d_hash_mask __read_mostly; 99static unsigned int d_hash_mask __read_mostly;
100static unsigned int d_hash_shift __read_mostly; 100static unsigned int d_hash_shift __read_mostly;
101 101
102struct dcache_hash_bucket { 102static struct hlist_bl_head *dentry_hashtable __read_mostly;
103 struct hlist_bl_head head;
104};
105static struct dcache_hash_bucket *dentry_hashtable __read_mostly;
106 103
107static inline struct dcache_hash_bucket *d_hash(struct dentry *parent, 104static inline struct hlist_bl_head *d_hash(struct dentry *parent,
108 unsigned long hash) 105 unsigned long hash)
109{ 106{
110 hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES; 107 hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
@@ -112,16 +109,6 @@ static inline struct dcache_hash_bucket *d_hash(struct dentry *parent,
112 return dentry_hashtable + (hash & D_HASHMASK); 109 return dentry_hashtable + (hash & D_HASHMASK);
113} 110}
114 111
115static inline void spin_lock_bucket(struct dcache_hash_bucket *b)
116{
117 bit_spin_lock(0, (unsigned long *)&b->head.first);
118}
119
120static inline void spin_unlock_bucket(struct dcache_hash_bucket *b)
121{
122 __bit_spin_unlock(0, (unsigned long *)&b->head.first);
123}
124
125/* Statistics gathering. */ 112/* Statistics gathering. */
126struct dentry_stat_t dentry_stat = { 113struct dentry_stat_t dentry_stat = {
127 .age_limit = 45, 114 .age_limit = 45,
@@ -167,8 +154,8 @@ static void d_free(struct dentry *dentry)
167 if (dentry->d_op && dentry->d_op->d_release) 154 if (dentry->d_op && dentry->d_op->d_release)
168 dentry->d_op->d_release(dentry); 155 dentry->d_op->d_release(dentry);
169 156
170 /* if dentry was never inserted into hash, immediate free is OK */ 157 /* if dentry was never visible to RCU, immediate free is OK */
171 if (hlist_bl_unhashed(&dentry->d_hash)) 158 if (!(dentry->d_flags & DCACHE_RCUACCESS))
172 __d_free(&dentry->d_u.d_rcu); 159 __d_free(&dentry->d_u.d_rcu);
173 else 160 else
174 call_rcu(&dentry->d_u.d_rcu, __d_free); 161 call_rcu(&dentry->d_u.d_rcu, __d_free);
@@ -330,28 +317,19 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
330 */ 317 */
331void __d_drop(struct dentry *dentry) 318void __d_drop(struct dentry *dentry)
332{ 319{
333 if (!(dentry->d_flags & DCACHE_UNHASHED)) { 320 if (!d_unhashed(dentry)) {
334 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED)) { 321 struct hlist_bl_head *b;
335 bit_spin_lock(0, 322 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
336 (unsigned long *)&dentry->d_sb->s_anon.first); 323 b = &dentry->d_sb->s_anon;
337 dentry->d_flags |= DCACHE_UNHASHED; 324 else
338 hlist_bl_del_init(&dentry->d_hash);
339 __bit_spin_unlock(0,
340 (unsigned long *)&dentry->d_sb->s_anon.first);
341 } else {
342 struct dcache_hash_bucket *b;
343 b = d_hash(dentry->d_parent, dentry->d_name.hash); 325 b = d_hash(dentry->d_parent, dentry->d_name.hash);
344 spin_lock_bucket(b); 326
345 /* 327 hlist_bl_lock(b);
346 * We may not actually need to put DCACHE_UNHASHED 328 __hlist_bl_del(&dentry->d_hash);
347 * manipulations under the hash lock, but follow 329 dentry->d_hash.pprev = NULL;
348 * the principle of least surprise. 330 hlist_bl_unlock(b);
349 */ 331
350 dentry->d_flags |= DCACHE_UNHASHED; 332 dentry_rcuwalk_barrier(dentry);
351 hlist_bl_del_rcu(&dentry->d_hash);
352 spin_unlock_bucket(b);
353 dentry_rcuwalk_barrier(dentry);
354 }
355 } 333 }
356} 334}
357EXPORT_SYMBOL(__d_drop); 335EXPORT_SYMBOL(__d_drop);
@@ -1304,7 +1282,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1304 dname[name->len] = 0; 1282 dname[name->len] = 0;
1305 1283
1306 dentry->d_count = 1; 1284 dentry->d_count = 1;
1307 dentry->d_flags = DCACHE_UNHASHED; 1285 dentry->d_flags = 0;
1308 spin_lock_init(&dentry->d_lock); 1286 spin_lock_init(&dentry->d_lock);
1309 seqcount_init(&dentry->d_seq); 1287 seqcount_init(&dentry->d_seq);
1310 dentry->d_inode = NULL; 1288 dentry->d_inode = NULL;
@@ -1606,10 +1584,9 @@ struct dentry *d_obtain_alias(struct inode *inode)
1606 tmp->d_inode = inode; 1584 tmp->d_inode = inode;
1607 tmp->d_flags |= DCACHE_DISCONNECTED; 1585 tmp->d_flags |= DCACHE_DISCONNECTED;
1608 list_add(&tmp->d_alias, &inode->i_dentry); 1586 list_add(&tmp->d_alias, &inode->i_dentry);
1609 bit_spin_lock(0, (unsigned long *)&tmp->d_sb->s_anon.first); 1587 hlist_bl_lock(&tmp->d_sb->s_anon);
1610 tmp->d_flags &= ~DCACHE_UNHASHED;
1611 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon); 1588 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1612 __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first); 1589 hlist_bl_unlock(&tmp->d_sb->s_anon);
1613 spin_unlock(&tmp->d_lock); 1590 spin_unlock(&tmp->d_lock);
1614 spin_unlock(&inode->i_lock); 1591 spin_unlock(&inode->i_lock);
1615 security_d_instantiate(tmp, inode); 1592 security_d_instantiate(tmp, inode);
@@ -1789,7 +1766,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
1789 unsigned int len = name->len; 1766 unsigned int len = name->len;
1790 unsigned int hash = name->hash; 1767 unsigned int hash = name->hash;
1791 const unsigned char *str = name->name; 1768 const unsigned char *str = name->name;
1792 struct dcache_hash_bucket *b = d_hash(parent, hash); 1769 struct hlist_bl_head *b = d_hash(parent, hash);
1793 struct hlist_bl_node *node; 1770 struct hlist_bl_node *node;
1794 struct dentry *dentry; 1771 struct dentry *dentry;
1795 1772
@@ -1813,7 +1790,7 @@ struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
1813 * 1790 *
1814 * See Documentation/filesystems/path-lookup.txt for more details. 1791 * See Documentation/filesystems/path-lookup.txt for more details.
1815 */ 1792 */
1816 hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) { 1793 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
1817 struct inode *i; 1794 struct inode *i;
1818 const char *tname; 1795 const char *tname;
1819 int tlen; 1796 int tlen;
@@ -1908,7 +1885,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
1908 unsigned int len = name->len; 1885 unsigned int len = name->len;
1909 unsigned int hash = name->hash; 1886 unsigned int hash = name->hash;
1910 const unsigned char *str = name->name; 1887 const unsigned char *str = name->name;
1911 struct dcache_hash_bucket *b = d_hash(parent, hash); 1888 struct hlist_bl_head *b = d_hash(parent, hash);
1912 struct hlist_bl_node *node; 1889 struct hlist_bl_node *node;
1913 struct dentry *found = NULL; 1890 struct dentry *found = NULL;
1914 struct dentry *dentry; 1891 struct dentry *dentry;
@@ -1935,7 +1912,7 @@ struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
1935 */ 1912 */
1936 rcu_read_lock(); 1913 rcu_read_lock();
1937 1914
1938 hlist_bl_for_each_entry_rcu(dentry, node, &b->head, d_hash) { 1915 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
1939 const char *tname; 1916 const char *tname;
1940 int tlen; 1917 int tlen;
1941 1918
@@ -2086,13 +2063,13 @@ again:
2086} 2063}
2087EXPORT_SYMBOL(d_delete); 2064EXPORT_SYMBOL(d_delete);
2088 2065
2089static void __d_rehash(struct dentry * entry, struct dcache_hash_bucket *b) 2066static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2090{ 2067{
2091 BUG_ON(!d_unhashed(entry)); 2068 BUG_ON(!d_unhashed(entry));
2092 spin_lock_bucket(b); 2069 hlist_bl_lock(b);
2093 entry->d_flags &= ~DCACHE_UNHASHED; 2070 entry->d_flags |= DCACHE_RCUACCESS;
2094 hlist_bl_add_head_rcu(&entry->d_hash, &b->head); 2071 hlist_bl_add_head_rcu(&entry->d_hash, b);
2095 spin_unlock_bucket(b); 2072 hlist_bl_unlock(b);
2096} 2073}
2097 2074
2098static void _d_rehash(struct dentry * entry) 2075static void _d_rehash(struct dentry * entry)
@@ -2131,7 +2108,7 @@ EXPORT_SYMBOL(d_rehash);
2131 */ 2108 */
2132void dentry_update_name_case(struct dentry *dentry, struct qstr *name) 2109void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2133{ 2110{
2134 BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); 2111 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2135 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ 2112 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2136 2113
2137 spin_lock(&dentry->d_lock); 2114 spin_lock(&dentry->d_lock);
@@ -3025,7 +3002,7 @@ static void __init dcache_init_early(void)
3025 3002
3026 dentry_hashtable = 3003 dentry_hashtable =
3027 alloc_large_system_hash("Dentry cache", 3004 alloc_large_system_hash("Dentry cache",
3028 sizeof(struct dcache_hash_bucket), 3005 sizeof(struct hlist_bl_head),
3029 dhash_entries, 3006 dhash_entries,
3030 13, 3007 13,
3031 HASH_EARLY, 3008 HASH_EARLY,
@@ -3034,7 +3011,7 @@ static void __init dcache_init_early(void)
3034 0); 3011 0);
3035 3012
3036 for (loop = 0; loop < (1 << d_hash_shift); loop++) 3013 for (loop = 0; loop < (1 << d_hash_shift); loop++)
3037 INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head); 3014 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3038} 3015}
3039 3016
3040static void __init dcache_init(void) 3017static void __init dcache_init(void)
@@ -3057,7 +3034,7 @@ static void __init dcache_init(void)
3057 3034
3058 dentry_hashtable = 3035 dentry_hashtable =
3059 alloc_large_system_hash("Dentry cache", 3036 alloc_large_system_hash("Dentry cache",
3060 sizeof(struct dcache_hash_bucket), 3037 sizeof(struct hlist_bl_head),
3061 dhash_entries, 3038 dhash_entries,
3062 13, 3039 13,
3063 0, 3040 0,
@@ -3066,7 +3043,7 @@ static void __init dcache_init(void)
3066 0); 3043 0);
3067 3044
3068 for (loop = 0; loop < (1 << d_hash_shift); loop++) 3045 for (loop = 0; loop < (1 << d_hash_shift); loop++)
3069 INIT_HLIST_BL_HEAD(&dentry_hashtable[loop].head); 3046 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3070} 3047}
3071 3048
3072/* SLAB cache for __getname() consumers */ 3049/* SLAB cache for __getname() consumers */
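
The dcache hunks drop the dcache_hash_bucket wrapper and the open-coded bit_spin_lock on the first pointer in favour of struct hlist_bl_head with hlist_bl_lock()/hlist_bl_unlock(), the same bit lock behind a tidier interface, while DCACHE_RCUACCESS replaces DCACHE_UNHASHED as the "safe to free without RCU" signal. A minimal sketch of adding and removing under that lock; the demo_* names are invented and RCU readers are omitted:

#include <linux/list_bl.h>
#include <linux/rculist_bl.h>

static struct hlist_bl_head demo_bucket;        /* INIT_HLIST_BL_HEAD() at init time */

static void demo_hash_add(struct hlist_bl_node *n)
{
        hlist_bl_lock(&demo_bucket);            /* bit 0 of ->first is the lock */
        hlist_bl_add_head_rcu(n, &demo_bucket);
        hlist_bl_unlock(&demo_bucket);
}

static void demo_hash_del(struct hlist_bl_node *n)
{
        hlist_bl_lock(&demo_bucket);
        __hlist_bl_del(n);
        n->pprev = NULL;                        /* mark unhashed, as __d_drop() does */
        hlist_bl_unlock(&demo_bucket);
}
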
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index d2a70a4561f9..b8d5c8091024 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1452,6 +1452,25 @@ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
1452 crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; 1452 crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
1453} 1453}
1454 1454
1455void ecryptfs_i_size_init(const char *page_virt, struct inode *inode)
1456{
1457 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
1458 struct ecryptfs_crypt_stat *crypt_stat;
1459 u64 file_size;
1460
1461 crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
1462 mount_crypt_stat =
1463 &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
1464 if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
1465 file_size = i_size_read(ecryptfs_inode_to_lower(inode));
1466 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
1467 file_size += crypt_stat->metadata_size;
1468 } else
1469 file_size = get_unaligned_be64(page_virt);
1470 i_size_write(inode, (loff_t)file_size);
1471 crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED;
1472}
1473
1455/** 1474/**
1456 * ecryptfs_read_headers_virt 1475 * ecryptfs_read_headers_virt
1457 * @page_virt: The virtual address into which to read the headers 1476 * @page_virt: The virtual address into which to read the headers
@@ -1482,6 +1501,8 @@ static int ecryptfs_read_headers_virt(char *page_virt,
1482 rc = -EINVAL; 1501 rc = -EINVAL;
1483 goto out; 1502 goto out;
1484 } 1503 }
1504 if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
1505 ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
1485 offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES; 1506 offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
1486 rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset), 1507 rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
1487 &bytes_read); 1508 &bytes_read);
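
ecryptfs_i_size_init() reads the 64-bit plaintext size stored big-endian at the start of the header page and pushes it into the VFS inode, now guarded by the new ECRYPTFS_I_SIZE_INITIALIZED flag. A tiny sketch of the non-encrypted-view branch restated on its own; demo_set_isize() is an invented wrapper:

#include <linux/fs.h>
#include <asm/unaligned.h>

/* page_virt points at the plaintext-size field that leads the eCryptfs header */
static void demo_set_isize(struct inode *inode, const char *page_virt)
{
        u64 file_size = get_unaligned_be64(page_virt);

        i_size_write(inode, (loff_t)file_size);
}
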
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index bd3cafd0949d..e70282775e2c 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -269,6 +269,7 @@ struct ecryptfs_crypt_stat {
269#define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00000800 269#define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00000800
270#define ECRYPTFS_ENCFN_USE_FEK 0x00001000 270#define ECRYPTFS_ENCFN_USE_FEK 0x00001000
271#define ECRYPTFS_UNLINK_SIGS 0x00002000 271#define ECRYPTFS_UNLINK_SIGS 0x00002000
272#define ECRYPTFS_I_SIZE_INITIALIZED 0x00004000
272 u32 flags; 273 u32 flags;
273 unsigned int file_version; 274 unsigned int file_version;
274 size_t iv_bytes; 275 size_t iv_bytes;
@@ -295,6 +296,8 @@ struct ecryptfs_crypt_stat {
295struct ecryptfs_inode_info { 296struct ecryptfs_inode_info {
296 struct inode vfs_inode; 297 struct inode vfs_inode;
297 struct inode *wii_inode; 298 struct inode *wii_inode;
299 struct mutex lower_file_mutex;
300 atomic_t lower_file_count;
298 struct file *lower_file; 301 struct file *lower_file;
299 struct ecryptfs_crypt_stat crypt_stat; 302 struct ecryptfs_crypt_stat crypt_stat;
300}; 303};
@@ -626,6 +629,7 @@ struct ecryptfs_open_req {
626int ecryptfs_interpose(struct dentry *hidden_dentry, 629int ecryptfs_interpose(struct dentry *hidden_dentry,
627 struct dentry *this_dentry, struct super_block *sb, 630 struct dentry *this_dentry, struct super_block *sb,
628 u32 flags); 631 u32 flags);
632void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
629int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, 633int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
630 struct dentry *lower_dentry, 634 struct dentry *lower_dentry,
631 struct inode *ecryptfs_dir_inode); 635 struct inode *ecryptfs_dir_inode);
@@ -757,7 +761,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
757 struct dentry *lower_dentry, 761 struct dentry *lower_dentry,
758 struct vfsmount *lower_mnt, 762 struct vfsmount *lower_mnt,
759 const struct cred *cred); 763 const struct cred *cred);
760int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry); 764int ecryptfs_get_lower_file(struct dentry *ecryptfs_dentry);
765void ecryptfs_put_lower_file(struct inode *inode);
761int 766int
762ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, 767ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
763 size_t *packet_size, 768 size_t *packet_size,
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index cedc913d11ba..566e5472f78c 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -191,10 +191,10 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
191 | ECRYPTFS_ENCRYPTED); 191 | ECRYPTFS_ENCRYPTED);
192 } 192 }
193 mutex_unlock(&crypt_stat->cs_mutex); 193 mutex_unlock(&crypt_stat->cs_mutex);
194 rc = ecryptfs_init_persistent_file(ecryptfs_dentry); 194 rc = ecryptfs_get_lower_file(ecryptfs_dentry);
195 if (rc) { 195 if (rc) {
196 printk(KERN_ERR "%s: Error attempting to initialize " 196 printk(KERN_ERR "%s: Error attempting to initialize "
197 "the persistent file for the dentry with name " 197 "the lower file for the dentry with name "
198 "[%s]; rc = [%d]\n", __func__, 198 "[%s]; rc = [%d]\n", __func__,
199 ecryptfs_dentry->d_name.name, rc); 199 ecryptfs_dentry->d_name.name, rc);
200 goto out_free; 200 goto out_free;
@@ -202,9 +202,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
202 if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_ACCMODE) 202 if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_ACCMODE)
203 == O_RDONLY && (file->f_flags & O_ACCMODE) != O_RDONLY) { 203 == O_RDONLY && (file->f_flags & O_ACCMODE) != O_RDONLY) {
204 rc = -EPERM; 204 rc = -EPERM;
205 printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs " 205 printk(KERN_WARNING "%s: Lower file is RO; eCryptfs "
206 "file must hence be opened RO\n", __func__); 206 "file must hence be opened RO\n", __func__);
207 goto out_free; 207 goto out_put;
208 } 208 }
209 ecryptfs_set_file_lower( 209 ecryptfs_set_file_lower(
210 file, ecryptfs_inode_to_private(inode)->lower_file); 210 file, ecryptfs_inode_to_private(inode)->lower_file);
@@ -232,10 +232,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
232 "Plaintext passthrough mode is not " 232 "Plaintext passthrough mode is not "
233 "enabled; returning -EIO\n"); 233 "enabled; returning -EIO\n");
234 mutex_unlock(&crypt_stat->cs_mutex); 234 mutex_unlock(&crypt_stat->cs_mutex);
235 goto out_free; 235 goto out_put;
236 } 236 }
237 rc = 0; 237 rc = 0;
238 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 238 crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
239 | ECRYPTFS_ENCRYPTED);
239 mutex_unlock(&crypt_stat->cs_mutex); 240 mutex_unlock(&crypt_stat->cs_mutex);
240 goto out; 241 goto out;
241 } 242 }
@@ -245,6 +246,8 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
245 "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino, 246 "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
246 (unsigned long long)i_size_read(inode)); 247 (unsigned long long)i_size_read(inode));
247 goto out; 248 goto out;
249out_put:
250 ecryptfs_put_lower_file(inode);
248out_free: 251out_free:
249 kmem_cache_free(ecryptfs_file_info_cache, 252 kmem_cache_free(ecryptfs_file_info_cache,
250 ecryptfs_file_to_private(file)); 253 ecryptfs_file_to_private(file));
@@ -254,17 +257,13 @@ out:
254 257
255static int ecryptfs_flush(struct file *file, fl_owner_t td) 258static int ecryptfs_flush(struct file *file, fl_owner_t td)
256{ 259{
257 int rc = 0; 260 return file->f_mode & FMODE_WRITE
258 struct file *lower_file = NULL; 261 ? filemap_write_and_wait(file->f_mapping) : 0;
259
260 lower_file = ecryptfs_file_to_lower(file);
261 if (lower_file->f_op && lower_file->f_op->flush)
262 rc = lower_file->f_op->flush(lower_file, td);
263 return rc;
264} 262}
265 263
266static int ecryptfs_release(struct inode *inode, struct file *file) 264static int ecryptfs_release(struct inode *inode, struct file *file)
267{ 265{
266 ecryptfs_put_lower_file(inode);
268 kmem_cache_free(ecryptfs_file_info_cache, 267 kmem_cache_free(ecryptfs_file_info_cache,
269 ecryptfs_file_to_private(file)); 268 ecryptfs_file_to_private(file));
270 return 0; 269 return 0;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index f99051b7adab..4d4cc6a90cd5 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -168,19 +168,18 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
168 "context; rc = [%d]\n", rc); 168 "context; rc = [%d]\n", rc);
169 goto out; 169 goto out;
170 } 170 }
171 rc = ecryptfs_init_persistent_file(ecryptfs_dentry); 171 rc = ecryptfs_get_lower_file(ecryptfs_dentry);
172 if (rc) { 172 if (rc) {
173 printk(KERN_ERR "%s: Error attempting to initialize " 173 printk(KERN_ERR "%s: Error attempting to initialize "
174 "the persistent file for the dentry with name " 174 "the lower file for the dentry with name "
175 "[%s]; rc = [%d]\n", __func__, 175 "[%s]; rc = [%d]\n", __func__,
176 ecryptfs_dentry->d_name.name, rc); 176 ecryptfs_dentry->d_name.name, rc);
177 goto out; 177 goto out;
178 } 178 }
179 rc = ecryptfs_write_metadata(ecryptfs_dentry); 179 rc = ecryptfs_write_metadata(ecryptfs_dentry);
180 if (rc) { 180 if (rc)
181 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc); 181 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
182 goto out; 182 ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
183 }
184out: 183out:
185 return rc; 184 return rc;
186} 185}
@@ -226,11 +225,9 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
226 struct dentry *lower_dir_dentry; 225 struct dentry *lower_dir_dentry;
227 struct vfsmount *lower_mnt; 226 struct vfsmount *lower_mnt;
228 struct inode *lower_inode; 227 struct inode *lower_inode;
229 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
230 struct ecryptfs_crypt_stat *crypt_stat; 228 struct ecryptfs_crypt_stat *crypt_stat;
231 char *page_virt = NULL; 229 char *page_virt = NULL;
232 u64 file_size; 230 int put_lower = 0, rc = 0;
233 int rc = 0;
234 231
235 lower_dir_dentry = lower_dentry->d_parent; 232 lower_dir_dentry = lower_dentry->d_parent;
236 lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt( 233 lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
@@ -277,14 +274,15 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
277 rc = -ENOMEM; 274 rc = -ENOMEM;
278 goto out; 275 goto out;
279 } 276 }
280 rc = ecryptfs_init_persistent_file(ecryptfs_dentry); 277 rc = ecryptfs_get_lower_file(ecryptfs_dentry);
281 if (rc) { 278 if (rc) {
282 printk(KERN_ERR "%s: Error attempting to initialize " 279 printk(KERN_ERR "%s: Error attempting to initialize "
283 "the persistent file for the dentry with name " 280 "the lower file for the dentry with name "
284 "[%s]; rc = [%d]\n", __func__, 281 "[%s]; rc = [%d]\n", __func__,
285 ecryptfs_dentry->d_name.name, rc); 282 ecryptfs_dentry->d_name.name, rc);
286 goto out_free_kmem; 283 goto out_free_kmem;
287 } 284 }
285 put_lower = 1;
288 crypt_stat = &ecryptfs_inode_to_private( 286 crypt_stat = &ecryptfs_inode_to_private(
289 ecryptfs_dentry->d_inode)->crypt_stat; 287 ecryptfs_dentry->d_inode)->crypt_stat;
290 /* TODO: lock for crypt_stat comparison */ 288 /* TODO: lock for crypt_stat comparison */
@@ -302,18 +300,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
302 } 300 }
303 crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR; 301 crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
304 } 302 }
305 mount_crypt_stat = &ecryptfs_superblock_to_private( 303 ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
306 ecryptfs_dentry->d_sb)->mount_crypt_stat;
307 if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
308 if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
309 file_size = (crypt_stat->metadata_size
310 + i_size_read(lower_dentry->d_inode));
311 else
312 file_size = i_size_read(lower_dentry->d_inode);
313 } else {
314 file_size = get_unaligned_be64(page_virt);
315 }
316 i_size_write(ecryptfs_dentry->d_inode, (loff_t)file_size);
317out_free_kmem: 304out_free_kmem:
318 kmem_cache_free(ecryptfs_header_cache_2, page_virt); 305 kmem_cache_free(ecryptfs_header_cache_2, page_virt);
319 goto out; 306 goto out;
@@ -322,6 +309,8 @@ out_put:
322 mntput(lower_mnt); 309 mntput(lower_mnt);
323 d_drop(ecryptfs_dentry); 310 d_drop(ecryptfs_dentry);
324out: 311out:
312 if (put_lower)
313 ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
325 return rc; 314 return rc;
326} 315}
327 316
@@ -538,8 +527,6 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
538 dget(lower_dentry); 527 dget(lower_dentry);
539 rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry); 528 rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
540 dput(lower_dentry); 529 dput(lower_dentry);
541 if (!rc)
542 d_delete(lower_dentry);
543 fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); 530 fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
544 dir->i_nlink = lower_dir_dentry->d_inode->i_nlink; 531 dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
545 unlock_dir(lower_dir_dentry); 532 unlock_dir(lower_dir_dentry);
@@ -610,8 +597,8 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
610 fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode); 597 fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
611out_lock: 598out_lock:
612 unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry); 599 unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
613 dput(lower_new_dentry->d_parent); 600 dput(lower_new_dir_dentry);
614 dput(lower_old_dentry->d_parent); 601 dput(lower_old_dir_dentry);
615 dput(lower_new_dentry); 602 dput(lower_new_dentry);
616 dput(lower_old_dentry); 603 dput(lower_old_dentry);
617 return rc; 604 return rc;
@@ -759,8 +746,11 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
759 746
760 if (unlikely((ia->ia_size == i_size))) { 747 if (unlikely((ia->ia_size == i_size))) {
761 lower_ia->ia_valid &= ~ATTR_SIZE; 748 lower_ia->ia_valid &= ~ATTR_SIZE;
762 goto out; 749 return 0;
763 } 750 }
751 rc = ecryptfs_get_lower_file(dentry);
752 if (rc)
753 return rc;
764 crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat; 754 crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
765 /* Switch on growing or shrinking file */ 755 /* Switch on growing or shrinking file */
766 if (ia->ia_size > i_size) { 756 if (ia->ia_size > i_size) {
@@ -838,6 +828,7 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
838 lower_ia->ia_valid &= ~ATTR_SIZE; 828 lower_ia->ia_valid &= ~ATTR_SIZE;
839 } 829 }
840out: 830out:
831 ecryptfs_put_lower_file(inode);
841 return rc; 832 return rc;
842} 833}
843 834
@@ -913,7 +904,13 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
913 904
914 mount_crypt_stat = &ecryptfs_superblock_to_private( 905 mount_crypt_stat = &ecryptfs_superblock_to_private(
915 dentry->d_sb)->mount_crypt_stat; 906 dentry->d_sb)->mount_crypt_stat;
907 rc = ecryptfs_get_lower_file(dentry);
908 if (rc) {
909 mutex_unlock(&crypt_stat->cs_mutex);
910 goto out;
911 }
916 rc = ecryptfs_read_metadata(dentry); 912 rc = ecryptfs_read_metadata(dentry);
913 ecryptfs_put_lower_file(inode);
917 if (rc) { 914 if (rc) {
918 if (!(mount_crypt_stat->flags 915 if (!(mount_crypt_stat->flags
919 & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) { 916 & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
@@ -927,10 +924,17 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
927 goto out; 924 goto out;
928 } 925 }
929 rc = 0; 926 rc = 0;
930 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); 927 crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
928 | ECRYPTFS_ENCRYPTED);
931 } 929 }
932 } 930 }
933 mutex_unlock(&crypt_stat->cs_mutex); 931 mutex_unlock(&crypt_stat->cs_mutex);
932 if (S_ISREG(inode->i_mode)) {
933 rc = filemap_write_and_wait(inode->i_mapping);
934 if (rc)
935 goto out;
936 fsstack_copy_attr_all(inode, lower_inode);
937 }
934 memcpy(&lower_ia, ia, sizeof(lower_ia)); 938 memcpy(&lower_ia, ia, sizeof(lower_ia));
935 if (ia->ia_valid & ATTR_FILE) 939 if (ia->ia_valid & ATTR_FILE)
936 lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file); 940 lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index 0851ab6980f5..69f994a7d524 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -44,7 +44,7 @@ static struct task_struct *ecryptfs_kthread;
44 * @ignored: ignored 44 * @ignored: ignored
45 * 45 *
46 * The eCryptfs kernel thread that has the responsibility of getting 46 * The eCryptfs kernel thread that has the responsibility of getting
47 * the lower persistent file with RW permissions. 47 * the lower file with RW permissions.
48 * 48 *
49 * Returns zero on success; non-zero otherwise 49 * Returns zero on success; non-zero otherwise
50 */ 50 */
@@ -141,8 +141,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
141 int rc = 0; 141 int rc = 0;
142 142
143 /* Corresponding dput() and mntput() are done when the 143 /* Corresponding dput() and mntput() are done when the
144 * persistent file is fput() when the eCryptfs inode is 144 * lower file is fput() when all eCryptfs files for the inode are
145 * destroyed. */ 145 * released. */
146 dget(lower_dentry); 146 dget(lower_dentry);
147 mntget(lower_mnt); 147 mntget(lower_mnt);
148 flags |= IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR; 148 flags |= IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR;
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index fdb2eb0ad09e..89b93389af8e 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -96,7 +96,7 @@ void __ecryptfs_printk(const char *fmt, ...)
96} 96}
97 97
98/** 98/**
99 * ecryptfs_init_persistent_file 99 * ecryptfs_init_lower_file
100 * @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with 100 * @ecryptfs_dentry: Fully initialized eCryptfs dentry object, with
101 * the lower dentry and the lower mount set 101 * the lower dentry and the lower mount set
102 * 102 *
@@ -104,42 +104,70 @@ void __ecryptfs_printk(const char *fmt, ...)
104 * inode. All I/O operations to the lower inode occur through that 104 * inode. All I/O operations to the lower inode occur through that
105 * file. When the first eCryptfs dentry that interposes with the first 105 * file. When the first eCryptfs dentry that interposes with the first
106 * lower dentry for that inode is created, this function creates the 106 * lower dentry for that inode is created, this function creates the
107 * persistent file struct and associates it with the eCryptfs 107 * lower file struct and associates it with the eCryptfs
108 * inode. When the eCryptfs inode is destroyed, the file is closed. 108 * inode. When all eCryptfs files associated with the inode are released, the
109 * file is closed.
109 * 110 *
110 * The persistent file will be opened with read/write permissions, if 111 * The lower file will be opened with read/write permissions, if
111 * possible. Otherwise, it is opened read-only. 112 * possible. Otherwise, it is opened read-only.
112 * 113 *
113 * This function does nothing if a lower persistent file is already 114 * This function does nothing if a lower file is already
114 * associated with the eCryptfs inode. 115 * associated with the eCryptfs inode.
115 * 116 *
116 * Returns zero on success; non-zero otherwise 117 * Returns zero on success; non-zero otherwise
117 */ 118 */
118int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) 119static int ecryptfs_init_lower_file(struct dentry *dentry,
120 struct file **lower_file)
119{ 121{
120 const struct cred *cred = current_cred(); 122 const struct cred *cred = current_cred();
121 struct ecryptfs_inode_info *inode_info = 123 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
122 ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); 124 struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
123 int rc = 0; 125 int rc;
124 126
125 if (!inode_info->lower_file) { 127 rc = ecryptfs_privileged_open(lower_file, lower_dentry, lower_mnt,
126 struct dentry *lower_dentry; 128 cred);
127 struct vfsmount *lower_mnt = 129 if (rc) {
128 ecryptfs_dentry_to_lower_mnt(ecryptfs_dentry); 130 printk(KERN_ERR "Error opening lower file "
131 "for lower_dentry [0x%p] and lower_mnt [0x%p]; "
132 "rc = [%d]\n", lower_dentry, lower_mnt, rc);
133 (*lower_file) = NULL;
134 }
135 return rc;
136}
129 137
130 lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); 138int ecryptfs_get_lower_file(struct dentry *dentry)
131 rc = ecryptfs_privileged_open(&inode_info->lower_file, 139{
132 lower_dentry, lower_mnt, cred); 140 struct ecryptfs_inode_info *inode_info =
133 if (rc) { 141 ecryptfs_inode_to_private(dentry->d_inode);
134 printk(KERN_ERR "Error opening lower persistent file " 142 int count, rc = 0;
135 "for lower_dentry [0x%p] and lower_mnt [0x%p]; " 143
136 "rc = [%d]\n", lower_dentry, lower_mnt, rc); 144 mutex_lock(&inode_info->lower_file_mutex);
137 inode_info->lower_file = NULL; 145 count = atomic_inc_return(&inode_info->lower_file_count);
138 } 146 if (WARN_ON_ONCE(count < 1))
147 rc = -EINVAL;
148 else if (count == 1) {
149 rc = ecryptfs_init_lower_file(dentry,
150 &inode_info->lower_file);
151 if (rc)
152 atomic_set(&inode_info->lower_file_count, 0);
139 } 153 }
154 mutex_unlock(&inode_info->lower_file_mutex);
140 return rc; 155 return rc;
141} 156}
142 157
158void ecryptfs_put_lower_file(struct inode *inode)
159{
160 struct ecryptfs_inode_info *inode_info;
161
162 inode_info = ecryptfs_inode_to_private(inode);
163 if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count,
164 &inode_info->lower_file_mutex)) {
165 fput(inode_info->lower_file);
166 inode_info->lower_file = NULL;
167 mutex_unlock(&inode_info->lower_file_mutex);
168 }
169}
170
143static struct inode *ecryptfs_get_inode(struct inode *lower_inode, 171static struct inode *ecryptfs_get_inode(struct inode *lower_inode,
144 struct super_block *sb) 172 struct super_block *sb)
145{ 173{
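
The hunks above replace eCryptfs's old per-inode persistent file, which was opened on first use and only closed when the inode was destroyed, with a reference-counted lower file: the first ecryptfs_get_lower_file() opens it and the final ecryptfs_put_lower_file() fput()s it, with the 0-to-1 and 1-to-0 transitions decided under lower_file_mutex. The following is a minimal user-space C sketch of that get/put shape, assuming pthreads; struct shared_handle, handle_get() and handle_put() are hypothetical names, and unlike the kernel code (where atomic_dec_and_mutex_lock() lets most puts avoid the mutex) the sketch simply takes the lock on every call.

#include <pthread.h>
#include <stdio.h>

struct shared_handle {
	pthread_mutex_t lock;	/* counterpart of lower_file_mutex */
	int count;		/* counterpart of lower_file_count */
	FILE *file;		/* counterpart of lower_file */
	const char *path;
};

/* First getter opens the backing file; later getters only bump the count. */
static int handle_get(struct shared_handle *h)
{
	int rc = 0;

	pthread_mutex_lock(&h->lock);
	if (h->count++ == 0) {
		h->file = fopen(h->path, "r+");
		if (!h->file) {
			rc = -1;
			h->count = 0;	/* mirror the error path above */
		}
	}
	pthread_mutex_unlock(&h->lock);
	return rc;
}

/* Last putter closes the backing file. */
static void handle_put(struct shared_handle *h)
{
	pthread_mutex_lock(&h->lock);
	if (--h->count == 0) {
		fclose(h->file);
		h->file = NULL;
	}
	pthread_mutex_unlock(&h->lock);
}

Because open and close happen only at the count transitions and only under the lock, concurrent openers cannot race to open the backing file twice, and the last release always sees the file still valid.
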
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index bacc882e1ae4..245b517bf1b6 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -55,6 +55,8 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
55 if (unlikely(!inode_info)) 55 if (unlikely(!inode_info))
56 goto out; 56 goto out;
57 ecryptfs_init_crypt_stat(&inode_info->crypt_stat); 57 ecryptfs_init_crypt_stat(&inode_info->crypt_stat);
58 mutex_init(&inode_info->lower_file_mutex);
59 atomic_set(&inode_info->lower_file_count, 0);
58 inode_info->lower_file = NULL; 60 inode_info->lower_file = NULL;
59 inode = &inode_info->vfs_inode; 61 inode = &inode_info->vfs_inode;
60out: 62out:
@@ -77,8 +79,7 @@ static void ecryptfs_i_callback(struct rcu_head *head)
77 * 79 *
78 * This is used during the final destruction of the inode. All 80 * This is used during the final destruction of the inode. All
79 * allocation of memory related to the inode, including allocated 81 * allocation of memory related to the inode, including allocated
80 * memory in the crypt_stat struct, will be released here. This 82 * memory in the crypt_stat struct, will be released here.
81 * function also fput()'s the persistent file for the lower inode.
82 * There should be no chance that this deallocation will be missed. 83 * There should be no chance that this deallocation will be missed.
83 */ 84 */
84static void ecryptfs_destroy_inode(struct inode *inode) 85static void ecryptfs_destroy_inode(struct inode *inode)
@@ -86,16 +87,7 @@ static void ecryptfs_destroy_inode(struct inode *inode)
86 struct ecryptfs_inode_info *inode_info; 87 struct ecryptfs_inode_info *inode_info;
87 88
88 inode_info = ecryptfs_inode_to_private(inode); 89 inode_info = ecryptfs_inode_to_private(inode);
89 if (inode_info->lower_file) { 90 BUG_ON(inode_info->lower_file);
90 struct dentry *lower_dentry =
91 inode_info->lower_file->f_dentry;
92
93 BUG_ON(!lower_dentry);
94 if (lower_dentry->d_inode) {
95 fput(inode_info->lower_file);
96 inode_info->lower_file = NULL;
97 }
98 }
99 ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat); 91 ecryptfs_destroy_crypt_stat(&inode_info->crypt_stat);
100 call_rcu(&inode->i_rcu, ecryptfs_i_callback); 92 call_rcu(&inode->i_rcu, ecryptfs_i_callback);
101} 93}
diff --git a/fs/filesystems.c b/fs/filesystems.c
index 751d6b255a12..0845f84f2a5f 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -110,14 +110,13 @@ int unregister_filesystem(struct file_system_type * fs)
110 *tmp = fs->next; 110 *tmp = fs->next;
111 fs->next = NULL; 111 fs->next = NULL;
112 write_unlock(&file_systems_lock); 112 write_unlock(&file_systems_lock);
113 synchronize_rcu();
113 return 0; 114 return 0;
114 } 115 }
115 tmp = &(*tmp)->next; 116 tmp = &(*tmp)->next;
116 } 117 }
117 write_unlock(&file_systems_lock); 118 write_unlock(&file_systems_lock);
118 119
119 synchronize_rcu();
120
121 return -EINVAL; 120 return -EINVAL;
122} 121}
123 122
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index c71995b111bf..0f5c4f9d5d62 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -884,8 +884,8 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
884 } 884 }
885 885
886 brelse(dibh); 886 brelse(dibh);
887 gfs2_trans_end(sdp);
888failed: 887failed:
888 gfs2_trans_end(sdp);
889 if (al) { 889 if (al) {
890 gfs2_inplace_release(ip); 890 gfs2_inplace_release(ip);
891 gfs2_quota_unlock(ip); 891 gfs2_quota_unlock(ip);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 5c356d09c321..f789c5732b7c 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1506,7 +1506,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
1506 inode = gfs2_inode_lookup(dir->i_sb, 1506 inode = gfs2_inode_lookup(dir->i_sb,
1507 be16_to_cpu(dent->de_type), 1507 be16_to_cpu(dent->de_type),
1508 be64_to_cpu(dent->de_inum.no_addr), 1508 be64_to_cpu(dent->de_inum.no_addr),
1509 be64_to_cpu(dent->de_inum.no_formal_ino)); 1509 be64_to_cpu(dent->de_inum.no_formal_ino), 0);
1510 brelse(bh); 1510 brelse(bh);
1511 return inode; 1511 return inode;
1512 } 1512 }
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index b2682e073eee..e48310885c48 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -617,18 +617,51 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
617 return generic_file_aio_write(iocb, iov, nr_segs, pos); 617 return generic_file_aio_write(iocb, iov, nr_segs, pos);
618} 618}
619 619
620static void empty_write_end(struct page *page, unsigned from, 620static int empty_write_end(struct page *page, unsigned from,
621 unsigned to) 621 unsigned to, int mode)
622{ 622{
623 struct gfs2_inode *ip = GFS2_I(page->mapping->host); 623 struct inode *inode = page->mapping->host;
624 struct gfs2_inode *ip = GFS2_I(inode);
625 struct buffer_head *bh;
626 unsigned offset, blksize = 1 << inode->i_blkbits;
627 pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
624 628
625 zero_user(page, from, to-from); 629 zero_user(page, from, to-from);
626 mark_page_accessed(page); 630 mark_page_accessed(page);
627 631
628 if (!gfs2_is_writeback(ip)) 632 if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
629 gfs2_page_add_databufs(ip, page, from, to); 633 if (!gfs2_is_writeback(ip))
634 gfs2_page_add_databufs(ip, page, from, to);
635
636 block_commit_write(page, from, to);
637 return 0;
638 }
639
640 offset = 0;
641 bh = page_buffers(page);
642 while (offset < to) {
643 if (offset >= from) {
644 set_buffer_uptodate(bh);
645 mark_buffer_dirty(bh);
646 clear_buffer_new(bh);
647 write_dirty_buffer(bh, WRITE);
648 }
649 offset += blksize;
650 bh = bh->b_this_page;
651 }
630 652
631 block_commit_write(page, from, to); 653 offset = 0;
654 bh = page_buffers(page);
655 while (offset < to) {
656 if (offset >= from) {
657 wait_on_buffer(bh);
658 if (!buffer_uptodate(bh))
659 return -EIO;
660 }
661 offset += blksize;
662 bh = bh->b_this_page;
663 }
664 return 0;
632} 665}
633 666
634static int needs_empty_write(sector_t block, struct inode *inode) 667static int needs_empty_write(sector_t block, struct inode *inode)
@@ -643,7 +676,8 @@ static int needs_empty_write(sector_t block, struct inode *inode)
643 return !buffer_mapped(&bh_map); 676 return !buffer_mapped(&bh_map);
644} 677}
645 678
646static int write_empty_blocks(struct page *page, unsigned from, unsigned to) 679static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
680 int mode)
647{ 681{
648 struct inode *inode = page->mapping->host; 682 struct inode *inode = page->mapping->host;
649 unsigned start, end, next, blksize; 683 unsigned start, end, next, blksize;
@@ -668,7 +702,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
668 gfs2_block_map); 702 gfs2_block_map);
669 if (unlikely(ret)) 703 if (unlikely(ret))
670 return ret; 704 return ret;
671 empty_write_end(page, start, end); 705 ret = empty_write_end(page, start, end, mode);
706 if (unlikely(ret))
707 return ret;
672 end = 0; 708 end = 0;
673 } 709 }
674 start = next; 710 start = next;
@@ -682,7 +718,9 @@ static int write_empty_blocks(struct page *page, unsigned from, unsigned to)
682 ret = __block_write_begin(page, start, end - start, gfs2_block_map); 718 ret = __block_write_begin(page, start, end - start, gfs2_block_map);
683 if (unlikely(ret)) 719 if (unlikely(ret))
684 return ret; 720 return ret;
685 empty_write_end(page, start, end); 721 ret = empty_write_end(page, start, end, mode);
722 if (unlikely(ret))
723 return ret;
686 } 724 }
687 725
688 return 0; 726 return 0;
@@ -731,7 +769,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
731 769
732 if (curr == end) 770 if (curr == end)
733 to = end_offset; 771 to = end_offset;
734 error = write_empty_blocks(page, from, to); 772 error = write_empty_blocks(page, from, to, mode);
735 if (!error && offset + to > inode->i_size && 773 if (!error && offset + to > inode->i_size &&
736 !(mode & FALLOC_FL_KEEP_SIZE)) { 774 !(mode & FALLOC_FL_KEEP_SIZE)) {
737 i_size_write(inode, offset + to); 775 i_size_write(inode, offset + to);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f07643e21bfa..7a4fb630a320 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -93,14 +93,12 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
93 93
94static inline void spin_lock_bucket(unsigned int hash) 94static inline void spin_lock_bucket(unsigned int hash)
95{ 95{
96 struct hlist_bl_head *bl = &gl_hash_table[hash]; 96 hlist_bl_lock(&gl_hash_table[hash]);
97 bit_spin_lock(0, (unsigned long *)bl);
98} 97}
99 98
100static inline void spin_unlock_bucket(unsigned int hash) 99static inline void spin_unlock_bucket(unsigned int hash)
101{ 100{
102 struct hlist_bl_head *bl = &gl_hash_table[hash]; 101 hlist_bl_unlock(&gl_hash_table[hash]);
103 __bit_spin_unlock(0, (unsigned long *)bl);
104} 102}
105 103
106static void gfs2_glock_dealloc(struct rcu_head *rcu) 104static void gfs2_glock_dealloc(struct rcu_head *rcu)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 3754e3cbf02b..25eeb2bcee47 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -385,6 +385,10 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
385static void iopen_go_callback(struct gfs2_glock *gl) 385static void iopen_go_callback(struct gfs2_glock *gl)
386{ 386{
387 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; 387 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
388 struct gfs2_sbd *sdp = gl->gl_sbd;
389
390 if (sdp->sd_vfs->s_flags & MS_RDONLY)
391 return;
388 392
389 if (gl->gl_demote_state == LM_ST_UNLOCKED && 393 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
390 gl->gl_state == LM_ST_SHARED && ip) { 394 gl->gl_state == LM_ST_SHARED && ip) {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 97d54a28776a..9134dcb89479 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -40,37 +40,61 @@ struct gfs2_inum_range_host {
40 u64 ir_length; 40 u64 ir_length;
41}; 41};
42 42
43struct gfs2_skip_data {
44 u64 no_addr;
45 int skipped;
46 int non_block;
47};
48
43static int iget_test(struct inode *inode, void *opaque) 49static int iget_test(struct inode *inode, void *opaque)
44{ 50{
45 struct gfs2_inode *ip = GFS2_I(inode); 51 struct gfs2_inode *ip = GFS2_I(inode);
46 u64 *no_addr = opaque; 52 struct gfs2_skip_data *data = opaque;
47 53
48 if (ip->i_no_addr == *no_addr) 54 if (ip->i_no_addr == data->no_addr) {
55 if (data->non_block &&
56 inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
57 data->skipped = 1;
58 return 0;
59 }
49 return 1; 60 return 1;
50 61 }
51 return 0; 62 return 0;
52} 63}
53 64
54static int iget_set(struct inode *inode, void *opaque) 65static int iget_set(struct inode *inode, void *opaque)
55{ 66{
56 struct gfs2_inode *ip = GFS2_I(inode); 67 struct gfs2_inode *ip = GFS2_I(inode);
57 u64 *no_addr = opaque; 68 struct gfs2_skip_data *data = opaque;
58 69
59 inode->i_ino = (unsigned long)*no_addr; 70 if (data->skipped)
60 ip->i_no_addr = *no_addr; 71 return -ENOENT;
72 inode->i_ino = (unsigned long)(data->no_addr);
73 ip->i_no_addr = data->no_addr;
61 return 0; 74 return 0;
62} 75}
63 76
64struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr) 77struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
65{ 78{
66 unsigned long hash = (unsigned long)no_addr; 79 unsigned long hash = (unsigned long)no_addr;
67 return ilookup5(sb, hash, iget_test, &no_addr); 80 struct gfs2_skip_data data;
81
82 data.no_addr = no_addr;
83 data.skipped = 0;
84 data.non_block = 0;
85 return ilookup5(sb, hash, iget_test, &data);
68} 86}
69 87
70static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr) 88static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr,
89 int non_block)
71{ 90{
91 struct gfs2_skip_data data;
72 unsigned long hash = (unsigned long)no_addr; 92 unsigned long hash = (unsigned long)no_addr;
73 return iget5_locked(sb, hash, iget_test, iget_set, &no_addr); 93
94 data.no_addr = no_addr;
95 data.skipped = 0;
96 data.non_block = non_block;
97 return iget5_locked(sb, hash, iget_test, iget_set, &data);
74} 98}
75 99
76/** 100/**
@@ -111,19 +135,20 @@ static void gfs2_set_iop(struct inode *inode)
111 * @sb: The super block 135 * @sb: The super block
112 * @no_addr: The inode number 136 * @no_addr: The inode number
113 * @type: The type of the inode 137 * @type: The type of the inode
138 * @non_block: Can we block on inodes that are being freed?

114 * 139 *
115 * Returns: A VFS inode, or an error 140 * Returns: A VFS inode, or an error
116 */ 141 */
117 142
118struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, 143struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
119 u64 no_addr, u64 no_formal_ino) 144 u64 no_addr, u64 no_formal_ino, int non_block)
120{ 145{
121 struct inode *inode; 146 struct inode *inode;
122 struct gfs2_inode *ip; 147 struct gfs2_inode *ip;
123 struct gfs2_glock *io_gl = NULL; 148 struct gfs2_glock *io_gl = NULL;
124 int error; 149 int error;
125 150
126 inode = gfs2_iget(sb, no_addr); 151 inode = gfs2_iget(sb, no_addr, non_block);
127 ip = GFS2_I(inode); 152 ip = GFS2_I(inode);
128 153
129 if (!inode) 154 if (!inode)
@@ -185,11 +210,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
185{ 210{
186 struct super_block *sb = sdp->sd_vfs; 211 struct super_block *sb = sdp->sd_vfs;
187 struct gfs2_holder i_gh; 212 struct gfs2_holder i_gh;
188 struct inode *inode; 213 struct inode *inode = NULL;
189 int error; 214 int error;
190 215
216 /* Must not read in block until block type is verified */
191 error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops, 217 error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
192 LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 218 LM_ST_EXCLUSIVE, GL_SKIP, &i_gh);
193 if (error) 219 if (error)
194 return ERR_PTR(error); 220 return ERR_PTR(error);
195 221
@@ -197,7 +223,7 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
197 if (error) 223 if (error)
198 goto fail; 224 goto fail;
199 225
200 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0); 226 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, 1);
201 if (IS_ERR(inode)) 227 if (IS_ERR(inode))
202 goto fail; 228 goto fail;
203 229
@@ -843,7 +869,7 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
843 goto fail_gunlock2; 869 goto fail_gunlock2;
844 870
845 inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr, 871 inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), inum.no_addr,
846 inum.no_formal_ino); 872 inum.no_formal_ino, 0);
847 if (IS_ERR(inode)) 873 if (IS_ERR(inode))
848 goto fail_gunlock2; 874 goto fail_gunlock2;
849 875
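
The gfs2_skip_data changes above let the inode-hash callbacks recognise a block address yet decline the match when the inode is being freed and the caller asked not to block, recording the refusal in data->skipped so that iget_set() can fail the insertion with -ENOENT. A hypothetical user-space sketch of that lookup-with-skip idiom, with a linear table standing in for the inode hash walked by ilookup5()/iget5_locked():

#include <stddef.h>

struct entry {
	unsigned long long no_addr;
	int being_freed;
};

struct skip_data {
	unsigned long long no_addr;
	int skipped;
	int non_block;
};

static int entry_test(const struct entry *e, struct skip_data *d)
{
	if (e->no_addr != d->no_addr)
		return 0;
	if (d->non_block && e->being_freed) {
		d->skipped = 1;	/* remember why the lookup "missed" */
		return 0;
	}
	return 1;
}

/* Linear stand-in for the hash-bucket walk; the caller inspects d->skipped
 * on a miss before deciding to allocate a fresh entry. */
static struct entry *lookup(struct entry *tbl, size_t n, struct skip_data *d)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (entry_test(&tbl[i], d))
			return &tbl[i];
	return NULL;
}
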
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 3e00a66e7cbd..099ca305e518 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -97,7 +97,8 @@ err:
97} 97}
98 98
99extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 99extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
100 u64 no_addr, u64 no_formal_ino); 100 u64 no_addr, u64 no_formal_ino,
101 int non_block);
101extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, 102extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
102 u64 *no_formal_ino, 103 u64 *no_formal_ino,
103 unsigned int blktype); 104 unsigned int blktype);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 42ef24355afb..d3c69eb91c74 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -430,7 +430,7 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
430 struct dentry *dentry; 430 struct dentry *dentry;
431 struct inode *inode; 431 struct inode *inode;
432 432
433 inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0); 433 inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0, 0);
434 if (IS_ERR(inode)) { 434 if (IS_ERR(inode)) {
435 fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode)); 435 fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
436 return PTR_ERR(inode); 436 return PTR_ERR(inode);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index cf930cd9664a..6fcae8469f6d 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -945,7 +945,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
945 /* rgblk_search can return a block < goal, so we need to 945 /* rgblk_search can return a block < goal, so we need to
946 keep it marching forward. */ 946 keep it marching forward. */
947 no_addr = block + rgd->rd_data0; 947 no_addr = block + rgd->rd_data0;
948 goal++; 948 goal = max(block + 1, goal + 1);
949 if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked) 949 if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
950 continue; 950 continue;
951 if (no_addr == skip) 951 if (no_addr == skip)
@@ -971,7 +971,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
971 found++; 971 found++;
972 972
973 /* Limit reclaim to sensible number of tasks */ 973 /* Limit reclaim to sensible number of tasks */
974 if (found > 2*NR_CPUS) 974 if (found > NR_CPUS)
975 return; 975 return;
976 } 976 }
977 977
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a4e23d68a398..b9f28e66dad1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1318,15 +1318,17 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
1318 1318
1319static void gfs2_evict_inode(struct inode *inode) 1319static void gfs2_evict_inode(struct inode *inode)
1320{ 1320{
1321 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info; 1321 struct super_block *sb = inode->i_sb;
1322 struct gfs2_sbd *sdp = sb->s_fs_info;
1322 struct gfs2_inode *ip = GFS2_I(inode); 1323 struct gfs2_inode *ip = GFS2_I(inode);
1323 struct gfs2_holder gh; 1324 struct gfs2_holder gh;
1324 int error; 1325 int error;
1325 1326
1326 if (inode->i_nlink) 1327 if (inode->i_nlink || (sb->s_flags & MS_RDONLY))
1327 goto out; 1328 goto out;
1328 1329
1329 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 1330 /* Must not read inode block until block type has been verified */
1331 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
1330 if (unlikely(error)) { 1332 if (unlikely(error)) {
1331 gfs2_glock_dq_uninit(&ip->i_iopen_gh); 1333 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1332 goto out; 1334 goto out;
@@ -1336,6 +1338,12 @@ static void gfs2_evict_inode(struct inode *inode)
1336 if (error) 1338 if (error)
1337 goto out_truncate; 1339 goto out_truncate;
1338 1340
1341 if (test_bit(GIF_INVALID, &ip->i_flags)) {
1342 error = gfs2_inode_refresh(ip);
1343 if (error)
1344 goto out_truncate;
1345 }
1346
1339 ip->i_iopen_gh.gh_flags |= GL_NOCACHE; 1347 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1340 gfs2_glock_dq_wait(&ip->i_iopen_gh); 1348 gfs2_glock_dq_wait(&ip->i_iopen_gh);
1341 gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh); 1349 gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
diff --git a/fs/namei.c b/fs/namei.c
index e6cd6113872c..54fc993e3027 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -697,6 +697,7 @@ static __always_inline void set_root_rcu(struct nameidata *nd)
697 do { 697 do {
698 seq = read_seqcount_begin(&fs->seq); 698 seq = read_seqcount_begin(&fs->seq);
699 nd->root = fs->root; 699 nd->root = fs->root;
700 nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
700 } while (read_seqcount_retry(&fs->seq, seq)); 701 } while (read_seqcount_retry(&fs->seq, seq));
701 } 702 }
702} 703}
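
set_root_rcu() now samples nd->root.dentry->d_seq inside the same fs->seq retry loop, so the dentry sequence number is captured against a root that is known to be self-consistent. A rough C11 sketch of the read-side seqcount pattern involved; seq_t, read_begin() and read_retry() are invented stand-ins for the kernel's seqcount_t helpers, and the memory ordering is only approximate:

#include <stdatomic.h>

typedef atomic_uint seq_t;

static unsigned int read_begin(seq_t *s)
{
	unsigned int v;

	/* An odd count means a writer is mid-update: wait it out. */
	while ((v = atomic_load_explicit(s, memory_order_acquire)) & 1u)
		;
	return v;
}

static int read_retry(seq_t *s, unsigned int start)
{
	/* Keep the data reads ordered before the count is re-checked. */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(s, memory_order_relaxed) != start;
}

/* Usage mirroring the hunk above: sample the root and everything derived
 * from it inside one retry loop.
 *
 *	do {
 *		seq = read_begin(&fs_seq);
 *		root = fs_root;
 *		root_seq = root_d_seq;
 *	} while (read_retry(&fs_seq, seq));
 */
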
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index aa309aa93fe8..4cf04e11c66c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -258,6 +258,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp)
258 if (atomic_dec_and_test(&fp->fi_delegees)) { 258 if (atomic_dec_and_test(&fp->fi_delegees)) {
259 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); 259 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
260 fp->fi_lease = NULL; 260 fp->fi_lease = NULL;
261 fput(fp->fi_deleg_file);
261 fp->fi_deleg_file = NULL; 262 fp->fi_deleg_file = NULL;
262 } 263 }
263} 264}
@@ -402,8 +403,8 @@ static void free_generic_stateid(struct nfs4_stateid *stp)
402 if (stp->st_access_bmap) { 403 if (stp->st_access_bmap) {
403 oflag = nfs4_access_bmap_to_omode(stp); 404 oflag = nfs4_access_bmap_to_omode(stp);
404 nfs4_file_put_access(stp->st_file, oflag); 405 nfs4_file_put_access(stp->st_file, oflag);
405 put_nfs4_file(stp->st_file);
406 } 406 }
407 put_nfs4_file(stp->st_file);
407 kmem_cache_free(stateid_slab, stp); 408 kmem_cache_free(stateid_slab, stp);
408} 409}
409 410
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 2e1cebde90df..129f3c9f62d5 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1363,7 +1363,7 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
1363 goto out; 1363 goto out;
1364 if (!(iap->ia_valid & ATTR_MODE)) 1364 if (!(iap->ia_valid & ATTR_MODE))
1365 iap->ia_mode = 0; 1365 iap->ia_mode = 0;
1366 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE); 1366 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
1367 if (err) 1367 if (err)
1368 goto out; 1368 goto out;
1369 1369
@@ -1385,6 +1385,13 @@ nfsd_create_v3(struct svc_rqst *rqstp, struct svc_fh *fhp,
1385 if (IS_ERR(dchild)) 1385 if (IS_ERR(dchild))
1386 goto out_nfserr; 1386 goto out_nfserr;
1387 1387
1388 /* If file doesn't exist, check for permissions to create one */
1389 if (!dchild->d_inode) {
1390 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
1391 if (err)
1392 goto out;
1393 }
1394
1388 err = fh_compose(resfhp, fhp->fh_export, dchild, fhp); 1395 err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
1389 if (err) 1396 if (err)
1390 goto out; 1397 goto out;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index b68f87a83924..938387a10d5d 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -1019,7 +1019,7 @@ struct ocfs2_xattr_entry {
1019 __le16 xe_name_offset; /* byte offset from the 1st entry in the 1019 __le16 xe_name_offset; /* byte offset from the 1st entry in the
1020 local xattr storage(inode, xattr block or 1020 local xattr storage(inode, xattr block or
1021 xattr bucket). */ 1021 xattr bucket). */
1022 __u8 xe_name_len; /* xattr name len, does't include prefix. */ 1022 __u8 xe_name_len; /* xattr name len, doesn't include prefix. */
1023 __u8 xe_type; /* the low 7 bits indicate the name prefix 1023 __u8 xe_type; /* the low 7 bits indicate the name prefix
1024 * type and the highest bit indicates whether 1024 * type and the highest bit indicates whether
1025 * the EA is stored in the local storage. */ 1025 * the EA is stored in the local storage. */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index dd6628d3ba42..dfa532730e55 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3124,11 +3124,16 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
3124/* for the /proc/ directory itself, after non-process stuff has been done */ 3124/* for the /proc/ directory itself, after non-process stuff has been done */
3125int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) 3125int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
3126{ 3126{
3127 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY; 3127 unsigned int nr;
3128 struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode); 3128 struct task_struct *reaper;
3129 struct tgid_iter iter; 3129 struct tgid_iter iter;
3130 struct pid_namespace *ns; 3130 struct pid_namespace *ns;
3131 3131
3132 if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
3133 goto out_no_task;
3134 nr = filp->f_pos - FIRST_PROCESS_ENTRY;
3135
3136 reaper = get_proc_task(filp->f_path.dentry->d_inode);
3132 if (!reaper) 3137 if (!reaper)
3133 goto out_no_task; 3138 goto out_no_task;
3134 3139
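
proc_pid_readdir() now checks filp->f_pos against PID_MAX_LIMIT + TGID_OFFSET before deriving the unsigned iterator position from it and before taking a reference on the reaper task. A small hypothetical C sketch of the same validate-before-derive idiom; the limits and table are made up:

#include <stddef.h>

#define FIRST_ENTRY	2u
#define MAX_ENTRIES	4096u

static const char *table[MAX_ENTRIES];

static const char *read_slot(unsigned long long pos)
{
	unsigned int nr;

	/* Reject out-of-range positions before turning them into an index,
	 * so an oversized pos never reaches the table walk. */
	if (pos < FIRST_ENTRY || pos >= FIRST_ENTRY + MAX_ENTRIES)
		return NULL;
	nr = (unsigned int)(pos - FIRST_ENTRY);
	return table[nr];
}
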
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 919f0de29d8f..e6493cac193d 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -23,6 +23,12 @@
23#ifndef __UBIFS_DEBUG_H__ 23#ifndef __UBIFS_DEBUG_H__
24#define __UBIFS_DEBUG_H__ 24#define __UBIFS_DEBUG_H__
25 25
26/* Checking helper functions */
27typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
28 struct ubifs_zbranch *zbr, void *priv);
29typedef int (*dbg_znode_callback)(struct ubifs_info *c,
30 struct ubifs_znode *znode, void *priv);
31
26#ifdef CONFIG_UBIFS_FS_DEBUG 32#ifdef CONFIG_UBIFS_FS_DEBUG
27 33
28/** 34/**
@@ -270,11 +276,6 @@ void dbg_dump_tnc(struct ubifs_info *c);
270void dbg_dump_index(struct ubifs_info *c); 276void dbg_dump_index(struct ubifs_info *c);
271void dbg_dump_lpt_lebs(const struct ubifs_info *c); 277void dbg_dump_lpt_lebs(const struct ubifs_info *c);
272 278
273/* Checking helper functions */
274typedef int (*dbg_leaf_callback)(struct ubifs_info *c,
275 struct ubifs_zbranch *zbr, void *priv);
276typedef int (*dbg_znode_callback)(struct ubifs_info *c,
277 struct ubifs_znode *znode, void *priv);
278int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb, 279int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
279 dbg_znode_callback znode_cb, void *priv); 280 dbg_znode_callback znode_cb, void *priv);
280 281
@@ -295,7 +296,6 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size);
295int dbg_check_filesystem(struct ubifs_info *c); 296int dbg_check_filesystem(struct ubifs_info *c);
296void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, 297void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
297 int add_pos); 298 int add_pos);
298int dbg_check_lprops(struct ubifs_info *c);
299int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, 299int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
300 int row, int col); 300 int row, int col);
301int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, 301int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
@@ -401,58 +401,94 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
401#define DBGKEY(key) ((char *)(key)) 401#define DBGKEY(key) ((char *)(key))
402#define DBGKEY1(key) ((char *)(key)) 402#define DBGKEY1(key) ((char *)(key))
403 403
404#define ubifs_debugging_init(c) 0 404static inline int ubifs_debugging_init(struct ubifs_info *c) { return 0; }
405#define ubifs_debugging_exit(c) ({}) 405static inline void ubifs_debugging_exit(struct ubifs_info *c) { return; }
406 406static inline const char *dbg_ntype(int type) { return ""; }
407#define dbg_ntype(type) "" 407static inline const char *dbg_cstate(int cmt_state) { return ""; }
408#define dbg_cstate(cmt_state) "" 408static inline const char *dbg_jhead(int jhead) { return ""; }
409#define dbg_jhead(jhead) "" 409static inline const char *
410#define dbg_get_key_dump(c, key) ({}) 410dbg_get_key_dump(const struct ubifs_info *c,
411#define dbg_dump_inode(c, inode) ({}) 411 const union ubifs_key *key) { return ""; }
412#define dbg_dump_node(c, node) ({}) 412static inline void dbg_dump_inode(const struct ubifs_info *c,
413#define dbg_dump_lpt_node(c, node, lnum, offs) ({}) 413 const struct inode *inode) { return; }
414#define dbg_dump_budget_req(req) ({}) 414static inline void dbg_dump_node(const struct ubifs_info *c,
415#define dbg_dump_lstats(lst) ({}) 415 const void *node) { return; }
416#define dbg_dump_budg(c) ({}) 416static inline void dbg_dump_lpt_node(const struct ubifs_info *c,
417#define dbg_dump_lprop(c, lp) ({}) 417 void *node, int lnum,
418#define dbg_dump_lprops(c) ({}) 418 int offs) { return; }
419#define dbg_dump_lpt_info(c) ({}) 419static inline void
420#define dbg_dump_leb(c, lnum) ({}) 420dbg_dump_budget_req(const struct ubifs_budget_req *req) { return; }
421#define dbg_dump_znode(c, znode) ({}) 421static inline void
422#define dbg_dump_heap(c, heap, cat) ({}) 422dbg_dump_lstats(const struct ubifs_lp_stats *lst) { return; }
423#define dbg_dump_pnode(c, pnode, parent, iip) ({}) 423static inline void dbg_dump_budg(struct ubifs_info *c) { return; }
424#define dbg_dump_tnc(c) ({}) 424static inline void dbg_dump_lprop(const struct ubifs_info *c,
425#define dbg_dump_index(c) ({}) 425 const struct ubifs_lprops *lp) { return; }
426#define dbg_dump_lpt_lebs(c) ({}) 426static inline void dbg_dump_lprops(struct ubifs_info *c) { return; }
427 427static inline void dbg_dump_lpt_info(struct ubifs_info *c) { return; }
428#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0 428static inline void dbg_dump_leb(const struct ubifs_info *c,
429#define dbg_old_index_check_init(c, zroot) 0 429 int lnum) { return; }
430#define dbg_save_space_info(c) ({}) 430static inline void
431#define dbg_check_space_info(c) 0 431dbg_dump_znode(const struct ubifs_info *c,
432#define dbg_check_old_index(c, zroot) 0 432 const struct ubifs_znode *znode) { return; }
433#define dbg_check_cats(c) 0 433static inline void dbg_dump_heap(struct ubifs_info *c,
434#define dbg_check_ltab(c) 0 434 struct ubifs_lpt_heap *heap,
435#define dbg_chk_lpt_free_spc(c) 0 435 int cat) { return; }
436#define dbg_chk_lpt_sz(c, action, len) 0 436static inline void dbg_dump_pnode(struct ubifs_info *c,
437#define dbg_check_synced_i_size(inode) 0 437 struct ubifs_pnode *pnode,
438#define dbg_check_dir_size(c, dir) 0 438 struct ubifs_nnode *parent,
439#define dbg_check_tnc(c, x) 0 439 int iip) { return; }
440#define dbg_check_idx_size(c, idx_size) 0 440static inline void dbg_dump_tnc(struct ubifs_info *c) { return; }
441#define dbg_check_filesystem(c) 0 441static inline void dbg_dump_index(struct ubifs_info *c) { return; }
442#define dbg_check_heap(c, heap, cat, add_pos) ({}) 442static inline void dbg_dump_lpt_lebs(const struct ubifs_info *c) { return; }
443#define dbg_check_lprops(c) 0 443
444#define dbg_check_lpt_nodes(c, cnode, row, col) 0 444static inline int dbg_walk_index(struct ubifs_info *c,
445#define dbg_check_inode_size(c, inode, size) 0 445 dbg_leaf_callback leaf_cb,
446#define dbg_check_data_nodes_order(c, head) 0 446 dbg_znode_callback znode_cb,
447#define dbg_check_nondata_nodes_order(c, head) 0 447 void *priv) { return 0; }
448#define dbg_force_in_the_gaps_enabled 0 448static inline void dbg_save_space_info(struct ubifs_info *c) { return; }
449#define dbg_force_in_the_gaps() 0 449static inline int dbg_check_space_info(struct ubifs_info *c) { return 0; }
450#define dbg_failure_mode 0 450static inline int dbg_check_lprops(struct ubifs_info *c) { return 0; }
451 451static inline int
452#define dbg_debugfs_init() 0 452dbg_old_index_check_init(struct ubifs_info *c,
453#define dbg_debugfs_exit() 453 struct ubifs_zbranch *zroot) { return 0; }
454#define dbg_debugfs_init_fs(c) 0 454static inline int
455#define dbg_debugfs_exit_fs(c) 0 455dbg_check_old_index(struct ubifs_info *c,
456 struct ubifs_zbranch *zroot) { return 0; }
457static inline int dbg_check_cats(struct ubifs_info *c) { return 0; }
458static inline int dbg_check_ltab(struct ubifs_info *c) { return 0; }
459static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; }
460static inline int dbg_chk_lpt_sz(struct ubifs_info *c,
461 int action, int len) { return 0; }
462static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; }
463static inline int dbg_check_dir_size(struct ubifs_info *c,
464 const struct inode *dir) { return 0; }
465static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; }
466static inline int dbg_check_idx_size(struct ubifs_info *c,
467 long long idx_size) { return 0; }
468static inline int dbg_check_filesystem(struct ubifs_info *c) { return 0; }
469static inline void dbg_check_heap(struct ubifs_info *c,
470 struct ubifs_lpt_heap *heap,
471 int cat, int add_pos) { return; }
472static inline int dbg_check_lpt_nodes(struct ubifs_info *c,
473 struct ubifs_cnode *cnode, int row, int col) { return 0; }
474static inline int dbg_check_inode_size(struct ubifs_info *c,
475 const struct inode *inode,
476 loff_t size) { return 0; }
477static inline int
478dbg_check_data_nodes_order(struct ubifs_info *c,
479 struct list_head *head) { return 0; }
480static inline int
481dbg_check_nondata_nodes_order(struct ubifs_info *c,
482 struct list_head *head) { return 0; }
483
484static inline int dbg_force_in_the_gaps(void) { return 0; }
485#define dbg_force_in_the_gaps_enabled 0
486#define dbg_failure_mode 0
487
488static inline int dbg_debugfs_init(void) { return 0; }
489static inline void dbg_debugfs_exit(void) { return; }
490static inline int dbg_debugfs_init_fs(struct ubifs_info *c) { return 0; }
491static inline int dbg_debugfs_exit_fs(struct ubifs_info *c) { return 0; }
456 492
457#endif /* !CONFIG_UBIFS_FS_DEBUG */ 493#endif /* !CONFIG_UBIFS_FS_DEBUG */
458#endif /* !__UBIFS_DEBUG_H__ */ 494#endif /* !__UBIFS_DEBUG_H__ */
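
The !CONFIG_UBIFS_FS_DEBUG stubs above are converted from object-like macros to static inline functions, so the disabled build still type-checks every argument and callers no longer trip "set but not used" warnings, while the stubs still compile away to nothing. A hypothetical minimal illustration of the difference (not UBIFS code):

struct ctx;

#ifdef DEBUG
int check_ctx(struct ctx *c);			/* real checker lives elsewhere */
#else
/* A macro stub such as "#define check_ctx(c) 0" would accept an argument of
 * any type and leave 'c' looking unused at the call site; the inline keeps
 * the prototype honest and is still optimised out. */
static inline int check_ctx(struct ctx *c) { return 0; }
#endif

static inline int do_work(struct ctx *c)
{
	int err = check_ctx(c);		/* type-checked in both configurations */

	return err;
}
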
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 28be1e6a65e8..b286db79c686 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1312,6 +1312,9 @@ int ubifs_fsync(struct file *file, int datasync)
1312 1312
1313 dbg_gen("syncing inode %lu", inode->i_ino); 1313 dbg_gen("syncing inode %lu", inode->i_ino);
1314 1314
1315 if (inode->i_sb->s_flags & MS_RDONLY)
1316 return 0;
1317
1315 /* 1318 /*
1316 * VFS has already synchronized dirty pages for this inode. Synchronize 1319 * VFS has already synchronized dirty pages for this inode. Synchronize
1317 * the inode unless this is a 'datasync()' call. 1320 * the inode unless this is a 'datasync()' call.
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 936f2cbfe6b6..3dbad6fbd1eb 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -317,6 +317,32 @@ int ubifs_recover_master_node(struct ubifs_info *c)
317 goto out_free; 317 goto out_free;
318 } 318 }
319 memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ); 319 memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ);
320
321 /*
322 * We had to recover the master node, which means there was an
323 * unclean reboot. However, it is possible that the master node
324 * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set.
325 * E.g., consider the following chain of events:
326 *
327 * 1. UBIFS was cleanly unmounted, so the master node is clean
328 * 2. UBIFS is being mounted R/W and starts changing the master
329 * node in the first (%UBIFS_MST_LNUM). A power cut happens,
330 * so this LEB ends up with some amount of garbage at the
331 * end.
332 * 3. UBIFS is being mounted R/O. We reach this place and
333 * recover the master node from the second LEB
334 * (%UBIFS_MST_LNUM + 1). But we cannot update the media
335 * because we are being mounted R/O. We have to defer the
336 * operation.
337 * 4. However, this master node (@c->mst_node) is marked as
338 * clean (since step 1). And if we just return, the
339 * mount code will be confused and won't recover the master
340 * node when it is re-mounted R/W later.
341 *
342 * Thus, we force the recovery by marking the master node as
343 * dirty.
344 */
345 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
320 } else { 346 } else {
321 /* Write the recovered master node */ 347 /* Write the recovered master node */
322 c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1; 348 c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1;
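
The comment block above spells out the fix: recovery that runs during a read-only mount cannot write the repaired master node back to the media, so the in-memory copy is flagged UBIFS_MST_DIRTY to force the write-out on a later read-write remount. A user-space C sketch of that mark-dirty-now, write-back-later idiom, with hypothetical names:

#include <stdbool.h>

struct master {
	unsigned int flags;
	bool media_writable;
};

#define MST_DIRTY	0x1u

/* Repair the in-memory copy; if the media cannot be written yet, flag the
 * copy so the deferred write-out is not forgotten. */
static void recover_master(struct master *m)
{
	/* ... rebuild *m from the backup copy ... */
	if (!m->media_writable)
		m->flags |= MST_DIRTY;
}

static void remount_rw(struct master *m, void (*write_back)(struct master *))
{
	m->media_writable = true;
	if (m->flags & MST_DIRTY) {
		write_back(m);		/* deferred recovery completes here */
		m->flags &= ~MST_DIRTY;
	}
}
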
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index c75f6133206c..be6c7b008f38 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1671,14 +1671,25 @@ static int ubifs_remount_rw(struct ubifs_info *c)
1671 if (err) 1671 if (err)
1672 goto out; 1672 goto out;
1673 1673
1674 dbg_gen("re-mounted read-write");
1675 c->remounting_rw = 0;
1676
1674 if (c->need_recovery) { 1677 if (c->need_recovery) {
1675 c->need_recovery = 0; 1678 c->need_recovery = 0;
1676 ubifs_msg("deferred recovery completed"); 1679 ubifs_msg("deferred recovery completed");
1680 } else {
1681 /*
1682 * Do not run the debugging space check if we were doing
1683 * recovery, because when we saved the information we had the
1684 * file-system in a state where the TNC and lprops have been
1685 * modified in memory, but all the I/O operations (including a
1686 * commit) were deferred. So the file-system was in
1687 * "non-committed" state. Now the file-system is in committed
1688 * state, and of course the amount of free space will change
1689 * because, for example, the old index size was imprecise.
1690 */
1691 err = dbg_check_space_info(c);
1677 } 1692 }
1678
1679 dbg_gen("re-mounted read-write");
1680 c->remounting_rw = 0;
1681 err = dbg_check_space_info(c);
1682 mutex_unlock(&c->umount_mutex); 1693 mutex_unlock(&c->umount_mutex);
1683 return err; 1694 return err;
1684 1695
@@ -1761,10 +1772,12 @@ static void ubifs_put_super(struct super_block *sb)
1761 * of the media. For example, there will be dirty inodes if we failed 1772 * of the media. For example, there will be dirty inodes if we failed
1762 * to write them back because of I/O errors. 1773 * to write them back because of I/O errors.
1763 */ 1774 */
1764 ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0); 1775 if (!c->ro_error) {
1765 ubifs_assert(c->budg_idx_growth == 0); 1776 ubifs_assert(atomic_long_read(&c->dirty_pg_cnt) == 0);
1766 ubifs_assert(c->budg_dd_growth == 0); 1777 ubifs_assert(c->budg_idx_growth == 0);
1767 ubifs_assert(c->budg_data_growth == 0); 1778 ubifs_assert(c->budg_dd_growth == 0);
1779 ubifs_assert(c->budg_data_growth == 0);
1780 }
1768 1781
1769 /* 1782 /*
1770 * The 'c->umount_lock' prevents races between UBIFS memory shrinker 1783 * The 'c->umount_lock' prevents races between UBIFS memory shrinker
diff --git a/fs/xattr.c b/fs/xattr.c
index a19acdb81cd1..f1ef94974dea 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -666,7 +666,7 @@ generic_setxattr(struct dentry *dentry, const char *name, const void *value, siz
666 handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name); 666 handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
667 if (!handler) 667 if (!handler)
668 return -EOPNOTSUPP; 668 return -EOPNOTSUPP;
669 return handler->set(dentry, name, value, size, 0, handler->flags); 669 return handler->set(dentry, name, value, size, flags, handler->flags);
670} 670}
671 671
672/* 672/*
diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/linux-2.6/xfs_message.c
index 3ca795609113..9f76cceb678d 100644
--- a/fs/xfs/linux-2.6/xfs_message.c
+++ b/fs/xfs/linux-2.6/xfs_message.c
@@ -34,8 +34,10 @@ __xfs_printk(
34 const struct xfs_mount *mp, 34 const struct xfs_mount *mp,
35 struct va_format *vaf) 35 struct va_format *vaf)
36{ 36{
37 if (mp && mp->m_fsname) 37 if (mp && mp->m_fsname) {
38 printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); 38 printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf);
39 return;
40 }
39 printk("%sXFS: %pV\n", level, vaf); 41 printk("%sXFS: %pV\n", level, vaf);
40} 42}
41 43
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index e612575a2596..b4326bfa684f 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -23,11 +23,11 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
23 preempt_disable(); 23 preempt_disable();
24#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 24#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
25 while (unlikely(test_and_set_bit_lock(bitnum, addr))) { 25 while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
26 while (test_bit(bitnum, addr)) { 26 preempt_enable();
27 preempt_enable(); 27 do {
28 cpu_relax(); 28 cpu_relax();
29 preempt_disable(); 29 } while (test_bit(bitnum, addr));
30 } 30 preempt_disable();
31 } 31 }
32#endif 32#endif
33 __acquire(bitlock); 33 __acquire(bitlock);
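
With this change bit_spin_lock() enables preemption once after a failed test_and_set_bit_lock() and then spins on plain test_bit() reads with cpu_relax() until the bit clears, instead of toggling preemption on every pass of the inner loop. The C11 sketch below shows the same test-and-test-and-set shape for a user-space spinlock; the preempt_enable()/preempt_disable() pairing is kernel-specific and has no analogue here:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool lock = false;

static void spin_lock_ttas(void)
{
	/* Try the expensive atomic first ... */
	while (atomic_exchange_explicit(&lock, true, memory_order_acquire)) {
		/* ... then wait on cheap plain loads, like the cpu_relax()
		 * loop on test_bit() above, before retrying the atomic.
		 * (C11 has no portable cpu_relax(), so the body is empty.) */
		while (atomic_load_explicit(&lock, memory_order_relaxed))
			;
	}
}

static void spin_unlock_ttas(void)
{
	atomic_store_explicit(&lock, false, memory_order_release);
}
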
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 32176cc8e715..2ad95fa1d130 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -388,20 +388,19 @@ struct request_queue
388#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ 388#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
389#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ 389#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
390#define QUEUE_FLAG_DEAD 5 /* queue being torn down */ 390#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
391#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ 391#define QUEUE_FLAG_ELVSWITCH 6 /* don't use elevator, just do FIFO */
392#define QUEUE_FLAG_ELVSWITCH 7 /* don't use elevator, just do FIFO */ 392#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
393#define QUEUE_FLAG_BIDI 8 /* queue supports bidi requests */ 393#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
394#define QUEUE_FLAG_NOMERGES 9 /* disable merge attempts */ 394#define QUEUE_FLAG_SAME_COMP 9 /* force complete on same CPU */
395#define QUEUE_FLAG_SAME_COMP 10 /* force complete on same CPU */ 395#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
396#define QUEUE_FLAG_FAIL_IO 11 /* fake timeout */ 396#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
397#define QUEUE_FLAG_STACKABLE 12 /* supports request stacking */ 397#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
398#define QUEUE_FLAG_NONROT 13 /* non-rotational device (SSD) */
399#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ 398#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
400#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ 399#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
401#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ 400#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
402#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ 401#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
403#define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */ 402#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
404#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ 403#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
405 404
406#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 405#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
407 (1 << QUEUE_FLAG_STACKABLE) | \ 406 (1 << QUEUE_FLAG_STACKABLE) | \
@@ -697,8 +696,9 @@ extern void blk_start_queue(struct request_queue *q);
697extern void blk_stop_queue(struct request_queue *q); 696extern void blk_stop_queue(struct request_queue *q);
698extern void blk_sync_queue(struct request_queue *q); 697extern void blk_sync_queue(struct request_queue *q);
699extern void __blk_stop_queue(struct request_queue *q); 698extern void __blk_stop_queue(struct request_queue *q);
700extern void __blk_run_queue(struct request_queue *q, bool force_kblockd); 699extern void __blk_run_queue(struct request_queue *q);
701extern void blk_run_queue(struct request_queue *); 700extern void blk_run_queue(struct request_queue *);
701extern void blk_run_queue_async(struct request_queue *q);
702extern int blk_rq_map_user(struct request_queue *, struct request *, 702extern int blk_rq_map_user(struct request_queue *, struct request *,
703 struct rq_map_data *, void __user *, unsigned long, 703 struct rq_map_data *, void __user *, unsigned long,
704 gfp_t); 704 gfp_t);
@@ -857,26 +857,39 @@ extern void blk_put_queue(struct request_queue *);
857struct blk_plug { 857struct blk_plug {
858 unsigned long magic; 858 unsigned long magic;
859 struct list_head list; 859 struct list_head list;
860 struct list_head cb_list;
860 unsigned int should_sort; 861 unsigned int should_sort;
861}; 862};
863struct blk_plug_cb {
864 struct list_head list;
865 void (*callback)(struct blk_plug_cb *);
866};
862 867
863extern void blk_start_plug(struct blk_plug *); 868extern void blk_start_plug(struct blk_plug *);
864extern void blk_finish_plug(struct blk_plug *); 869extern void blk_finish_plug(struct blk_plug *);
865extern void __blk_flush_plug(struct task_struct *, struct blk_plug *); 870extern void blk_flush_plug_list(struct blk_plug *, bool);
866 871
867static inline void blk_flush_plug(struct task_struct *tsk) 872static inline void blk_flush_plug(struct task_struct *tsk)
868{ 873{
869 struct blk_plug *plug = tsk->plug; 874 struct blk_plug *plug = tsk->plug;
870 875
871 if (unlikely(plug)) 876 if (plug)
872 __blk_flush_plug(tsk, plug); 877 blk_flush_plug_list(plug, false);
878}
879
880static inline void blk_schedule_flush_plug(struct task_struct *tsk)
881{
882 struct blk_plug *plug = tsk->plug;
883
884 if (plug)
885 blk_flush_plug_list(plug, true);
873} 886}
874 887
875static inline bool blk_needs_flush_plug(struct task_struct *tsk) 888static inline bool blk_needs_flush_plug(struct task_struct *tsk)
876{ 889{
877 struct blk_plug *plug = tsk->plug; 890 struct blk_plug *plug = tsk->plug;
878 891
879 return plug && !list_empty(&plug->list); 892 return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
880} 893}
881 894
882/* 895/*
@@ -1314,6 +1327,11 @@ static inline void blk_flush_plug(struct task_struct *task)
1314{ 1327{
1315} 1328}
1316 1329
1330static inline void blk_schedule_flush_plug(struct task_struct *task)
1331{
1332}
1333
1334
1317static inline bool blk_needs_flush_plug(struct task_struct *tsk) 1335static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1318{ 1336{
1319 return false; 1337 return false;
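Note: the blk_plug additions above give a plug a second list, cb_list, holding callbacks that run when the plug is flushed, and blk_needs_flush_plug() now checks both lists. A self-contained sketch of that callback-list idea in plain C follows; struct plug, plug_add_cb() and plug_flush() are invented names, not the kernel API.

    #include <stdio.h>

    struct plug_cb {
        struct plug_cb *next;
        void (*callback)(struct plug_cb *);
    };

    struct plug {
        struct plug_cb *cb_list;    /* callbacks to run at flush time */
    };

    static void plug_add_cb(struct plug *plug, struct plug_cb *cb)
    {
        cb->next = plug->cb_list;
        plug->cb_list = cb;
    }

    static void plug_flush(struct plug *plug)
    {
        /* Unlink and run every pending callback, leaving the list empty. */
        while (plug->cb_list) {
            struct plug_cb *cb = plug->cb_list;

            plug->cb_list = cb->next;
            cb->callback(cb);
        }
    }

    static void say_hello(struct plug_cb *cb)
    {
        printf("callback %p ran at flush time\n", (void *)cb);
    }

    int main(void)
    {
        struct plug plug = { .cb_list = NULL };
        struct plug_cb cb = { .callback = say_hello };

        plug_add_cb(&plug, &cb);
        plug_flush(&plug);          /* runs say_hello() once */
        plug_flush(&plug);          /* nothing left to run */
        return 0;
    }
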
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index f2afed4fa945..19d90a55541d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -197,7 +197,7 @@ struct dentry_operations {
197 * typically using d_splice_alias. */ 197 * typically using d_splice_alias. */
198 198
199#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */ 199#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
200#define DCACHE_UNHASHED 0x0010 200#define DCACHE_RCUACCESS 0x0010 /* Entry has ever been RCU-visible */
201#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 201#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020
202 /* Parent inode is watched by inotify */ 202 /* Parent inode is watched by inotify */
203 203
@@ -384,7 +384,7 @@ extern struct dentry *dget_parent(struct dentry *dentry);
384 384
385static inline int d_unhashed(struct dentry *dentry) 385static inline int d_unhashed(struct dentry *dentry)
386{ 386{
387 return (dentry->d_flags & DCACHE_UNHASHED); 387 return hlist_bl_unhashed(&dentry->d_hash);
388} 388}
389 389
390static inline int d_unlinked(struct dentry *dentry) 390static inline int d_unlinked(struct dentry *dentry)
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e2768834f397..32a4423710f5 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -197,7 +197,6 @@ struct dm_target {
197struct dm_target_callbacks { 197struct dm_target_callbacks {
198 struct list_head list; 198 struct list_head list;
199 int (*congested_fn) (struct dm_target_callbacks *, int); 199 int (*congested_fn) (struct dm_target_callbacks *, int);
200 void (*unplug_fn)(struct dm_target_callbacks *);
201}; 200};
202 201
203int dm_register_target(struct target_type *t); 202int dm_register_target(struct target_type *t);
diff --git a/include/linux/input.h b/include/linux/input.h
index f3a7794a18c4..771d6d85667d 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -167,6 +167,7 @@ struct input_keymap_entry {
167#define SYN_REPORT 0 167#define SYN_REPORT 0
168#define SYN_CONFIG 1 168#define SYN_CONFIG 1
169#define SYN_MT_REPORT 2 169#define SYN_MT_REPORT 2
170#define SYN_DROPPED 3
170 171
171/* 172/*
172 * Keys and buttons 173 * Keys and buttons
@@ -553,8 +554,8 @@ struct input_keymap_entry {
553#define KEY_DVD 0x185 /* Media Select DVD */ 554#define KEY_DVD 0x185 /* Media Select DVD */
554#define KEY_AUX 0x186 555#define KEY_AUX 0x186
555#define KEY_MP3 0x187 556#define KEY_MP3 0x187
556#define KEY_AUDIO 0x188 557#define KEY_AUDIO 0x188 /* AL Audio Browser */
557#define KEY_VIDEO 0x189 558#define KEY_VIDEO 0x189 /* AL Movie Browser */
558#define KEY_DIRECTORY 0x18a 559#define KEY_DIRECTORY 0x18a
559#define KEY_LIST 0x18b 560#define KEY_LIST 0x18b
560#define KEY_MEMO 0x18c /* Media Select Messages */ 561#define KEY_MEMO 0x18c /* Media Select Messages */
@@ -603,8 +604,9 @@ struct input_keymap_entry {
603#define KEY_FRAMEFORWARD 0x1b5 604#define KEY_FRAMEFORWARD 0x1b5
604#define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */ 605#define KEY_CONTEXT_MENU 0x1b6 /* GenDesc - system context menu */
605#define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */ 606#define KEY_MEDIA_REPEAT 0x1b7 /* Consumer - transport control */
606#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */ 607#define KEY_10CHANNELSUP 0x1b8 /* 10 channels up (10+) */
607#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */ 608#define KEY_10CHANNELSDOWN 0x1b9 /* 10 channels down (10-) */
609#define KEY_IMAGES 0x1ba /* AL Image Browser */
608 610
609#define KEY_DEL_EOL 0x1c0 611#define KEY_DEL_EOL 0x1c0
610#define KEY_DEL_EOS 0x1c1 612#define KEY_DEL_EOS 0x1c1
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index b3ac06a4435d..318bb82325a6 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -48,6 +48,12 @@ static inline void input_mt_slot(struct input_dev *dev, int slot)
48 input_event(dev, EV_ABS, ABS_MT_SLOT, slot); 48 input_event(dev, EV_ABS, ABS_MT_SLOT, slot);
49} 49}
50 50
51static inline bool input_is_mt_axis(int axis)
52{
53 return axis == ABS_MT_SLOT ||
54 (axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST);
55}
56
51void input_mt_report_slot_state(struct input_dev *dev, 57void input_mt_report_slot_state(struct input_dev *dev,
52 unsigned int tool_type, bool active); 58 unsigned int tool_type, bool active);
53 59
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 7f675aa81d87..04f32a3eb26b 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -137,8 +137,6 @@ enum {
137 ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */ 137 ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */
138 ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */ 138 ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
139 ATA_DFLAG_AN = (1 << 7), /* AN configured */ 139 ATA_DFLAG_AN = (1 << 7), /* AN configured */
140 ATA_DFLAG_HIPM = (1 << 8), /* device supports HIPM */
141 ATA_DFLAG_DIPM = (1 << 9), /* device supports DIPM */
142 ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */ 140 ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
143 ATA_DFLAG_CFG_MASK = (1 << 12) - 1, 141 ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
144 142
@@ -198,6 +196,7 @@ enum {
198 * management */ 196 * management */
199 ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity 197 ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
200 * led */ 198 * led */
199 ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
201 200
202 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 201 /* bits 24:31 of ap->flags are reserved for LLD specific flags */
203 202
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 5bad17d1acde..31f9d75adc5b 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -2,6 +2,7 @@
2#define _LINUX_LIST_BL_H 2#define _LINUX_LIST_BL_H
3 3
4#include <linux/list.h> 4#include <linux/list.h>
5#include <linux/bit_spinlock.h>
5 6
6/* 7/*
7 * Special version of lists, where head of the list has a lock in the lowest 8 * Special version of lists, where head of the list has a lock in the lowest
@@ -114,6 +115,16 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
114 } 115 }
115} 116}
116 117
118static inline void hlist_bl_lock(struct hlist_bl_head *b)
119{
120 bit_spin_lock(0, (unsigned long *)b);
121}
122
123static inline void hlist_bl_unlock(struct hlist_bl_head *b)
124{
125 __bit_spin_unlock(0, (unsigned long *)b);
126}
127
117/** 128/**
118 * hlist_bl_for_each_entry - iterate over list of given type 129 * hlist_bl_for_each_entry - iterate over list of given type
119 * @tpos: the type * to use as a loop cursor. 130 * @tpos: the type * to use as a loop cursor.
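Note: the new hlist_bl_lock()/hlist_bl_unlock() helpers reuse bit 0 of the bucket head as a per-bucket lock, which works because the stored pointer is at least pointer-aligned and its low bit is otherwise always zero. Below is a tiny standalone illustration of packing a lock bit into the low bit of an aligned pointer; LOCK_BIT and struct node are invented, and no atomicity is shown, only the bit layout.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LOCK_BIT 0x1UL

    struct node { int value; };

    int main(void)
    {
        struct node n = { .value = 42 };
        uintptr_t head = (uintptr_t)&n;     /* aligned, so bit 0 is clear */

        assert((head & LOCK_BIT) == 0);

        head |= LOCK_BIT;                   /* "take" the bucket lock */
        struct node *p = (struct node *)(head & ~LOCK_BIT);
        printf("value while locked: %d\n", p->value);

        head &= ~LOCK_BIT;                  /* "release" the lock */
        return 0;
    }
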
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 31afb7ecbe1f..cdced84261d7 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -117,7 +117,7 @@ extern struct pid *find_vpid(int nr);
117 */ 117 */
118extern struct pid *find_get_pid(int nr); 118extern struct pid *find_get_pid(int nr);
119extern struct pid *find_ge_pid(int nr, struct pid_namespace *); 119extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
120int next_pidmap(struct pid_namespace *pid_ns, int last); 120int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
121 121
122extern struct pid *alloc_pid(struct pid_namespace *ns); 122extern struct pid *alloc_pid(struct pid_namespace *ns);
123extern void free_pid(struct pid *pid); 123extern void free_pid(struct pid *pid);
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 369e19d3750b..7f1183dcd119 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -24,6 +24,7 @@
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/poll.h> 25#include <linux/poll.h>
26#include <linux/posix-timers.h> 26#include <linux/posix-timers.h>
27#include <linux/rwsem.h>
27 28
28struct posix_clock; 29struct posix_clock;
29 30
@@ -104,7 +105,7 @@ struct posix_clock_operations {
104 * @ops: Functional interface to the clock 105 * @ops: Functional interface to the clock
105 * @cdev: Character device instance for this clock 106 * @cdev: Character device instance for this clock
106 * @kref: Reference count. 107 * @kref: Reference count.
107 * @mutex: Protects the 'zombie' field from concurrent access. 108 * @rwsem: Protects the 'zombie' field from concurrent access.
108 * @zombie: If 'zombie' is true, then the hardware has disappeared. 109 * @zombie: If 'zombie' is true, then the hardware has disappeared.
109 * @release: A function to free the structure when the reference count reaches 110 * @release: A function to free the structure when the reference count reaches
110 * zero. May be NULL if structure is statically allocated. 111 * zero. May be NULL if structure is statically allocated.
@@ -117,7 +118,7 @@ struct posix_clock {
117 struct posix_clock_operations ops; 118 struct posix_clock_operations ops;
118 struct cdev cdev; 119 struct cdev cdev;
119 struct kref kref; 120 struct kref kref;
120 struct mutex mutex; 121 struct rw_semaphore rwsem;
121 bool zombie; 122 bool zombie;
122 void (*release)(struct posix_clock *clk); 123 void (*release)(struct posix_clock *clk);
123}; 124};
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 2ca7e8a78060..877ece45426f 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -228,6 +228,8 @@ extern int rtc_read_alarm(struct rtc_device *rtc,
228 struct rtc_wkalrm *alrm); 228 struct rtc_wkalrm *alrm);
229extern int rtc_set_alarm(struct rtc_device *rtc, 229extern int rtc_set_alarm(struct rtc_device *rtc,
230 struct rtc_wkalrm *alrm); 230 struct rtc_wkalrm *alrm);
231extern int rtc_initialize_alarm(struct rtc_device *rtc,
232 struct rtc_wkalrm *alrm);
231extern void rtc_update_irq(struct rtc_device *rtc, 233extern void rtc_update_irq(struct rtc_device *rtc,
232 unsigned long num, unsigned long events); 234 unsigned long num, unsigned long events);
233 235
diff --git a/include/linux/security.h b/include/linux/security.h
index ca02f1716736..8ce59ef3e5af 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1456,7 +1456,7 @@ struct security_operations {
1456 struct inode *new_dir, struct dentry *new_dentry); 1456 struct inode *new_dir, struct dentry *new_dentry);
1457 int (*inode_readlink) (struct dentry *dentry); 1457 int (*inode_readlink) (struct dentry *dentry);
1458 int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd); 1458 int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
1459 int (*inode_permission) (struct inode *inode, int mask); 1459 int (*inode_permission) (struct inode *inode, int mask, unsigned flags);
1460 int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); 1460 int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
1461 int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry); 1461 int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
1462 int (*inode_setxattr) (struct dentry *dentry, const char *name, 1462 int (*inode_setxattr) (struct dentry *dentry, const char *name,
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 3c7329b8ea0e..0e1855079fbb 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -103,8 +103,8 @@ struct driver_info {
103 * Indicates to usbnet, that USB driver accumulates multiple IP packets. 103 * Indicates to usbnet, that USB driver accumulates multiple IP packets.
104 * Affects statistic (counters) and short packet handling. 104 * Affects statistic (counters) and short packet handling.
105 */ 105 */
106#define FLAG_MULTI_PACKET 0x1000 106#define FLAG_MULTI_PACKET 0x2000
107#define FLAG_RX_ASSEMBLE 0x2000 /* rx packets may span >1 frames */ 107#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */
108 108
109 /* init device ... can sleep, or cause probe() failure */ 109 /* init device ... can sleep, or cause probe() failure */
110 int (*bind)(struct usbnet *, struct usb_interface *); 110 int (*bind)(struct usbnet *, struct usb_interface *);
diff --git a/include/linux/v4l2-mediabus.h b/include/linux/v4l2-mediabus.h
index 7054a7a8065e..de5c15921025 100644
--- a/include/linux/v4l2-mediabus.h
+++ b/include/linux/v4l2-mediabus.h
@@ -47,7 +47,7 @@ enum v4l2_mbus_pixelcode {
47 V4L2_MBUS_FMT_RGB565_2X8_BE = 0x1007, 47 V4L2_MBUS_FMT_RGB565_2X8_BE = 0x1007,
48 V4L2_MBUS_FMT_RGB565_2X8_LE = 0x1008, 48 V4L2_MBUS_FMT_RGB565_2X8_LE = 0x1008,
49 49
50 /* YUV (including grey) - next is 0x2013 */ 50 /* YUV (including grey) - next is 0x2014 */
51 V4L2_MBUS_FMT_Y8_1X8 = 0x2001, 51 V4L2_MBUS_FMT_Y8_1X8 = 0x2001,
52 V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002, 52 V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002,
53 V4L2_MBUS_FMT_VYUY8_1_5X8 = 0x2003, 53 V4L2_MBUS_FMT_VYUY8_1_5X8 = 0x2003,
@@ -60,6 +60,7 @@ enum v4l2_mbus_pixelcode {
60 V4L2_MBUS_FMT_Y10_1X10 = 0x200a, 60 V4L2_MBUS_FMT_Y10_1X10 = 0x200a,
61 V4L2_MBUS_FMT_YUYV10_2X10 = 0x200b, 61 V4L2_MBUS_FMT_YUYV10_2X10 = 0x200b,
62 V4L2_MBUS_FMT_YVYU10_2X10 = 0x200c, 62 V4L2_MBUS_FMT_YVYU10_2X10 = 0x200c,
63 V4L2_MBUS_FMT_Y12_1X12 = 0x2013,
63 V4L2_MBUS_FMT_UYVY8_1X16 = 0x200f, 64 V4L2_MBUS_FMT_UYVY8_1X16 = 0x200f,
64 V4L2_MBUS_FMT_VYUY8_1X16 = 0x2010, 65 V4L2_MBUS_FMT_VYUY8_1X16 = 0x2010,
65 V4L2_MBUS_FMT_YUYV8_1X16 = 0x2011, 66 V4L2_MBUS_FMT_YUYV8_1X16 = 0x2011,
@@ -67,9 +68,11 @@ enum v4l2_mbus_pixelcode {
67 V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d, 68 V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d,
68 V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e, 69 V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e,
69 70
70 /* Bayer - next is 0x3013 */ 71 /* Bayer - next is 0x3015 */
71 V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001, 72 V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001,
73 V4L2_MBUS_FMT_SGBRG8_1X8 = 0x3013,
72 V4L2_MBUS_FMT_SGRBG8_1X8 = 0x3002, 74 V4L2_MBUS_FMT_SGRBG8_1X8 = 0x3002,
75 V4L2_MBUS_FMT_SRGGB8_1X8 = 0x3014,
73 V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 0x300b, 76 V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8 = 0x300b,
74 V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 0x300c, 77 V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8 = 0x300c,
75 V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 0x3009, 78 V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8 = 0x3009,
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index aa6c393b7ae9..be82c8ead1af 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -308,6 +308,7 @@ struct v4l2_pix_format {
308#define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */ 308#define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */
309#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */ 309#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
310#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */ 310#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
311#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
311#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ 312#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
312 313
313/* Palette formats */ 314/* Palette formats */
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index bd102cf509ac..d61febfb1668 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -163,7 +163,7 @@ v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev);
163({ \ 163({ \
164 struct v4l2_subdev *__sd; \ 164 struct v4l2_subdev *__sd; \
165 __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, cond, o, \ 165 __v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, cond, o, \
166 f, args...); \ 166 f , ##args); \
167}) 167})
168 168
169/* Call the specified callback for all subdevs matching grp_id (if 0, then 169/* Call the specified callback for all subdevs matching grp_id (if 0, then
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index cdf2e8ac4309..d2df55b0c213 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -139,8 +139,6 @@ do { \
139 */ 139 */
140 140
141enum p9_msg_t { 141enum p9_msg_t {
142 P9_TSYNCFS = 0,
143 P9_RSYNCFS,
144 P9_TLERROR = 6, 142 P9_TLERROR = 6,
145 P9_RLERROR, 143 P9_RLERROR,
146 P9_TSTATFS = 8, 144 P9_TSTATFS = 8,
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 85c1413f054d..051a99f79769 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -218,8 +218,8 @@ void p9_client_disconnect(struct p9_client *clnt);
218void p9_client_begin_disconnect(struct p9_client *clnt); 218void p9_client_begin_disconnect(struct p9_client *clnt);
219struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, 219struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
220 char *uname, u32 n_uname, char *aname); 220 char *uname, u32 n_uname, char *aname);
221struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, 221struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
222 int clone); 222 char **wnames, int clone);
223int p9_client_open(struct p9_fid *fid, int mode); 223int p9_client_open(struct p9_fid *fid, int mode);
224int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, 224int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
225 char *extension); 225 char *extension);
@@ -230,7 +230,6 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
230 gid_t gid, struct p9_qid *qid); 230 gid_t gid, struct p9_qid *qid);
231int p9_client_clunk(struct p9_fid *fid); 231int p9_client_clunk(struct p9_fid *fid);
232int p9_client_fsync(struct p9_fid *fid, int datasync); 232int p9_client_fsync(struct p9_fid *fid, int datasync);
233int p9_client_sync_fs(struct p9_fid *fid);
234int p9_client_remove(struct p9_fid *fid); 233int p9_client_remove(struct p9_fid *fid);
235int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, 234int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
236 u64 offset, u32 count); 235 u64 offset, u32 count);
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 78f18adb49c8..bf366547da25 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
401 401
402DECLARE_EVENT_CLASS(block_unplug, 402DECLARE_EVENT_CLASS(block_unplug,
403 403
404 TP_PROTO(struct request_queue *q), 404 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
405 405
406 TP_ARGS(q), 406 TP_ARGS(q, depth, explicit),
407 407
408 TP_STRUCT__entry( 408 TP_STRUCT__entry(
409 __field( int, nr_rq ) 409 __field( int, nr_rq )
@@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug,
411 ), 411 ),
412 412
413 TP_fast_assign( 413 TP_fast_assign(
414 __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; 414 __entry->nr_rq = depth;
415 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 415 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
416 ), 416 ),
417 417
@@ -419,31 +419,19 @@ DECLARE_EVENT_CLASS(block_unplug,
419); 419);
420 420
421/** 421/**
422 * block_unplug_timer - timed release of operations requests in queue to device driver 422 * block_unplug - release of operations requests in request queue
423 * @q: request queue to unplug
424 *
425 * Unplug the request queue @q because a timer expired and allow block
426 * operation requests to be sent to the device driver.
427 */
428DEFINE_EVENT(block_unplug, block_unplug_timer,
429
430 TP_PROTO(struct request_queue *q),
431
432 TP_ARGS(q)
433);
434
435/**
436 * block_unplug_io - release of operations requests in request queue
437 * @q: request queue to unplug 423 * @q: request queue to unplug
424 * @depth: number of requests just added to the queue
425 * @explicit: whether this was an explicit unplug, or one from schedule()
438 * 426 *
439 * Unplug request queue @q because device driver is scheduled to work 427 * Unplug request queue @q because device driver is scheduled to work
440 * on elements in the request queue. 428 * on elements in the request queue.
441 */ 429 */
442DEFINE_EVENT(block_unplug, block_unplug_io, 430DEFINE_EVENT(block_unplug, block_unplug,
443 431
444 TP_PROTO(struct request_queue *q), 432 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
445 433
446 TP_ARGS(q) 434 TP_ARGS(q, depth, explicit)
447); 435);
448 436
449/** 437/**
diff --git a/init/Kconfig b/init/Kconfig
index a7ad8fbdb564..d886b1e9278e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -924,14 +924,6 @@ menuconfig EXPERT
924 environments which can tolerate a "non-standard" kernel. 924 environments which can tolerate a "non-standard" kernel.
925 Only use this if you really know what you are doing. 925 Only use this if you really know what you are doing.
926 926
927config EMBEDDED
928 bool "Embedded system"
929 select EXPERT
930 help
931 This option should be enabled if compiling the kernel for
932 an embedded system so certain expert options are available
933 for configuration.
934
935config UID16 927config UID16
936 bool "Enable 16-bit UID system calls" if EXPERT 928 bool "Enable 16-bit UID system calls" if EXPERT
937 depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) 929 depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION)
@@ -1104,6 +1096,14 @@ config AIO
1104 by some high performance threaded applications. Disabling 1096 by some high performance threaded applications. Disabling
1105 this option saves about 7k. 1097 this option saves about 7k.
1106 1098
1099config EMBEDDED
1100 bool "Embedded system"
1101 select EXPERT
1102 help
1103 This option should be enabled if compiling the kernel for
1104 an embedded system so certain expert options are available
1105 for configuration.
1106
1107config HAVE_PERF_EVENTS 1107config HAVE_PERF_EVENTS
1108 bool 1108 bool
1109 help 1109 help
diff --git a/kernel/futex.c b/kernel/futex.c
index dfb924ffe65b..fe28dc282eae 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1886,7 +1886,7 @@ retry:
1886 restart->futex.val = val; 1886 restart->futex.val = val;
1887 restart->futex.time = abs_time->tv64; 1887 restart->futex.time = abs_time->tv64;
1888 restart->futex.bitset = bitset; 1888 restart->futex.bitset = bitset;
1889 restart->futex.flags = flags; 1889 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
1890 1890
1891 ret = -ERESTART_RESTARTBLOCK; 1891 ret = -ERESTART_RESTARTBLOCK;
1892 1892
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 55936f9cb251..87b77de03dd3 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/swap.h> 34#include <linux/swap.h>
35#include <linux/kmsg_dump.h> 35#include <linux/kmsg_dump.h>
36#include <linux/syscore_ops.h>
36 37
37#include <asm/page.h> 38#include <asm/page.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
@@ -1532,6 +1533,11 @@ int kernel_kexec(void)
1532 local_irq_disable(); 1533 local_irq_disable();
1533 /* Suspend system devices */ 1534 /* Suspend system devices */
1534 error = sysdev_suspend(PMSG_FREEZE); 1535 error = sysdev_suspend(PMSG_FREEZE);
1536 if (!error) {
1537 error = syscore_suspend();
1538 if (error)
1539 sysdev_resume();
1540 }
1535 if (error) 1541 if (error)
1536 goto Enable_irqs; 1542 goto Enable_irqs;
1537 } else 1543 } else
@@ -1546,6 +1552,7 @@ int kernel_kexec(void)
1546 1552
1547#ifdef CONFIG_KEXEC_JUMP 1553#ifdef CONFIG_KEXEC_JUMP
1548 if (kexec_image->preserve_context) { 1554 if (kexec_image->preserve_context) {
1555 syscore_resume();
1549 sysdev_resume(); 1556 sysdev_resume();
1550 Enable_irqs: 1557 Enable_irqs:
1551 local_irq_enable(); 1558 local_irq_enable();
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 27960f114efd..8e81a9860a0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
364 } 364 }
365 365
366 if (mode & PERF_CGROUP_SWIN) { 366 if (mode & PERF_CGROUP_SWIN) {
367 WARN_ON_ONCE(cpuctx->cgrp);
367 /* set cgrp before ctxsw in to 368 /* set cgrp before ctxsw in to
368 * allow event_filter_match() to not 369 * allow event_filter_match() to not
369 * have to pass task around 370 * have to pass task around
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2423 if (!ctx || !ctx->nr_events) 2424 if (!ctx || !ctx->nr_events)
2424 goto out; 2425 goto out;
2425 2426
2427 /*
2428 * We must ctxsw out cgroup events to avoid conflict
2429 * when invoking perf_task_event_sched_in() later on
2430 * in this function. Otherwise we end up trying to
2431 * ctxswin cgroup events which are already scheduled
2432 * in.
2433 */
2434 perf_cgroup_sched_out(current);
2426 task_ctx_sched_out(ctx, EVENT_ALL); 2435 task_ctx_sched_out(ctx, EVENT_ALL);
2427 2436
2428 raw_spin_lock(&ctx->lock); 2437 raw_spin_lock(&ctx->lock);
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2447 2456
2448 raw_spin_unlock(&ctx->lock); 2457 raw_spin_unlock(&ctx->lock);
2449 2458
2459 /*
2460 * Also calls ctxswin for cgroup events, if any:
2461 */
2450 perf_event_context_sched_in(ctx, ctx->task); 2462 perf_event_context_sched_in(ctx, ctx->task);
2451out: 2463out:
2452 local_irq_restore(flags); 2464 local_irq_restore(flags);
diff --git a/kernel/pid.c b/kernel/pid.c
index 02f221274265..57a8346a270e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -217,11 +217,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
217 return -1; 217 return -1;
218} 218}
219 219
220int next_pidmap(struct pid_namespace *pid_ns, int last) 220int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
221{ 221{
222 int offset; 222 int offset;
223 struct pidmap *map, *end; 223 struct pidmap *map, *end;
224 224
225 if (last >= PID_MAX_LIMIT)
226 return -1;
227
225 offset = (last + 1) & BITS_PER_PAGE_MASK; 228 offset = (last + 1) & BITS_PER_PAGE_MASK;
226 map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; 229 map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
227 end = &pid_ns->pidmap[PIDMAP_ENTRIES]; 230 end = &pid_ns->pidmap[PIDMAP_ENTRIES];
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index aeabd26e3342..50aae660174d 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -273,8 +273,11 @@ static int create_image(int platform_mode)
273 local_irq_disable(); 273 local_irq_disable();
274 274
275 error = sysdev_suspend(PMSG_FREEZE); 275 error = sysdev_suspend(PMSG_FREEZE);
276 if (!error) 276 if (!error) {
277 error = syscore_suspend(); 277 error = syscore_suspend();
278 if (error)
279 sysdev_resume();
280 }
278 if (error) { 281 if (error) {
279 printk(KERN_ERR "PM: Some system devices failed to power down, " 282 printk(KERN_ERR "PM: Some system devices failed to power down, "
280 "aborting hibernation\n"); 283 "aborting hibernation\n");
@@ -407,8 +410,11 @@ static int resume_target_kernel(bool platform_mode)
407 local_irq_disable(); 410 local_irq_disable();
408 411
409 error = sysdev_suspend(PMSG_QUIESCE); 412 error = sysdev_suspend(PMSG_QUIESCE);
410 if (!error) 413 if (!error) {
411 error = syscore_suspend(); 414 error = syscore_suspend();
415 if (error)
416 sysdev_resume();
417 }
412 if (error) 418 if (error)
413 goto Enable_irqs; 419 goto Enable_irqs;
414 420
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 2814c32aed51..8935369d503a 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -164,8 +164,11 @@ static int suspend_enter(suspend_state_t state)
164 BUG_ON(!irqs_disabled()); 164 BUG_ON(!irqs_disabled());
165 165
166 error = sysdev_suspend(PMSG_SUSPEND); 166 error = sysdev_suspend(PMSG_SUSPEND);
167 if (!error) 167 if (!error) {
168 error = syscore_suspend(); 168 error = syscore_suspend();
169 if (error)
170 sysdev_resume();
171 }
169 if (!error) { 172 if (!error) {
170 if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) { 173 if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
171 error = suspend_ops->enter(state); 174 error = suspend_ops->enter(state);
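Note: the kexec, hibernate and suspend hunks above all apply the same rollback rule: if syscore_suspend() fails after sysdev_suspend() has already succeeded, the earlier stage is undone with sysdev_resume() before the error is propagated. A minimal sketch of that two-stage suspend-with-rollback shape, with made-up stage_a/stage_b helpers standing in for the kernel calls:

    #include <stdio.h>

    static int stage_a_suspend(void) { puts("stage A suspended"); return 0; }
    static void stage_a_resume(void) { puts("stage A resumed"); }
    static int stage_b_suspend(void) { puts("stage B failed"); return -1; }

    static int enter_low_power(void)
    {
        int error = stage_a_suspend();

        if (!error) {
            error = stage_b_suspend();
            if (error)
                stage_a_resume();       /* undo stage A before bailing out */
        }
        if (error)
            return error;               /* nothing is left half-suspended */

        /* ...the low-power state would be entered here... */
        stage_a_resume();
        return 0;
    }

    int main(void)
    {
        return enter_low_power() ? 1 : 0;
    }
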
diff --git a/kernel/sched.c b/kernel/sched.c
index a187c3fe027b..312f8b95c2d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4118,7 +4118,7 @@ need_resched:
4118 */ 4118 */
4119 if (blk_needs_flush_plug(prev)) { 4119 if (blk_needs_flush_plug(prev)) {
4120 raw_spin_unlock(&rq->lock); 4120 raw_spin_unlock(&rq->lock);
4121 blk_flush_plug(prev); 4121 blk_schedule_flush_plug(prev);
4122 raw_spin_lock(&rq->lock); 4122 raw_spin_lock(&rq->lock);
4123 } 4123 }
4124 } 4124 }
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7f00772e57c9..6fa833ab2cb8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2104,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2104 enum cpu_idle_type idle, int *all_pinned, 2104 enum cpu_idle_type idle, int *all_pinned,
2105 int *this_best_prio, struct cfs_rq *busiest_cfs_rq) 2105 int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
2106{ 2106{
2107 int loops = 0, pulled = 0, pinned = 0; 2107 int loops = 0, pulled = 0;
2108 long rem_load_move = max_load_move; 2108 long rem_load_move = max_load_move;
2109 struct task_struct *p, *n; 2109 struct task_struct *p, *n;
2110 2110
2111 if (max_load_move == 0) 2111 if (max_load_move == 0)
2112 goto out; 2112 goto out;
2113 2113
2114 pinned = 1;
2115
2116 list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { 2114 list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
2117 if (loops++ > sysctl_sched_nr_migrate) 2115 if (loops++ > sysctl_sched_nr_migrate)
2118 break; 2116 break;
2119 2117
2120 if ((p->se.load.weight >> 1) > rem_load_move || 2118 if ((p->se.load.weight >> 1) > rem_load_move ||
2121 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) 2119 !can_migrate_task(p, busiest, this_cpu, sd, idle,
2120 all_pinned))
2122 continue; 2121 continue;
2123 2122
2124 pull_task(busiest, p, this_rq, this_cpu); 2123 pull_task(busiest, p, this_rq, this_cpu);
@@ -2153,9 +2152,6 @@ out:
2153 */ 2152 */
2154 schedstat_add(sd, lb_gained[idle], pulled); 2153 schedstat_add(sd, lb_gained[idle], pulled);
2155 2154
2156 if (all_pinned)
2157 *all_pinned = pinned;
2158
2159 return max_load_move - rem_load_move; 2155 return max_load_move - rem_load_move;
2160} 2156}
2161 2157
@@ -3127,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3127 if (!sds.busiest || sds.busiest_nr_running == 0) 3123 if (!sds.busiest || sds.busiest_nr_running == 0)
3128 goto out_balanced; 3124 goto out_balanced;
3129 3125
3126 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
3127
3130 /* 3128 /*
3131 * If the busiest group is imbalanced the below checks don't 3129 * If the busiest group is imbalanced the below checks don't
3132 * work because they assumes all things are equal, which typically 3130 * work because they assumes all things are equal, which typically
@@ -3151,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3151 * Don't pull any tasks if this group is already above the domain 3149 * Don't pull any tasks if this group is already above the domain
3152 * average load. 3150 * average load.
3153 */ 3151 */
3154 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
3155 if (sds.this_load >= sds.avg_load) 3152 if (sds.this_load >= sds.avg_load)
3156 goto out_balanced; 3153 goto out_balanced;
3157 3154
@@ -3340,6 +3337,7 @@ redo:
3340 * still unbalanced. ld_moved simply stays zero, so it is 3337 * still unbalanced. ld_moved simply stays zero, so it is
3341 * correctly treated as an imbalance. 3338 * correctly treated as an imbalance.
3342 */ 3339 */
3340 all_pinned = 1;
3343 local_irq_save(flags); 3341 local_irq_save(flags);
3344 double_rq_lock(this_rq, busiest); 3342 double_rq_lock(this_rq, busiest);
3345 ld_moved = move_tasks(this_rq, this_cpu, busiest, 3343 ld_moved = move_tasks(this_rq, this_cpu, busiest,
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 25028dd4fa18..c340ca658f37 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -19,7 +19,6 @@
19 */ 19 */
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/file.h> 21#include <linux/file.h>
22#include <linux/mutex.h>
23#include <linux/posix-clock.h> 22#include <linux/posix-clock.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/syscalls.h> 24#include <linux/syscalls.h>
@@ -34,19 +33,19 @@ static struct posix_clock *get_posix_clock(struct file *fp)
34{ 33{
35 struct posix_clock *clk = fp->private_data; 34 struct posix_clock *clk = fp->private_data;
36 35
37 mutex_lock(&clk->mutex); 36 down_read(&clk->rwsem);
38 37
39 if (!clk->zombie) 38 if (!clk->zombie)
40 return clk; 39 return clk;
41 40
42 mutex_unlock(&clk->mutex); 41 up_read(&clk->rwsem);
43 42
44 return NULL; 43 return NULL;
45} 44}
46 45
47static void put_posix_clock(struct posix_clock *clk) 46static void put_posix_clock(struct posix_clock *clk)
48{ 47{
49 mutex_unlock(&clk->mutex); 48 up_read(&clk->rwsem);
50} 49}
51 50
52static ssize_t posix_clock_read(struct file *fp, char __user *buf, 51static ssize_t posix_clock_read(struct file *fp, char __user *buf,
@@ -156,7 +155,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
156 struct posix_clock *clk = 155 struct posix_clock *clk =
157 container_of(inode->i_cdev, struct posix_clock, cdev); 156 container_of(inode->i_cdev, struct posix_clock, cdev);
158 157
159 mutex_lock(&clk->mutex); 158 down_read(&clk->rwsem);
160 159
161 if (clk->zombie) { 160 if (clk->zombie) {
162 err = -ENODEV; 161 err = -ENODEV;
@@ -172,7 +171,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
172 fp->private_data = clk; 171 fp->private_data = clk;
173 } 172 }
174out: 173out:
175 mutex_unlock(&clk->mutex); 174 up_read(&clk->rwsem);
176 return err; 175 return err;
177} 176}
178 177
@@ -211,25 +210,20 @@ int posix_clock_register(struct posix_clock *clk, dev_t devid)
211 int err; 210 int err;
212 211
213 kref_init(&clk->kref); 212 kref_init(&clk->kref);
214 mutex_init(&clk->mutex); 213 init_rwsem(&clk->rwsem);
215 214
216 cdev_init(&clk->cdev, &posix_clock_file_operations); 215 cdev_init(&clk->cdev, &posix_clock_file_operations);
217 clk->cdev.owner = clk->ops.owner; 216 clk->cdev.owner = clk->ops.owner;
218 err = cdev_add(&clk->cdev, devid, 1); 217 err = cdev_add(&clk->cdev, devid, 1);
219 if (err)
220 goto no_cdev;
221 218
222 return err; 219 return err;
223no_cdev:
224 mutex_destroy(&clk->mutex);
225 return err;
226} 220}
227EXPORT_SYMBOL_GPL(posix_clock_register); 221EXPORT_SYMBOL_GPL(posix_clock_register);
228 222
229static void delete_clock(struct kref *kref) 223static void delete_clock(struct kref *kref)
230{ 224{
231 struct posix_clock *clk = container_of(kref, struct posix_clock, kref); 225 struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
232 mutex_destroy(&clk->mutex); 226
233 if (clk->release) 227 if (clk->release)
234 clk->release(clk); 228 clk->release(clk);
235} 229}
@@ -238,9 +232,9 @@ void posix_clock_unregister(struct posix_clock *clk)
238{ 232{
239 cdev_del(&clk->cdev); 233 cdev_del(&clk->cdev);
240 234
241 mutex_lock(&clk->mutex); 235 down_write(&clk->rwsem);
242 clk->zombie = true; 236 clk->zombie = true;
243 mutex_unlock(&clk->mutex); 237 up_write(&clk->rwsem);
244 238
245 kref_put(&clk->kref, delete_clock); 239 kref_put(&clk->kref, delete_clock);
246} 240}
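Note: the posix-clock conversion above swaps the mutex for an rw_semaphore so that get_posix_clock() callers hold the lock shared while the unregister path takes it exclusively to flip 'zombie'. A rough userspace analogue using POSIX rwlocks; struct posix_clock_demo and the helper names are invented, build with -pthread.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct posix_clock_demo {
        pthread_rwlock_t rwsem;
        bool zombie;
    };

    static struct posix_clock_demo *get_clock(struct posix_clock_demo *clk)
    {
        pthread_rwlock_rdlock(&clk->rwsem); /* shared: readers can coexist */
        if (!clk->zombie)
            return clk;                     /* returned with the lock held */
        pthread_rwlock_unlock(&clk->rwsem);
        return NULL;
    }

    static void put_clock(struct posix_clock_demo *clk)
    {
        pthread_rwlock_unlock(&clk->rwsem);
    }

    static void unregister_clock(struct posix_clock_demo *clk)
    {
        pthread_rwlock_wrlock(&clk->rwsem); /* exclusive for the flag flip */
        clk->zombie = true;
        pthread_rwlock_unlock(&clk->rwsem);
    }

    int main(void)
    {
        static struct posix_clock_demo clk = {
            .rwsem = PTHREAD_RWLOCK_INITIALIZER,
            .zombie = false,
        };

        if (get_clock(&clk)) {
            puts("clock alive");
            put_clock(&clk);
        }
        unregister_clock(&clk);
        if (!get_clock(&clk))
            puts("clock gone");
        return 0;
    }
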
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7aa40f8e182d..6957aa298dfa 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -850,29 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
850 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); 850 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
851} 851}
852 852
853static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q) 853static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
854 unsigned int depth, bool explicit)
854{ 855{
855 struct blk_trace *bt = q->blk_trace; 856 struct blk_trace *bt = q->blk_trace;
856 857
857 if (bt) { 858 if (bt) {
858 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; 859 __be64 rpdu = cpu_to_be64(depth);
859 __be64 rpdu = cpu_to_be64(pdu); 860 u32 what;
860 861
861 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0, 862 if (explicit)
862 sizeof(rpdu), &rpdu); 863 what = BLK_TA_UNPLUG_IO;
863 } 864 else
864} 865 what = BLK_TA_UNPLUG_TIMER;
865
866static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
867{
868 struct blk_trace *bt = q->blk_trace;
869
870 if (bt) {
871 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
872 __be64 rpdu = cpu_to_be64(pdu);
873 866
874 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0, 867 __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
875 sizeof(rpdu), &rpdu);
876 } 868 }
877} 869}
878 870
@@ -1015,9 +1007,7 @@ static void blk_register_tracepoints(void)
1015 WARN_ON(ret); 1007 WARN_ON(ret);
1016 ret = register_trace_block_plug(blk_add_trace_plug, NULL); 1008 ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1017 WARN_ON(ret); 1009 WARN_ON(ret);
1018 ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); 1010 ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1019 WARN_ON(ret);
1020 ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
1021 WARN_ON(ret); 1011 WARN_ON(ret);
1022 ret = register_trace_block_split(blk_add_trace_split, NULL); 1012 ret = register_trace_block_split(blk_add_trace_split, NULL);
1023 WARN_ON(ret); 1013 WARN_ON(ret);
@@ -1032,8 +1022,7 @@ static void blk_unregister_tracepoints(void)
1032 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); 1022 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1033 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); 1023 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1034 unregister_trace_block_split(blk_add_trace_split, NULL); 1024 unregister_trace_block_split(blk_add_trace_split, NULL);
1035 unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); 1025 unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1036 unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
1037 unregister_trace_block_plug(blk_add_trace_plug, NULL); 1026 unregister_trace_block_plug(blk_add_trace_plug, NULL);
1038 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); 1027 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1039 unregister_trace_block_getrq(blk_add_trace_getrq, NULL); 1028 unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
diff --git a/net/9p/client.c b/net/9p/client.c
index 48b8e084e710..77367745be9b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -929,15 +929,15 @@ error:
929} 929}
930EXPORT_SYMBOL(p9_client_attach); 930EXPORT_SYMBOL(p9_client_attach);
931 931
932struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, 932struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
933 int clone) 933 char **wnames, int clone)
934{ 934{
935 int err; 935 int err;
936 struct p9_client *clnt; 936 struct p9_client *clnt;
937 struct p9_fid *fid; 937 struct p9_fid *fid;
938 struct p9_qid *wqids; 938 struct p9_qid *wqids;
939 struct p9_req_t *req; 939 struct p9_req_t *req;
940 int16_t nwqids, count; 940 uint16_t nwqids, count;
941 941
942 err = 0; 942 err = 0;
943 wqids = NULL; 943 wqids = NULL;
@@ -955,7 +955,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
955 fid = oldfid; 955 fid = oldfid;
956 956
957 957
958 P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %d wname[0] %s\n", 958 P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n",
959 oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL); 959 oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
960 960
961 req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid, 961 req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid,
@@ -1220,27 +1220,6 @@ error:
1220} 1220}
1221EXPORT_SYMBOL(p9_client_fsync); 1221EXPORT_SYMBOL(p9_client_fsync);
1222 1222
1223int p9_client_sync_fs(struct p9_fid *fid)
1224{
1225 int err = 0;
1226 struct p9_req_t *req;
1227 struct p9_client *clnt;
1228
1229 P9_DPRINTK(P9_DEBUG_9P, ">>> TSYNC_FS fid %d\n", fid->fid);
1230
1231 clnt = fid->clnt;
1232 req = p9_client_rpc(clnt, P9_TSYNCFS, "d", fid->fid);
1233 if (IS_ERR(req)) {
1234 err = PTR_ERR(req);
1235 goto error;
1236 }
1237 P9_DPRINTK(P9_DEBUG_9P, "<<< RSYNCFS fid %d\n", fid->fid);
1238 p9_free_req(clnt, req);
1239error:
1240 return err;
1241}
1242EXPORT_SYMBOL(p9_client_sync_fs);
1243
1244int p9_client_clunk(struct p9_fid *fid) 1223int p9_client_clunk(struct p9_fid *fid)
1245{ 1224{
1246 int err; 1225 int err;
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 8a4084fa8b5a..b58a501cf3d1 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -265,7 +265,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
265 } 265 }
266 break; 266 break;
267 case 'T':{ 267 case 'T':{
268 int16_t *nwname = va_arg(ap, int16_t *); 268 uint16_t *nwname = va_arg(ap, uint16_t *);
269 char ***wnames = va_arg(ap, char ***); 269 char ***wnames = va_arg(ap, char ***);
270 270
271 errcode = p9pdu_readf(pdu, proto_version, 271 errcode = p9pdu_readf(pdu, proto_version,
@@ -468,7 +468,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
468 case 'E':{ 468 case 'E':{
469 int32_t cnt = va_arg(ap, int32_t); 469 int32_t cnt = va_arg(ap, int32_t);
470 const char *k = va_arg(ap, const void *); 470 const char *k = va_arg(ap, const void *);
471 const char *u = va_arg(ap, const void *); 471 const char __user *u = va_arg(ap,
472 const void __user *);
472 errcode = p9pdu_writef(pdu, proto_version, "d", 473 errcode = p9pdu_writef(pdu, proto_version, "d",
473 cnt); 474 cnt);
474 if (!errcode && pdu_write_urw(pdu, k, u, cnt)) 475 if (!errcode && pdu_write_urw(pdu, k, u, cnt))
@@ -495,7 +496,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
495 } 496 }
496 break; 497 break;
497 case 'T':{ 498 case 'T':{
498 int16_t nwname = va_arg(ap, int); 499 uint16_t nwname = va_arg(ap, int);
499 const char **wnames = va_arg(ap, const char **); 500 const char **wnames = va_arg(ap, const char **);
500 501
501 errcode = p9pdu_writef(pdu, proto_version, "w", 502 errcode = p9pdu_writef(pdu, proto_version, "w",
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index d47880e971dd..e883172f9aa2 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -66,7 +66,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
66 uint32_t pdata_mapped_pages; 66 uint32_t pdata_mapped_pages;
67 struct trans_rpage_info *rpinfo; 67 struct trans_rpage_info *rpinfo;
68 68
69 *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1); 69 *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
70 70
71 if (*pdata_off) 71 if (*pdata_off)
72 first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off), 72 first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off),
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e8f046b07182..244e70742183 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -326,8 +326,11 @@ req_retry_pinned:
326 outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, 326 outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
327 pdata_off, rpinfo->rp_data, pdata_len); 327 pdata_off, rpinfo->rp_data, pdata_len);
328 } else { 328 } else {
329 char *pbuf = req->tc->pubuf ? req->tc->pubuf : 329 char *pbuf;
330 req->tc->pkbuf; 330 if (req->tc->pubuf)
331 pbuf = (__force char *) req->tc->pubuf;
332 else
333 pbuf = req->tc->pkbuf;
331 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf, 334 outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
332 req->tc->pbuf_size); 335 req->tc->pbuf_size);
333 } 336 }
@@ -352,8 +355,12 @@ req_retry_pinned:
352 in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM, 355 in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
353 pdata_off, rpinfo->rp_data, pdata_len); 356 pdata_off, rpinfo->rp_data, pdata_len);
354 } else { 357 } else {
355 char *pbuf = req->tc->pubuf ? req->tc->pubuf : 358 char *pbuf;
356 req->tc->pkbuf; 359 if (req->tc->pubuf)
360 pbuf = (__force char *) req->tc->pubuf;
361 else
362 pbuf = req->tc->pkbuf;
363
357 in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM, 364 in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
358 pbuf, req->tc->pbuf_size); 365 pbuf, req->tc->pbuf_size);
359 } 366 }
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 008ff6c4eecf..f3bc322c5891 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -249,11 +249,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
249 goto drop; 249 goto drop;
250 } 250 }
251 251
252 /* Zero out the CB buffer if no options present */ 252 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
253 if (iph->ihl == 5) { 253 if (iph->ihl == 5)
254 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
255 return 0; 254 return 0;
256 }
257 255
258 opt->optlen = iph->ihl*4 - sizeof(struct iphdr); 256 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
259 if (ip_options_compile(dev_net(dev), opt, skb)) 257 if (ip_options_compile(dev_net(dev), opt, skb))
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 27dab26ad3b8..054fdb5aeb88 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -13,6 +13,7 @@
13#include <net/caif/cfsrvl.h> 13#include <net/caif/cfsrvl.h>
14#include <net/caif/cfpkt.h> 14#include <net/caif/cfpkt.h>
15 15
16
16#define container_obj(layr) ((struct cfsrvl *) layr) 17#define container_obj(layr) ((struct cfsrvl *) layr)
17 18
18#define DGM_CMD_BIT 0x80 19#define DGM_CMD_BIT 0x80
@@ -83,6 +84,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
83 84
84static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) 85static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
85{ 86{
87 u8 packet_type;
86 u32 zero = 0; 88 u32 zero = 0;
87 struct caif_payload_info *info; 89 struct caif_payload_info *info;
88 struct cfsrvl *service = container_obj(layr); 90 struct cfsrvl *service = container_obj(layr);
@@ -94,7 +96,9 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
94 if (cfpkt_getlen(pkt) > DGM_MTU) 96 if (cfpkt_getlen(pkt) > DGM_MTU)
95 return -EMSGSIZE; 97 return -EMSGSIZE;
96 98
97 cfpkt_add_head(pkt, &zero, 4); 99 cfpkt_add_head(pkt, &zero, 3);
100 packet_type = 0x08; /* B9 set - UNCLASSIFIED */
101 cfpkt_add_head(pkt, &packet_type, 1);
98 102
99 /* Add info for MUX-layer to route the packet out. */ 103 /* Add info for MUX-layer to route the packet out. */
100 info = cfpkt_info(pkt); 104 info = cfpkt_info(pkt);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 46f34b2e0478..24f1ffa74b06 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -244,9 +244,9 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
244 int phyid) 244 int phyid)
245{ 245{
246 struct cfmuxl *muxl = container_obj(layr); 246 struct cfmuxl *muxl = container_obj(layr);
247 struct list_head *node; 247 struct list_head *node, *next;
248 struct cflayer *layer; 248 struct cflayer *layer;
249 list_for_each(node, &muxl->srvl_list) { 249 list_for_each_safe(node, next, &muxl->srvl_list) {
250 layer = list_entry(node, struct cflayer, node); 250 layer = list_entry(node, struct cflayer, node);
251 if (cfsrvl_phyid_match(layer, phyid)) 251 if (cfsrvl_phyid_match(layer, phyid))
252 layer->ctrlcmd(layer, ctrl, phyid); 252 layer->ctrlcmd(layer, ctrl, phyid);
diff --git a/net/core/dev.c b/net/core/dev.c
index 956d3b006e8b..c2ac599fa0f6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5203,11 +5203,15 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5203 } 5203 }
5204 5204
5205 /* TSO requires that SG is present as well. */ 5205 /* TSO requires that SG is present as well. */
5206 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) { 5206 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5207 netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n"); 5207 netdev_info(dev, "Dropping TSO features since no SG feature.\n");
5208 features &= ~NETIF_F_TSO; 5208 features &= ~NETIF_F_ALL_TSO;
5209 } 5209 }
5210 5210
5211 /* TSO ECN requires that TSO is present as well. */
5212 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5213 features &= ~NETIF_F_TSO_ECN;
5214
5211 /* Software GSO depends on SG. */ 5215 /* Software GSO depends on SG. */
5212 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5216 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5213 netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5217 netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
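Note: the netdev_fix_features() hunk above drops the whole TSO group when scatter-gather is missing and then clears TSO_ECN if it is the only TSO bit left. The same dependency fixup can be shown with invented flag values in a few lines of standalone C:

    #include <stdio.h>

    #define F_SG        (1u << 0)
    #define F_TSO       (1u << 1)
    #define F_TSO6      (1u << 2)
    #define F_TSO_ECN   (1u << 3)
    #define F_ALL_TSO   (F_TSO | F_TSO6 | F_TSO_ECN)

    static unsigned int fix_features(unsigned int features)
    {
        /* Every TSO variant requires scatter-gather. */
        if ((features & F_ALL_TSO) && !(features & F_SG))
            features &= ~F_ALL_TSO;

        /* TSO_ECN only makes sense together with a real TSO flag. */
        if ((features & F_ALL_TSO) == F_TSO_ECN)
            features &= ~F_TSO_ECN;

        return features;
    }

    int main(void)
    {
        printf("%#x\n", fix_features(F_TSO | F_TSO_ECN));          /* 0 */
        printf("%#x\n", fix_features(F_SG | F_TSO_ECN));           /* 0x1 */
        printf("%#x\n", fix_features(F_SG | F_TSO | F_TSO_ECN));   /* 0xb */
        return 0;
    }
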
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index ce2d33582859..5761185f884e 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,5 +1,3 @@
1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o 1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
2ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o 2ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
3af_802154-y := af_ieee802154.o raw.o dgram.o 3af_802154-y := af_ieee802154.o raw.o dgram.o
4
5ccflags-y += -Wall -DDEBUG
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6c0b7f4a3d7d..38f23e721b80 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
73 !sk2->sk_bound_dev_if || 73 !sk2->sk_bound_dev_if ||
74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
75 if (!reuse || !sk2->sk_reuse || 75 if (!reuse || !sk2->sk_reuse ||
76 ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) { 76 sk2->sk_state == TCP_LISTEN) {
77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); 77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || 78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
79 sk2_rcv_saddr == sk_rcv_saddr(sk)) 79 sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,8 +122,7 @@ again:
122 (tb->num_owners < smallest_size || smallest_size == -1)) { 122 (tb->num_owners < smallest_size || smallest_size == -1)) {
123 smallest_size = tb->num_owners; 123 smallest_size = tb->num_owners;
124 smallest_rover = rover; 124 smallest_rover = rover;
125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && 125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
126 !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
127 spin_unlock(&head->lock); 126 spin_unlock(&head->lock);
128 snum = smallest_rover; 127 snum = smallest_rover;
129 goto have_snum; 128 goto have_snum;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dd1b20eca1a2..9df4e635fb5f 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -354,7 +354,8 @@ static void inetpeer_free_rcu(struct rcu_head *head)
354} 354}
355 355
356/* May be called with local BH enabled. */ 356/* May be called with local BH enabled. */
357static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base) 357static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
358 struct inet_peer __rcu **stack[PEER_MAXDEPTH])
358{ 359{
359 int do_free; 360 int do_free;
360 361
@@ -368,7 +369,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
368 * We use refcnt=-1 to alert lockless readers this entry is deleted. 369 * We use refcnt=-1 to alert lockless readers this entry is deleted.
369 */ 370 */
370 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { 371 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
371 struct inet_peer __rcu **stack[PEER_MAXDEPTH];
372 struct inet_peer __rcu ***stackptr, ***delp; 372 struct inet_peer __rcu ***stackptr, ***delp;
373 if (lookup(&p->daddr, stack, base) != p) 373 if (lookup(&p->daddr, stack, base) != p)
374 BUG(); 374 BUG();
@@ -422,7 +422,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p)
 }
 
 /* May be called with local BH enabled. */
-static int cleanup_once(unsigned long ttl)
+static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
 {
 	struct inet_peer *p = NULL;
 
@@ -454,7 +454,7 @@ static int cleanup_once(unsigned long ttl)
 		 * happen because of entry limits in route cache. */
 		return -1;
 
-	unlink_from_pool(p, peer_to_base(p));
+	unlink_from_pool(p, peer_to_base(p), stack);
 	return 0;
 }
 
@@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 
 	if (base->total >= inet_peer_threshold)
 		/* Remove one less-recently-used entry. */
-		cleanup_once(0);
+		cleanup_once(0, stack);
 
 	return p;
 }
@@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy)
 {
 	unsigned long now = jiffies;
 	int ttl, total;
+	struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 
 	total = compute_total();
 	if (total >= inet_peer_threshold)
@@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy)
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
 					total / inet_peer_threshold * HZ;
-		while (!cleanup_once(ttl)) {
+		while (!cleanup_once(ttl, stack)) {
 			if (jiffies != now)
 				break;
 		}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 28a736f3442f..2391b24e8251 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -329,7 +329,7 @@ int ip_options_compile(struct net *net,
 					pp_ptr = optptr + 2;
 					goto error;
 				}
-				if (skb) {
+				if (rt) {
 					memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
 					opt->is_changed = 1;
 				}
@@ -371,7 +371,7 @@ int ip_options_compile(struct net *net,
 					goto error;
 				}
 				opt->ts = optptr - iph;
-				if (skb) {
+				if (rt) {
 					memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
 					timeptr = (__be32*)&optptr[optptr[2]+3];
 				}
@@ -603,7 +603,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 	unsigned long orefdst;
 	int err;
 
-	if (!opt->srr)
+	if (!opt->srr || !rt)
 		return 0;
 
 	if (skb->pkt_type != PACKET_HOST)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1a456652086b..321e6e84dbcc 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -311,7 +311,6 @@ static struct ctl_table ipv4_table[] = {
 		.mode = 0644,
 		.proc_handler = proc_do_large_bitmap,
 	},
-#ifdef CONFIG_IP_MULTICAST
 	{
 		.procname = "igmp_max_memberships",
 		.data = &sysctl_igmp_max_memberships,
@@ -319,8 +318,6 @@ static struct ctl_table ipv4_table[] = {
 		.mode = 0644,
 		.proc_handler = proc_dointvec
 	},
-
-#endif
 	{
 		.procname = "igmp_max_msf",
 		.data = &sysctl_igmp_max_msf,
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 166054650466..f2c5b0fc0f21 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
 		    (!sk->sk_reuse || !sk2->sk_reuse ||
-		     ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
+		     sk2->sk_state == TCP_LISTEN) &&
 		    ipv6_rcv_saddr_equal(sk, sk2))
 			break;
 	}
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index c9890e25cd4c..cc616974a447 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1297,8 +1297,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
 	/* Note : socket.c set MSG_EOR on SEQPACKET sockets */
 	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
 			       MSG_NOSIGNAL)) {
-		err = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	lock_sock(sk);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index fce9bd3bd3fe..5c04f3e42704 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -667,7 +667,7 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
 MODULE_DESCRIPTION("L2TP over IP");
 MODULE_VERSION("1.0");
 
-/* Use the value of SOCK_DGRAM (2) directory, because __stringify does't like
+/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like
  * enums
  */
 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 058f1e9a9128..903242111317 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -121,8 +121,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
 		s32 data_size = ntohs(pdulen) - llc_len;
 
 		if (data_size < 0 ||
-		    ((skb_tail_pointer(skb) -
-		     (u8 *)pdu) - llc_len) < data_size)
+		    !pskb_may_pull(skb, data_size))
 			return 0;
 		if (unlikely(pskb_trim_rcsum(skb, data_size)))
 			return 0;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 00a33242e90c..a274300b6a56 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -343,6 +343,10 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct ipmac data;
 
+	/* MAC can be src only */
+	if (!(flags & IPSET_DIM_TWO_SRC))
+		return 0;
+
 	data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
 	if (data.id < map->first_ip || data.id > map->last_ip)
 		return -IPSET_ERR_BITMAP_RANGE;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 9152e69a162d..72d1ac611fdc 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1022,8 +1022,9 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 	if (cb->args[1] >= ip_set_max)
 		goto out;
 
-	pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
 	max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+dump_last:
+	pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
 	for (; cb->args[1] < max; cb->args[1]++) {
 		index = (ip_set_id_t) cb->args[1];
 		set = ip_set_list[index];
@@ -1038,8 +1039,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 		 * so that lists (unions of sets) are dumped last.
 		 */
 		if (cb->args[0] != DUMP_ONE &&
-		    !((cb->args[0] == DUMP_ALL) ^
-		      (set->type->features & IPSET_DUMP_LAST)))
+		    ((cb->args[0] == DUMP_ALL) ==
+		     !!(set->type->features & IPSET_DUMP_LAST)))
 			continue;
 		pr_debug("List set: %s\n", set->name);
 		if (!cb->args[2]) {
@@ -1083,6 +1084,12 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 			goto release_refcount;
 		}
 	}
+	/* If we dump all sets, continue with dumping last ones */
+	if (cb->args[0] == DUMP_ALL) {
+		cb->args[0] = DUMP_LAST;
+		cb->args[1] = 0;
+		goto dump_last;
+	}
 	goto out;
 
 nla_put_failure:
@@ -1093,11 +1100,6 @@ release_refcount:
 		pr_debug("release set %s\n", ip_set_list[index]->name);
 		ip_set_put_byindex(index);
 	}
-
-	/* If we dump all sets, continue with dumping last ones */
-	if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
-		cb->args[0] = DUMP_LAST;
-
 out:
 	if (nlh) {
 		nlmsg_end(skb, nlh);
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 061d48cec137..b3babaed7719 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -81,6 +81,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
 	if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
 		pr_warning("Protocol error: set match dimension "
 			   "is over the limit!\n");
+		ip_set_nfnl_put(info->match_set.index);
 		return -ERANGE;
 	}
 
@@ -135,6 +136,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
 		if (index == IPSET_INVALID_ID) {
 			pr_warning("Cannot find del_set index %u as target\n",
 				   info->del_set.index);
+			if (info->add_set.index != IPSET_INVALID_ID)
+				ip_set_nfnl_put(info->add_set.index);
 			return -ENOENT;
 		}
 	}
@@ -142,6 +145,10 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
 	    info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
 		pr_warning("Protocol error: SET target dimension "
 			   "is over the limit!\n");
+		if (info->add_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->add_set.index);
+		if (info->del_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->del_set.index);
 		return -ERANGE;
 	}
 
@@ -192,6 +199,7 @@ set_match_checkentry(const struct xt_mtchk_param *par)
 	if (info->match_set.dim > IPSET_DIM_MAX) {
 		pr_warning("Protocol error: set match dimension "
 			   "is over the limit!\n");
+		ip_set_nfnl_put(info->match_set.index);
 		return -ERANGE;
 	}
 
@@ -219,7 +227,7 @@ set_target(struct sk_buff *skb, const struct xt_action_param *par)
 	if (info->del_set.index != IPSET_INVALID_ID)
 		ip_set_del(info->del_set.index,
 			   skb, par->family,
-			   info->add_set.dim,
+			   info->del_set.dim,
 			   info->del_set.flags);
 
 	return XT_CONTINUE;
@@ -245,13 +253,19 @@ set_target_checkentry(const struct xt_tgchk_param *par)
 		if (index == IPSET_INVALID_ID) {
 			pr_warning("Cannot find del_set index %u as target\n",
 				   info->del_set.index);
+			if (info->add_set.index != IPSET_INVALID_ID)
+				ip_set_nfnl_put(info->add_set.index);
 			return -ENOENT;
 		}
 	}
 	if (info->add_set.dim > IPSET_DIM_MAX ||
-	    info->del_set.flags > IPSET_DIM_MAX) {
+	    info->del_set.dim > IPSET_DIM_MAX) {
 		pr_warning("Protocol error: SET target dimension "
 			   "is over the limit!\n");
+		if (info->add_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->add_set.index);
+		if (info->del_set.index != IPSET_INVALID_ID)
+			ip_set_nfnl_put(info->del_set.index);
 		return -ERANGE;
 	}
 
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0698cad61763..1a21c571aa03 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -569,6 +569,8 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
 		sctp_assoc_set_primary(asoc, transport);
 	if (asoc->peer.active_path == peer)
 		asoc->peer.active_path = transport;
+	if (asoc->peer.retran_path == peer)
+		asoc->peer.retran_path = transport;
 	if (asoc->peer.last_data_from == peer)
 		asoc->peer.last_data_from = transport;
 
@@ -1323,6 +1325,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
 
 	if (t)
 		asoc->peer.retran_path = t;
+	else
+		t = asoc->peer.retran_path;
 
 	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
 				 " %p addr: ",
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index dff27d5e22fd..61b1f5ada96a 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -554,7 +554,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
 	memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo));
 
 	/* Per TSVWG discussion with Randy. Allow the application to
-	 * resemble a fragmented message.
+	 * reassemble a fragmented message.
 	 */
 	ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags;
 
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 659326c3e895..006ad817cd5f 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -332,7 +332,7 @@ static int conf_choice(struct menu *menu)
 			}
 			if (!child)
 				continue;
-			if (line[strlen(line) - 1] == '?') {
+			if (line[0] && line[strlen(line) - 1] == '?') {
 				print_help(child);
 				continue;
 			}
diff --git a/security/capability.c b/security/capability.c
index 2984ea4f776f..bbb51156261b 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -181,7 +181,7 @@ static int cap_inode_follow_link(struct dentry *dentry,
 	return 0;
 }
 
-static int cap_inode_permission(struct inode *inode, int mask)
+static int cap_inode_permission(struct inode *inode, int mask, unsigned flags)
 {
 	return 0;
 }
diff --git a/security/security.c b/security/security.c
index 101142369db4..4ba6d4cc061f 100644
--- a/security/security.c
+++ b/security/security.c
@@ -518,16 +518,14 @@ int security_inode_permission(struct inode *inode, int mask)
 {
 	if (unlikely(IS_PRIVATE(inode)))
 		return 0;
-	return security_ops->inode_permission(inode, mask);
+	return security_ops->inode_permission(inode, mask, 0);
 }
 
 int security_inode_exec_permission(struct inode *inode, unsigned int flags)
 {
 	if (unlikely(IS_PRIVATE(inode)))
 		return 0;
-	if (flags)
-		return -ECHILD;
-	return security_ops->inode_permission(inode, MAY_EXEC);
+	return security_ops->inode_permission(inode, MAY_EXEC, flags);
 }
 
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 9da6420e2056..1d027e29ce8d 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -471,6 +471,7 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
  * @avd: access vector decisions
  * @result: result from avc_has_perm_noaudit
  * @a: auxiliary audit data
+ * @flags: VFS walk flags
  *
  * Audit the granting or denial of permissions in accordance
  * with the policy. This function is typically called by
@@ -481,9 +482,10 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
  * be performed under a lock, to allow the lock to be released
  * before calling the auditing code.
  */
-void avc_audit(u32 ssid, u32 tsid,
+int avc_audit(u32 ssid, u32 tsid,
 	       u16 tclass, u32 requested,
-	       struct av_decision *avd, int result, struct common_audit_data *a)
+	       struct av_decision *avd, int result, struct common_audit_data *a,
+	       unsigned flags)
 {
 	struct common_audit_data stack_data;
 	u32 denied, audited;
@@ -515,11 +517,24 @@ void avc_audit(u32 ssid, u32 tsid,
 	else
 		audited = requested & avd->auditallow;
 	if (!audited)
-		return;
+		return 0;
+
 	if (!a) {
 		a = &stack_data;
 		COMMON_AUDIT_DATA_INIT(a, NONE);
 	}
+
+	/*
+	 * When in a RCU walk do the audit on the RCU retry. This is because
+	 * the collection of the dname in an inode audit message is not RCU
+	 * safe. Note this may drop some audits when the situation changes
+	 * during retry. However this is logically just as if the operation
+	 * happened a little later.
+	 */
+	if ((a->type == LSM_AUDIT_DATA_FS) &&
+	    (flags & IPERM_FLAG_RCU))
+		return -ECHILD;
+
 	a->selinux_audit_data.tclass = tclass;
 	a->selinux_audit_data.requested = requested;
 	a->selinux_audit_data.ssid = ssid;
@@ -529,6 +544,7 @@ void avc_audit(u32 ssid, u32 tsid,
 	a->lsm_pre_audit = avc_audit_pre_callback;
 	a->lsm_post_audit = avc_audit_post_callback;
 	common_lsm_audit(a);
+	return 0;
 }
 
 /**
@@ -793,6 +809,7 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
  * @tclass: target security class
  * @requested: requested permissions, interpreted based on @tclass
  * @auditdata: auxiliary audit data
+ * @flags: VFS walk flags
  *
  * Check the AVC to determine whether the @requested permissions are granted
  * for the SID pair (@ssid, @tsid), interpreting the permissions
@@ -802,14 +819,19 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
  * permissions are granted, -%EACCES if any permissions are denied, or
  * another -errno upon other errors.
  */
-int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
-		 u32 requested, struct common_audit_data *auditdata)
+int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass,
+		       u32 requested, struct common_audit_data *auditdata,
+		       unsigned flags)
 {
 	struct av_decision avd;
-	int rc;
+	int rc, rc2;
 
 	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
-	avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
+
+	rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata,
+			flags);
+	if (rc2)
+		return rc2;
 	return rc;
 }
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index f9c3764e4859..f7cf0ea6faea 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1446,8 +1446,11 @@ static int task_has_capability(struct task_struct *tsk,
 	}
 
 	rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
-	if (audit == SECURITY_CAP_AUDIT)
-		avc_audit(sid, sid, sclass, av, &avd, rc, &ad);
+	if (audit == SECURITY_CAP_AUDIT) {
+		int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0);
+		if (rc2)
+			return rc2;
+	}
 	return rc;
 }
 
@@ -1467,7 +1470,8 @@ static int task_has_system(struct task_struct *tsk,
 static int inode_has_perm(const struct cred *cred,
 			  struct inode *inode,
 			  u32 perms,
-			  struct common_audit_data *adp)
+			  struct common_audit_data *adp,
+			  unsigned flags)
 {
 	struct inode_security_struct *isec;
 	struct common_audit_data ad;
@@ -1487,7 +1491,7 @@ static int inode_has_perm(const struct cred *cred,
 		ad.u.fs.inode = inode;
 	}
 
-	return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp);
+	return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags);
 }
 
 /* Same as inode_has_perm, but pass explicit audit data containing
@@ -1504,7 +1508,7 @@ static inline int dentry_has_perm(const struct cred *cred,
 	COMMON_AUDIT_DATA_INIT(&ad, FS);
 	ad.u.fs.path.mnt = mnt;
 	ad.u.fs.path.dentry = dentry;
-	return inode_has_perm(cred, inode, av, &ad);
+	return inode_has_perm(cred, inode, av, &ad, 0);
 }
 
 /* Check whether a task can use an open file descriptor to
@@ -1540,7 +1544,7 @@ static int file_has_perm(const struct cred *cred,
 	/* av is zero if only checking access to the descriptor. */
 	rc = 0;
 	if (av)
-		rc = inode_has_perm(cred, inode, av, &ad);
+		rc = inode_has_perm(cred, inode, av, &ad, 0);
 
 out:
 	return rc;
@@ -2103,7 +2107,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
 			file = file_priv->file;
 			inode = file->f_path.dentry->d_inode;
 			if (inode_has_perm(cred, inode,
-					   FILE__READ | FILE__WRITE, NULL)) {
+					   FILE__READ | FILE__WRITE, NULL, 0)) {
 				drop_tty = 1;
 			}
 		}
@@ -2635,7 +2639,7 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *na
 	return dentry_has_perm(cred, NULL, dentry, FILE__READ);
 }
 
-static int selinux_inode_permission(struct inode *inode, int mask)
+static int selinux_inode_permission(struct inode *inode, int mask, unsigned flags)
 {
 	const struct cred *cred = current_cred();
 	struct common_audit_data ad;
@@ -2657,7 +2661,7 @@ static int selinux_inode_permission(struct inode *inode, int mask)
 
 	perms = file_mask_to_av(inode->i_mode, mask);
 
-	return inode_has_perm(cred, inode, perms, &ad);
+	return inode_has_perm(cred, inode, perms, &ad, flags);
 }
 
 static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
@@ -3205,7 +3209,7 @@ static int selinux_dentry_open(struct file *file, const struct cred *cred)
 	 * new inode label or new policy.
 	 * This check is not redundant - do not remove.
 	 */
-	return inode_has_perm(cred, inode, open_file_to_av(file), NULL);
+	return inode_has_perm(cred, inode, open_file_to_av(file), NULL, 0);
 }
 
 /* task security operations */
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 5615081b73ec..e77b2ac2908b 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -54,11 +54,11 @@ struct avc_cache_stats {
 
 void __init avc_init(void);
 
-void avc_audit(u32 ssid, u32 tsid,
+int avc_audit(u32 ssid, u32 tsid,
 	       u16 tclass, u32 requested,
 	       struct av_decision *avd,
 	       int result,
-	       struct common_audit_data *a);
+	       struct common_audit_data *a, unsigned flags);
 
 #define AVC_STRICT 1 /* Ignore permissive mode. */
 int avc_has_perm_noaudit(u32 ssid, u32 tsid,
@@ -66,9 +66,17 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 			 unsigned flags,
 			 struct av_decision *avd);
 
-int avc_has_perm(u32 ssid, u32 tsid,
+int avc_has_perm_flags(u32 ssid, u32 tsid,
 		 u16 tclass, u32 requested,
-		 struct common_audit_data *auditdata);
+		 struct common_audit_data *auditdata,
+		 unsigned);
+
+static inline int avc_has_perm(u32 ssid, u32 tsid,
+			       u16 tclass, u32 requested,
+			       struct common_audit_data *auditdata)
+{
+	return avc_has_perm_flags(ssid, tsid, tclass, requested, auditdata, 0);
+}
 
 u32 avc_policy_seqno(void);
 
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index c6f8fcadae07..400a5d5cde61 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -686,7 +686,7 @@ static int smack_inode_rename(struct inode *old_inode,
  *
  * Returns 0 if access is permitted, -EACCES otherwise
  */
-static int smack_inode_permission(struct inode *inode, int mask)
+static int smack_inode_permission(struct inode *inode, int mask, unsigned flags)
 {
 	struct smk_audit_info ad;
 
@@ -696,6 +696,10 @@ static int smack_inode_permission(struct inode *inode, int mask)
 	 */
 	if (mask == 0)
 		return 0;
+
+	/* May be droppable after audit */
+	if (flags & IPERM_FLAG_RCU)
+		return -ECHILD;
 	smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
 	smk_ad_setfield_u_fs_inode(&ad, inode);
 	return smk_curacc(smk_of_inode(inode), mask, &ad);
diff --git a/sound/aoa/codecs/tas.c b/sound/aoa/codecs/tas.c
index 58804c7acfcf..fd2188c3df2b 100644
--- a/sound/aoa/codecs/tas.c
+++ b/sound/aoa/codecs/tas.c
@@ -170,7 +170,7 @@ static void tas_set_volume(struct tas *tas)
 		/* analysing the volume and mixer tables shows
 		 * that they are similar enough when we shift
 		 * the mixer table down by 4 bits. The error
-		 * is minuscule, in just one item the error
+		 * is miniscule, in just one item the error
 		 * is 1, at a value of 0x07f17b (mixer table
 		 * value is 0x07f17a) */
 		tmp = tas_gaintable[left];
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 430f41db6044..759ade12e758 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -937,6 +937,7 @@ void snd_hda_shutup_pins(struct hda_codec *codec)
 }
 EXPORT_SYMBOL_HDA(snd_hda_shutup_pins);
 
+#ifdef SND_HDA_NEEDS_RESUME
 /* Restore the pin controls cleared previously via snd_hda_shutup_pins() */
 static void restore_shutup_pins(struct hda_codec *codec)
 {
@@ -953,6 +954,7 @@ static void restore_shutup_pins(struct hda_codec *codec)
 	}
 	codec->pins_shutup = 0;
 }
+#endif
 
 static void init_hda_cache(struct hda_cache_rec *cache,
 			   unsigned int record_size);
@@ -1329,6 +1331,7 @@ static void purify_inactive_streams(struct hda_codec *codec)
 	}
 }
 
+#ifdef SND_HDA_NEEDS_RESUME
 /* clean up all streams; called from suspend */
 static void hda_cleanup_all_streams(struct hda_codec *codec)
 {
@@ -1340,6 +1343,7 @@ static void hda_cleanup_all_streams(struct hda_codec *codec)
 			really_cleanup_stream(codec, p);
 	}
 }
+#endif
 
 /*
  * amp access functions
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 52928d9a72da..d3bd2c10180f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -14868,6 +14868,23 @@ static void alc269_fixup_hweq(struct hda_codec *codec,
 	alc_write_coef_idx(codec, 0x1e, coef | 0x80);
 }
 
+static void alc271_fixup_dmic(struct hda_codec *codec,
+			      const struct alc_fixup *fix, int action)
+{
+	static struct hda_verb verbs[] = {
+		{0x20, AC_VERB_SET_COEF_INDEX, 0x0d},
+		{0x20, AC_VERB_SET_PROC_COEF, 0x4000},
+		{}
+	};
+	unsigned int cfg;
+
+	if (strcmp(codec->chip_name, "ALC271X"))
+		return;
+	cfg = snd_hda_codec_get_pincfg(codec, 0x12);
+	if (get_defcfg_connect(cfg) == AC_JACK_PORT_FIXED)
+		snd_hda_sequence_write(codec, verbs);
+}
+
 enum {
 	ALC269_FIXUP_SONY_VAIO,
 	ALC275_FIXUP_SONY_VAIO_GPIO2,
@@ -14876,6 +14893,7 @@ enum {
 	ALC269_FIXUP_ASUS_G73JW,
 	ALC269_FIXUP_LENOVO_EAPD,
 	ALC275_FIXUP_SONY_HWEQ,
+	ALC271_FIXUP_DMIC,
 };
 
 static const struct alc_fixup alc269_fixups[] = {
@@ -14929,7 +14947,11 @@ static const struct alc_fixup alc269_fixups[] = {
 		.v.func = alc269_fixup_hweq,
 		.chained = true,
 		.chain_id = ALC275_FIXUP_SONY_VAIO_GPIO2
-	}
+	},
+	[ALC271_FIXUP_DMIC] = {
+		.type = ALC_FIXUP_FUNC,
+		.v.func = alc271_fixup_dmic,
+	},
 };
 
 static struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -14938,6 +14960,7 @@ static struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+	SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index f7cd346fd727..f5ccdbf7ebc6 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -308,8 +308,6 @@ static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
 	snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes,
 		ARRAY_SIZE(jz4740_codec_dapm_routes));
 
-	snd_soc_dapm_new_widgets(codec);
-
 	jz4740_codec_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
 	return 0;
diff --git a/sound/soc/codecs/sn95031.c b/sound/soc/codecs/sn95031.c
index a54d2a5b28f6..4d9fb279e146 100644
--- a/sound/soc/codecs/sn95031.c
+++ b/sound/soc/codecs/sn95031.c
@@ -927,7 +927,7 @@ static struct platform_driver sn95031_codec_driver = {
 		.owner = THIS_MODULE,
 	},
 	.probe = sn95031_device_probe,
-	.remove = sn95031_device_remove,
+	.remove = __devexit_p(sn95031_device_remove),
 };
 
 static int __init sn95031_init(void)
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index ae1cadfae84c..f52b623bb692 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -247,8 +247,6 @@ static int wm8903_volatile_register(struct snd_soc_codec *codec, unsigned int re
 	case WM8903_REVISION_NUMBER:
 	case WM8903_INTERRUPT_STATUS_1:
 	case WM8903_WRITE_SEQUENCER_4:
-	case WM8903_POWER_MANAGEMENT_3:
-	case WM8903_POWER_MANAGEMENT_2:
 	case WM8903_DC_SERVO_READBACK_1:
 	case WM8903_DC_SERVO_READBACK_2:
 	case WM8903_DC_SERVO_READBACK_3:
@@ -875,34 +873,40 @@ SND_SOC_DAPM_MIXER("Left Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 1, 0,
 SND_SOC_DAPM_MIXER("Right Speaker Mixer", WM8903_POWER_MANAGEMENT_4, 0, 0,
 		   right_speaker_mixer, ARRAY_SIZE(right_speaker_mixer)),
 
-SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0,
-		   4, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_ANALOGUE_HP_0,
+SND_SOC_DAPM_PGA_S("Left Headphone Output PGA", 0, WM8903_POWER_MANAGEMENT_2,
+		   1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("Right Headphone Output PGA", 0, WM8903_POWER_MANAGEMENT_2,
 		   0, 0, NULL, 0),
 
-SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 4, 0,
+SND_SOC_DAPM_PGA_S("Left Line Output PGA", 0, WM8903_POWER_MANAGEMENT_3, 1, 0,
 		   NULL, 0),
-SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_ANALOGUE_LINEOUT_0, 0, 0,
+SND_SOC_DAPM_PGA_S("Right Line Output PGA", 0, WM8903_POWER_MANAGEMENT_3, 0, 0,
 		   NULL, 0),
 
 SND_SOC_DAPM_PGA_S("HPL_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 7, 0, NULL, 0),
 SND_SOC_DAPM_PGA_S("HPL_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 6, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_ENA_DLY", 2, WM8903_ANALOGUE_HP_0, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPL_ENA", 1, WM8903_ANALOGUE_HP_0, 4, 0, NULL, 0),
 SND_SOC_DAPM_PGA_S("HPR_RMV_SHORT", 4, WM8903_ANALOGUE_HP_0, 3, 0, NULL, 0),
 SND_SOC_DAPM_PGA_S("HPR_ENA_OUTP", 3, WM8903_ANALOGUE_HP_0, 2, 0, NULL, 0),
-SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 1, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_ENA_DLY", 2, WM8903_ANALOGUE_HP_0, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA_S("HPR_ENA", 1, WM8903_ANALOGUE_HP_0, 0, 0, NULL, 0),
 
 SND_SOC_DAPM_PGA_S("LINEOUTL_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 7, 0,
 		   NULL, 0),
 SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 6, 0,
 		   NULL, 0),
-SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 5, 0,
+SND_SOC_DAPM_PGA_S("LINEOUTL_ENA_DLY", 2, WM8903_ANALOGUE_LINEOUT_0, 5, 0,
+		   NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTL_ENA", 1, WM8903_ANALOGUE_LINEOUT_0, 4, 0,
 		   NULL, 0),
 SND_SOC_DAPM_PGA_S("LINEOUTR_RMV_SHORT", 4, WM8903_ANALOGUE_LINEOUT_0, 3, 0,
 		   NULL, 0),
 SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_OUTP", 3, WM8903_ANALOGUE_LINEOUT_0, 2, 0,
 		   NULL, 0),
-SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 1, WM8903_ANALOGUE_LINEOUT_0, 1, 0,
+SND_SOC_DAPM_PGA_S("LINEOUTR_ENA_DLY", 2, WM8903_ANALOGUE_LINEOUT_0, 1, 0,
+		   NULL, 0),
+SND_SOC_DAPM_PGA_S("LINEOUTR_ENA", 1, WM8903_ANALOGUE_LINEOUT_0, 0, 0,
 		   NULL, 0),
 
 SND_SOC_DAPM_SUPPLY("DCS Master", WM8903_DC_SERVO_0, 4, 0, NULL, 0),
@@ -1037,10 +1041,14 @@ static const struct snd_soc_dapm_route intercon[] = {
 	{ "Left Speaker PGA", NULL, "Left Speaker Mixer" },
 	{ "Right Speaker PGA", NULL, "Right Speaker Mixer" },
 
-	{ "HPL_ENA_DLY", NULL, "Left Headphone Output PGA" },
-	{ "HPR_ENA_DLY", NULL, "Right Headphone Output PGA" },
-	{ "LINEOUTL_ENA_DLY", NULL, "Left Line Output PGA" },
-	{ "LINEOUTR_ENA_DLY", NULL, "Right Line Output PGA" },
+	{ "HPL_ENA", NULL, "Left Headphone Output PGA" },
+	{ "HPR_ENA", NULL, "Right Headphone Output PGA" },
+	{ "HPL_ENA_DLY", NULL, "HPL_ENA" },
+	{ "HPR_ENA_DLY", NULL, "HPR_ENA" },
+	{ "LINEOUTL_ENA", NULL, "Left Line Output PGA" },
+	{ "LINEOUTR_ENA", NULL, "Right Line Output PGA" },
+	{ "LINEOUTL_ENA_DLY", NULL, "LINEOUTL_ENA" },
+	{ "LINEOUTR_ENA_DLY", NULL, "LINEOUTR_ENA" },
 
 	{ "HPL_DCS", NULL, "DCS Master" },
 	{ "HPR_DCS", NULL, "DCS Master" },
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 3290333b2bb9..84e1bd1d2822 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3261,20 +3261,36 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
 	wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
 	/* Latch volume updates (right only; we always do left then right). */
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME,
+			    WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
 	snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME,
 			    WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU);
+	snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME,
+			    WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
 	snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME,
 			    WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
+	snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME,
+			    WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
 	snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME,
 			    WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
+	snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME,
+			    WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
 	snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME,
 			    WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
+	snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME,
+			    WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
 	snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME,
 			    WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
+	snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME,
+			    WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
 	snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME,
 			    WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
+	snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME,
+			    WM8994_DAC1_VU, WM8994_DAC1_VU);
 	snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME,
 			    WM8994_DAC1_VU, WM8994_DAC1_VU);
+	snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME,
+			    WM8994_DAC2_VU, WM8994_DAC2_VU);
 	snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME,
 			    WM8994_DAC2_VU, WM8994_DAC2_VU);
 
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 7b6b3c18e299..4005e9af5d61 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -740,12 +740,12 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
 
 	{ "SPKL", "Input Switch", "MIXINL" },
 	{ "SPKL", "IN1LP Switch", "IN1LP" },
-	{ "SPKL", "Output Switch", "Left Output Mixer" },
+	{ "SPKL", "Output Switch", "Left Output PGA" },
 	{ "SPKL", NULL, "TOCLK" },
 
 	{ "SPKR", "Input Switch", "MIXINR" },
 	{ "SPKR", "IN1RP Switch", "IN1RP" },
-	{ "SPKR", "Output Switch", "Right Output Mixer" },
+	{ "SPKR", "Output Switch", "Right Output PGA" },
 	{ "SPKR", NULL, "TOCLK" },
 
 	{ "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
@@ -767,8 +767,8 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
 	{ "SPKOUTRP", NULL, "SPKR Driver" },
 	{ "SPKOUTRN", NULL, "SPKR Driver" },
 
-	{ "Left Headphone Mux", "Mixer", "Left Output Mixer" },
-	{ "Right Headphone Mux", "Mixer", "Right Output Mixer" },
+	{ "Left Headphone Mux", "Mixer", "Left Output PGA" },
+	{ "Right Headphone Mux", "Mixer", "Right Output PGA" },
 
 	{ "Headphone PGA", NULL, "Left Headphone Mux" },
 	{ "Headphone PGA", NULL, "Right Headphone Mux" },
diff --git a/sound/soc/mid-x86/sst_platform.c b/sound/soc/mid-x86/sst_platform.c
index b2e9198a983a..d567c322a2fb 100644
--- a/sound/soc/mid-x86/sst_platform.c
+++ b/sound/soc/mid-x86/sst_platform.c
@@ -116,18 +116,20 @@ struct snd_soc_dai_driver sst_platform_dai[] = {
 static inline void sst_set_stream_status(struct sst_runtime_stream *stream,
 					int state)
 {
-	spin_lock(&stream->status_lock);
+	unsigned long flags;
+	spin_lock_irqsave(&stream->status_lock, flags);
 	stream->stream_status = state;
-	spin_unlock(&stream->status_lock);
+	spin_unlock_irqrestore(&stream->status_lock, flags);
 }
 
 static inline int sst_get_stream_status(struct sst_runtime_stream *stream)
 {
 	int state;
+	unsigned long flags;
 
-	spin_lock(&stream->status_lock);
+	spin_lock_irqsave(&stream->status_lock, flags);
 	state = stream->stream_status;
-	spin_unlock(&stream->status_lock);
+	spin_unlock_irqrestore(&stream->status_lock, flags);
 	return state;
 }
 
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 38aac7d57a59..9c7e8b48aed6 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -350,8 +350,8 @@ static int s3c_pcm_set_fmt(struct snd_soc_dai *cpu_dai,
 	ctl = readl(regs + S3C_PCM_CTL);
 
 	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
-	case SND_SOC_DAIFMT_NB_NF:
-		/* Nothing to do, NB_NF by default */
+	case SND_SOC_DAIFMT_IB_NF:
+		/* Nothing to do, IB_NF by default */
 		break;
 	default:
 		dev_err(pcm->dev, "Unsupported clock inversion!\n");
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 0c9997e2d8c0..23c0e83d4c19 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1200,10 +1200,11 @@ static int fsi_probe(struct platform_device *pdev)
 	master->fsib.master = master;
 
 	pm_runtime_enable(&pdev->dev);
-	pm_runtime_resume(&pdev->dev);
 	dev_set_drvdata(&pdev->dev, master);
 
+	pm_runtime_get_sync(&pdev->dev);
 	fsi_soft_all_reset(master);
+	pm_runtime_put_sync(&pdev->dev);
 
 	ret = request_irq(irq, &fsi_interrupt, IRQF_DISABLED,
 			  id_entry->name, master);
@@ -1218,8 +1219,17 @@ static int fsi_probe(struct platform_device *pdev)
 		goto exit_free_irq;
 	}
 
-	return snd_soc_register_dais(&pdev->dev, fsi_soc_dai, ARRAY_SIZE(fsi_soc_dai));
+	ret = snd_soc_register_dais(&pdev->dev, fsi_soc_dai,
+				    ARRAY_SIZE(fsi_soc_dai));
+	if (ret < 0) {
+		dev_err(&pdev->dev, "cannot snd dai register\n");
+		goto exit_snd_soc;
+	}
+
+	return ret;
 
+exit_snd_soc:
+	snd_soc_unregister_platform(&pdev->dev);
 exit_free_irq:
 	free_irq(irq, master);
 exit_iounmap:
@@ -1238,12 +1248,11 @@ static int fsi_remove(struct platform_device *pdev)
 
 	master = dev_get_drvdata(&pdev->dev);
 
-	snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai));
-	snd_soc_unregister_platform(&pdev->dev);
-
+	free_irq(master->irq, master);
 	pm_runtime_disable(&pdev->dev);
 
-	free_irq(master->irq, master);
+	snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(fsi_soc_dai));
+	snd_soc_unregister_platform(&pdev->dev);
 
 	iounmap(master->base);
 	kfree(master);
@@ -1321,3 +1330,4 @@ module_exit(fsi_mobile_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SuperH onchip FSI audio driver");
 MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_ALIAS("platform:fsi-pcm-audio");
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index b76b74db0968..d8562ce4de7a 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -629,6 +629,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
 		runtime->hw.rates |= codec_dai_drv->capture.rates;
 	}
 
+	ret = -EINVAL;
 	snd_pcm_limit_hw_rates(runtime);
 	if (!runtime->hw.rates) {
 		printk(KERN_ERR "asoc: %s <-> %s No matching rates\n",
@@ -640,7 +641,8 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
 			codec_dai->name, cpu_dai->name);
 		goto config_err;
 	}
-	if (!runtime->hw.channels_min || !runtime->hw.channels_max) {
+	if (!runtime->hw.channels_min || !runtime->hw.channels_max ||
+	    runtime->hw.channels_min > runtime->hw.channels_max) {
 		printk(KERN_ERR "asoc: %s <-> %s No matching channels\n",
 			codec_dai->name, cpu_dai->name);
 		goto config_err;
@@ -2060,6 +2062,7 @@ const struct dev_pm_ops snd_soc_pm_ops = {
 	.resume = snd_soc_resume,
 	.poweroff = snd_soc_poweroff,
 };
+EXPORT_SYMBOL_GPL(snd_soc_pm_ops);
 
 /* ASoC platform driver */
 static struct platform_driver soc_driver = {
diff --git a/sound/soc/tegra/harmony.c b/sound/soc/tegra/harmony.c
index 8585957477eb..556a57133925 100644
--- a/sound/soc/tegra/harmony.c
+++ b/sound/soc/tegra/harmony.c
@@ -370,6 +370,7 @@ static struct platform_driver tegra_snd_harmony_driver = {
 	.driver = {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
+		.pm = &snd_soc_pm_ops,
 	},
 	.probe = tegra_snd_harmony_probe,
 	.remove = __devexit_p(tegra_snd_harmony_remove),
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 17d1dcb3c667..416538248a4b 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -163,6 +163,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist)
 	struct perf_event_attr *attr = &evsel->attr;
 	int track = !evsel->idx; /* only the first counter needs these */
 
+	attr->inherit = !no_inherit;
 	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 			    PERF_FORMAT_TOTAL_TIME_RUNNING |
 			    PERF_FORMAT_ID;
@@ -251,6 +252,9 @@ static void open_counters(struct perf_evlist *evlist)
 {
 	struct perf_evsel *pos;
 
+	if (evlist->cpus->map[0] < 0)
+		no_inherit = true;
+
 	list_for_each_entry(pos, &evlist->entries, node) {
 		struct perf_event_attr *attr = &pos->attr;
 		/*
@@ -271,8 +275,7 @@ static void open_counters(struct perf_evlist *evlist)
 retry_sample_id:
 		attr->sample_id_all = sample_id_all_avail ? 1 : 0;
 try_again:
-		if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
-				     !no_inherit) < 0) {
+		if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES) {
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e2109f9b43eb..03f0e45f1479 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -167,16 +167,17 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
 	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 			    PERF_FORMAT_TOTAL_TIME_RUNNING;
 
+	attr->inherit = !no_inherit;
+
 	if (system_wide)
-		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false);
+		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false);
 
-	attr->inherit = !no_inherit;
 	if (target_pid == -1 && target_tid == -1) {
 		attr->disabled = 1;
 		attr->enable_on_exec = 1;
 	}
 
-	return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false);
+	return perf_evsel__open_per_thread(evsel, evsel_list->threads, false);
 }
 
 /*
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1b2106c58f66..11e3c8458362 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -290,7 +290,7 @@ static int test__open_syscall_event(void)
 		goto out_thread_map_delete;
 	}
 
-	if (perf_evsel__open_per_thread(evsel, threads, false, false) < 0) {
+	if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
 		pr_debug("failed to open counter: %s, "
 			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 			 strerror(errno));
@@ -303,7 +303,7 @@ static int test__open_syscall_event(void)
 	}
 
 	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
-		pr_debug("perf_evsel__open_read_on_cpu\n");
+		pr_debug("perf_evsel__read_on_cpu\n");
 		goto out_close_fd;
 	}
 
@@ -365,7 +365,7 @@ static int test__open_syscall_event_on_all_cpus(void)
 		goto out_thread_map_delete;
 	}
 
-	if (perf_evsel__open(evsel, cpus, threads, false, false) < 0) {
+	if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
 		pr_debug("failed to open counter: %s, "
 			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 			 strerror(errno));
@@ -418,7 +418,7 @@ static int test__open_syscall_event_on_all_cpus(void)
 			continue;
 
 		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
-			pr_debug("perf_evsel__open_read_on_cpu\n");
+			pr_debug("perf_evsel__read_on_cpu\n");
 			err = -1;
 			break;
 		}
@@ -529,7 +529,7 @@ static int test__basic_mmap(void)
 
 		perf_evlist__add(evlist, evsels[i]);
 
-		if (perf_evsel__open(evsels[i], cpus, threads, false, false) < 0) {
+		if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
 			pr_debug("failed to open counter: %s, "
 				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 				 strerror(errno));
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index fc1273e976c5..7e3d6e310bf8 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -845,9 +845,10 @@ static void start_counters(struct perf_evlist *evlist)
 	}
 
 		attr->mmap = 1;
+		attr->inherit = inherit;
 try_again:
 		if (perf_evsel__open(counter, top.evlist->cpus,
-				     top.evlist->threads, group, inherit) < 0) {
+				     top.evlist->threads, group) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES) {
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index 9fea75535221..96bee5c46008 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -13,7 +13,7 @@ cgroupfs_find_mountpoint(char *buf, size_t maxlen)
 {
 	FILE *fp;
 	char mountpoint[MAX_PATH+1], tokens[MAX_PATH+1], type[MAX_PATH+1];
-	char *token, *saved_ptr;
+	char *token, *saved_ptr = NULL;
 	int found = 0;
 
 	fp = fopen("/proc/mounts", "r");
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d852cefa20de..45da8d186b49 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -12,6 +12,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "util.h"
+#include "debug.h"
 
 #include <sys/mman.h>
 
@@ -250,15 +251,19 @@ int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 	return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
-			       int mask, int fd)
+static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
+			       int cpu, int prot, int mask, int fd)
 {
 	evlist->mmap[cpu].prev = 0;
 	evlist->mmap[cpu].mask = mask;
 	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
 				      MAP_SHARED, fd, 0);
-	if (evlist->mmap[cpu].base == MAP_FAILED)
+	if (evlist->mmap[cpu].base == MAP_FAILED) {
+		if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit)
+			ui__warning("Inherit is not allowed on per-task "
+				    "events using mmap.\n");
 		return -1;
+	}
 
 	perf_evlist__add_pollfd(evlist, fd);
 	return 0;
@@ -312,7 +317,8 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
 				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
 					  FD(first_evsel, cpu, 0)) != 0)
 					goto out_unmap;
-			} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
+			} else if (__perf_evlist__mmap(evlist, evsel, cpu,
+						       prot, mask, fd) < 0)
 				goto out_unmap;
 
 			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
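
[Reviewer note] The mmap() failure path above now names the most likely cause: the kernel rejects mmap() of an inherited per-task event (cpu map entry of -1), since every inherited child would have to share one ring buffer. A hedged restatement of that condition as a standalone predicate, assuming the evlist/evsel definitions from the perf tool headers; the helper name is hypothetical and not part of the patch:

/* Sketch only: mirrors the check added to __perf_evlist__mmap() above. */
#include <stdbool.h>
#include "util/evlist.h"
#include "util/evsel.h"

static bool inherited_per_task_mmap(struct perf_evlist *evlist,
				    struct perf_evsel *evsel, int cpu)
{
	/* map[cpu] == -1 means "this task on any CPU"; inherited children
	 * would all write to the same buffer, so the kernel refuses the mmap */
	return evlist->cpus->map[cpu] == -1 && evsel->attr.inherit;
}
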
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 662596afd7f1..d6fd59beb860 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -175,7 +175,7 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 }
 
 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-			      struct thread_map *threads, bool group, bool inherit)
+			      struct thread_map *threads, bool group)
 {
 	int cpu, thread;
 	unsigned long flags = 0;
@@ -192,19 +192,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
 		int group_fd = -1;
-		/*
-		 * Don't allow mmap() of inherited per-task counters. This
-		 * would create a performance issue due to all children writing
-		 * to the same buffer.
-		 *
-		 * FIXME:
-		 * Proper fix is not to pass 'inherit' to perf_evsel__open*,
-		 * but a 'flags' parameter, with 'group' folded there as well,
-		 * then introduce a PERF_O_{MMAP,GROUP,INHERIT} enum, and if
-		 * O_MMAP is set, emit a warning if cpu < 0 and O_INHERIT is
-		 * set. Lets go for the minimal fix first tho.
-		 */
-		evsel->attr.inherit = (cpus->map[cpu] >= 0) && inherit;
 
 		for (thread = 0; thread < threads->nr; thread++) {
 
@@ -253,7 +240,7 @@ static struct {
 };
 
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-		     struct thread_map *threads, bool group, bool inherit)
+		     struct thread_map *threads, bool group)
 {
 	if (cpus == NULL) {
 		/* Work around old compiler warnings about strict aliasing */
@@ -263,19 +250,19 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 	if (threads == NULL)
 		threads = &empty_thread_map.map;
 
-	return __perf_evsel__open(evsel, cpus, threads, group, inherit);
+	return __perf_evsel__open(evsel, cpus, threads, group);
 }
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-			     struct cpu_map *cpus, bool group, bool inherit)
+			     struct cpu_map *cpus, bool group)
 {
-	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, inherit);
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
 }
 
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-				struct thread_map *threads, bool group, bool inherit)
+				struct thread_map *threads, bool group)
 {
-	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
 }
 
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 6710ab538342..f79bb2c09a6c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -81,11 +81,11 @@ void perf_evsel__free_id(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-			     struct cpu_map *cpus, bool group, bool inherit);
+			     struct cpu_map *cpus, bool group);
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-				struct thread_map *threads, bool group, bool inherit);
+				struct thread_map *threads, bool group);
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-		     struct thread_map *threads, bool group, bool inherit);
+		     struct thread_map *threads, bool group);
 
 #define perf_evsel__match(evsel, t, c) \
 	(evsel->attr.type == PERF_TYPE_##t && \
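
[Reviewer note] With these prototypes reduced to the maps plus the group flag, anything a caller used to pass through the extra bool now has to be written into evsel->attr before the open. A usage sketch under that assumption; open_counter() and its arguments are illustrative only, not taken from the patch:

/* Sketch only: illustrates the trimmed signatures declared above. */
#include "evsel.h"

static int open_counter(struct perf_evsel *evsel, struct cpu_map *cpus,
			struct thread_map *threads, bool system_wide)
{
	evsel->attr.inherit = 1;	/* now set by the caller, not the API */

	if (system_wide)
		return perf_evsel__open_per_cpu(evsel, cpus, false);

	return perf_evsel__open_per_thread(evsel, threads, false);
}
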
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index a9f2d7e1204d..f5e38451fdc5 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -498,11 +498,11 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
 	struct cpu_map *cpus = NULL;
 	struct thread_map *threads = NULL;
 	PyObject *pcpus = NULL, *pthreads = NULL;
-	int group = 0, overwrite = 0;
-	static char *kwlist[] = {"cpus", "threads", "group", "overwrite", NULL, NULL};
+	int group = 0, inherit = 0;
+	static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL, NULL};
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
-					 &pcpus, &pthreads, &group, &overwrite))
+					 &pcpus, &pthreads, &group, &inherit))
 		return NULL;
 
 	if (pthreads != NULL)
@@ -511,7 +511,8 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
 	if (pcpus != NULL)
 		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
 
-	if (perf_evsel__open(evsel, cpus, threads, group, overwrite) < 0) {
+	evsel->attr.inherit = inherit;
+	if (perf_evsel__open(evsel, cpus, threads, group) < 0) {
 		PyErr_SetFromErrno(PyExc_OSError);
 		return NULL;
 	}
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 8c17a8730e4a..15633d608133 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -256,10 +256,9 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
 			 int refresh)
 {
 	struct objdump_line *pos, *n;
-	struct annotation *notes = symbol__annotation(sym);
+	struct annotation *notes;
 	struct annotate_browser browser = {
 		.b = {
-			.entries = &notes->src->source,
 			.refresh = ui_browser__list_head_refresh,
 			.seek = ui_browser__list_head_seek,
 			.write = annotate_browser__write,
@@ -281,6 +280,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
 
 	ui_helpline__push("Press <- or ESC to exit");
 
+	notes = symbol__annotation(sym);
+
 	list_for_each_entry(pos, &notes->src->source, node) {
 		struct objdump_line_rb_node *rbpos;
 		size_t line_len = strlen(pos->line);
@@ -291,6 +292,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
 		rbpos->idx = browser.b.nr_entries++;
 	}
 
+	browser.b.entries = &notes->src->source,
 	browser.b.width += 18; /* Percentage */
 	ret = annotate_browser__run(&browser, evidx, refresh);
 	list_for_each_entry_safe(pos, n, &notes->src->source, node) {
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index 798efdca3ead..5d767c622dfc 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -851,7 +851,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel,
 			goto out_free_stack;
 		case 'a':
 			if (browser->selection == NULL ||
-			    browser->selection->map == NULL ||
+			    browser->selection->sym == NULL ||
 			    browser->selection->map->dso->annotate_warned)
 				continue;
 			goto do_annotate;